Bug 1004116 - Began to #include MIRInstruction.h into appropriate files. draft
authorChris Mander <mandercs3@gmail.com>
Mon, 11 Dec 2017 13:18:03 +0100
changeset 710641 81dd70a9447b607cb62384eb423b8fb26f5ce471
parent 703432 b2200418de0bf215a84d84317d247363536369dd
child 710642 488223b2bfe0498af612447b805d9da919f85db6
push id92870
push userbmo:mandercs3@gmail.com
push dateMon, 11 Dec 2017 12:19:55 +0000
bugs1004116
milestone59.0a1
Bug 1004116 - Began to #include MIRInstruction.h into appropriate files. MozReview-Commit-ID: ItPh95We0E
devtools/client/netmonitor/src/reducers/filters.js.orig
js/src/jit/AliasAnalysis.cpp
js/src/jit/BaselineInspector.cpp
js/src/jit/BaselineInspector.h
js/src/jit/CodeGenerator.cpp
js/src/jit/EagerSimdUnbox.cpp
js/src/jit/EffectiveAddressAnalysis.cpp
js/src/jit/FoldLinearArithConstants.cpp
js/src/jit/Ion.cpp
js/src/jit/IonAnalysis.cpp
js/src/jit/IonAnalysis.h
js/src/jit/IonBuilder.cpp
js/src/jit/IonBuilder.h
js/src/jit/JSONSpewer.cpp
js/src/jit/LoopUnroller.cpp
js/src/jit/Lowering.cpp
js/src/jit/Lowering.h
js/src/jit/MCallOptimize.cpp
js/src/jit/MIR.cpp
js/src/jit/MIR.h
js/src/jit/MIRGraph.cpp
js/src/jit/MIRGraph.h
js/src/jit/MIRInstruction.h
js/src/jit/MacroAssembler.cpp
js/src/jit/RangeAnalysis.cpp
js/src/jit/Recover.cpp
js/src/jit/Recover.h
js/src/jit/ScalarReplacement.cpp
js/src/jit/ValueNumbering.cpp
js/src/jit/arm/CodeGenerator-arm.cpp
js/src/jit/arm/CodeGenerator-arm.h
js/src/jit/arm/LIR-arm.h
js/src/jit/arm/Lowering-arm.cpp
js/src/jit/arm/Lowering-arm.h
js/src/jit/arm64/Lowering-arm64.cpp
js/src/jit/arm64/Lowering-arm64.h
js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
js/src/jit/mips-shared/Lowering-mips-shared.cpp
js/src/jit/mips-shared/Lowering-mips-shared.h
js/src/jit/mips32/CodeGenerator-mips32.cpp
js/src/jit/mips32/LIR-mips32.h
js/src/jit/mips32/Lowering-mips32.cpp
js/src/jit/mips32/Lowering-mips32.h
js/src/jit/mips64/CodeGenerator-mips64.cpp
js/src/jit/mips64/LIR-mips64.h
js/src/jit/mips64/Lowering-mips64.cpp
js/src/jit/none/Lowering-none.h
js/src/jit/shared/CodeGenerator-shared.cpp
js/src/jit/shared/CodeGenerator-shared.h
js/src/jit/shared/LIR-shared.h
js/src/jit/shared/Lowering-shared-inl.h
js/src/jit/shared/Lowering-shared.cpp
js/src/jit/shared/Lowering-shared.h
js/src/jit/x64/CodeGenerator-x64.cpp
js/src/jit/x64/LIR-x64.h
js/src/jit/x64/Lowering-x64.cpp
js/src/jit/x64/Lowering-x64.h
js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
js/src/jit/x86-shared/Lowering-x86-shared.cpp
js/src/jit/x86-shared/Lowering-x86-shared.h
js/src/jit/x86/Lowering-x86.cpp
js/src/jit/x86/Lowering-x86.h
js/src/jsapi-tests/testJitFoldsTo.cpp
js/src/jsapi-tests/testJitGVN.cpp
js/src/jsapi-tests/testJitRangeAnalysis.cpp
js/src/wasm/WasmBaselineCompile.cpp.orig
js/src/wasm/WasmIonCompile.cpp
new file mode 100644
--- /dev/null
+++ b/devtools/client/netmonitor/src/reducers/filters.js.orig
@@ -0,0 +1,78 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+const {
+  ENABLE_REQUEST_FILTER_TYPE_ONLY,
+  TOGGLE_REQUEST_FILTER_TYPE,
+  SET_REQUEST_FILTER_TEXT,
+  FILTER_TAGS
+} = require("../constants");
+
+const FilterTypes = I.Record(["all"]
+  .concat(FILTER_TAGS)
+  .reduce((o, tag) => Object.assign(o, { [tag]: false }), {})
+);
+
+const Filters = I.Record({
+  requestFilterTypes: new FilterTypes({ all: true }),
+  requestFilterText: "",
+});
+
+function toggleRequestFilterType(state, action) {
+  let { filter } = action;
+  let newState;
+
+  // Ignore unknown filter type
+  if (!state.has(filter)) {
+    return state;
+  }
+  if (filter === "all") {
+    return new FilterTypes({ all: true });
+  }
+
+  newState = state.withMutations(types => {
+    types.set("all", false);
+    types.set(filter, !state.get(filter));
+  });
+
+  if (!newState.includes(true)) {
+    newState = new FilterTypes({ all: true });
+  }
+
+  return newState;
+}
+
+function enableRequestFilterTypeOnly(state, action) {
+  let { filter } = action;
+
+  // Ignore unknown filter type
+  if (!state.has(filter)) {
+    return state;
+  }
+
+  return new FilterTypes({ [filter]: true });
+}
+
+function filters(state = new Filters(), action) {
+  switch (action.type) {
+    case ENABLE_REQUEST_FILTER_TYPE_ONLY:
+      return state.set("requestFilterTypes",
+        enableRequestFilterTypeOnly(state.requestFilterTypes, action));
+    case TOGGLE_REQUEST_FILTER_TYPE:
+      return state.set("requestFilterTypes",
+        toggleRequestFilterType(state.requestFilterTypes, action));
+    case SET_REQUEST_FILTER_TEXT:
+      return state.set("requestFilterText", action.text);
+    default:
+      return state;
+  }
+}
+
+module.exports = {
+  FilterTypes,
+  Filters,
+  filters
+};
--- a/js/src/jit/AliasAnalysis.cpp
+++ b/js/src/jit/AliasAnalysis.cpp
@@ -9,16 +9,17 @@
 #include <stdio.h>
 
 #include "jit/AliasAnalysisShared.h"
 #include "jit/Ion.h"
 #include "jit/IonBuilder.h"
 #include "jit/JitSpewer.h"
 #include "jit/MIR.h"
 #include "jit/MIRGraph.h"
+#include "jit/MIRInstruction.h"
 
 #include "vm/Printer.h"
 
 using namespace js;
 using namespace js::jit;
 
 using mozilla::Array;
 
--- a/js/src/jit/BaselineInspector.cpp
+++ b/js/src/jit/BaselineInspector.cpp
@@ -5,16 +5,17 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/BaselineInspector.h"
 
 #include "mozilla/DebugOnly.h"
 
 #include "jit/BaselineIC.h"
 #include "jit/CacheIRCompiler.h"
+#include "jit/MIRInstruction.h"
 
 #include "jsscriptinlines.h"
 
 #include "vm/EnvironmentObject-inl.h"
 #include "vm/ObjectGroup-inl.h"
 #include "vm/ReceiverGuard-inl.h"
 
 using namespace js;
--- a/js/src/jit/BaselineInspector.h
+++ b/js/src/jit/BaselineInspector.h
@@ -5,16 +5,17 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_BaselineInspector_h
 #define jit_BaselineInspector_h
 
 #include "jit/BaselineIC.h"
 #include "jit/BaselineJIT.h"
 #include "jit/MIR.h"
+#include "jit/MIRInstruction.h"
 
 namespace js {
 namespace jit {
 
 class BaselineInspector;
 
 class ICInspector
 {
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -32,16 +32,17 @@
 #include "jit/IonBuilder.h"
 #include "jit/IonIC.h"
 #include "jit/IonOptimizationLevels.h"
 #include "jit/JitcodeMap.h"
 #include "jit/JitSpewer.h"
 #include "jit/Linker.h"
 #include "jit/Lowering.h"
 #include "jit/MIRGenerator.h"
+#include "jit/MIRInstruction.h"
 #include "jit/MoveEmitter.h"
 #include "jit/RangeAnalysis.h"
 #include "jit/SharedICHelpers.h"
 #include "jit/StackSlotAllocator.h"
 #include "vm/AsyncFunction.h"
 #include "vm/AsyncIteration.h"
 #include "vm/MatchPairs.h"
 #include "vm/RegExpObject.h"
--- a/js/src/jit/EagerSimdUnbox.cpp
+++ b/js/src/jit/EagerSimdUnbox.cpp
@@ -4,16 +4,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/EagerSimdUnbox.h"
 
 #include "jit/MIR.h"
 #include "jit/MIRGenerator.h"
 #include "jit/MIRGraph.h"
+#include "jit/MIRInstruction.h"
 
 namespace js {
 namespace jit {
 
 // Do not optimize any Phi instruction which has conflicting Unbox operations,
 // as this might imply some intended polymorphism.
 static bool
 CanUnboxSimdPhi(const JitCompartment* jitCompartment, MPhi* phi, SimdType unboxType)
--- a/js/src/jit/EffectiveAddressAnalysis.cpp
+++ b/js/src/jit/EffectiveAddressAnalysis.cpp
@@ -6,16 +6,17 @@
 
 #include "jit/EffectiveAddressAnalysis.h"
 
 #include "jsnum.h"
 
 #include "jit/IonAnalysis.h"
 #include "jit/MIR.h"
 #include "jit/MIRGraph.h"
+#include "jit/MIRInstruction.h"
 
 using namespace js;
 using namespace jit;
 
 static void
 AnalyzeLsh(TempAllocator& alloc, MLsh* lsh)
 {
     if (lsh->specialization() != MIRType::Int32)
--- a/js/src/jit/FoldLinearArithConstants.cpp
+++ b/js/src/jit/FoldLinearArithConstants.cpp
@@ -5,16 +5,17 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/FoldLinearArithConstants.h"
 
 #include "jit/IonAnalysis.h"
 #include "jit/MIR.h"
 #include "jit/MIRGenerator.h"
 #include "jit/MIRGraph.h"
+#include "jit/MIRInstruction.h"
 
 using namespace js;
 using namespace jit;
 
 namespace js {
 namespace jit {
 
 // Mark this node and its children as RecoveredOnBailout when they are not used.
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -36,16 +36,17 @@
 #include "jit/JitCommon.h"
 #include "jit/JitCompartment.h"
 #include "jit/JitSpewer.h"
 #include "jit/LICM.h"
 #include "jit/Linker.h"
 #include "jit/LIR.h"
 #include "jit/LoopUnroller.h"
 #include "jit/Lowering.h"
+#include "jit/MIRInstruction.h"
 #include "jit/PerfSpewer.h"
 #include "jit/RangeAnalysis.h"
 #include "jit/ScalarReplacement.h"
 #include "jit/Sink.h"
 #include "jit/StupidAllocator.h"
 #include "jit/ValueNumbering.h"
 #include "jit/WasmBCE.h"
 #include "vm/Debugger.h"
--- a/js/src/jit/IonAnalysis.cpp
+++ b/js/src/jit/IonAnalysis.cpp
@@ -12,16 +12,17 @@
 #include "jit/BaselineJIT.h"
 #include "jit/FlowAliasAnalysis.h"
 #include "jit/Ion.h"
 #include "jit/IonBuilder.h"
 #include "jit/IonOptimizationLevels.h"
 #include "jit/LIR.h"
 #include "jit/Lowering.h"
 #include "jit/MIRGraph.h"
+#include "jit/MIRInstruction.h"
 #include "vm/RegExpObject.h"
 #include "vm/SelfHosting.h"
 
 #include "jsobjinlines.h"
 #include "jsopcodeinlines.h"
 #include "jsscriptinlines.h"
 
 #include "jit/shared/Lowering-shared-inl.h"
--- a/js/src/jit/IonAnalysis.h
+++ b/js/src/jit/IonAnalysis.h
@@ -6,16 +6,17 @@
 
 #ifndef jit_IonAnalysis_h
 #define jit_IonAnalysis_h
 
 // This file declares various analysis passes that operate on MIR.
 
 #include "jit/JitAllocPolicy.h"
 #include "jit/MIR.h"
+#include "jit/MIRInstruction.h"
 
 namespace js {
 namespace jit {
 
 class MIRGenerator;
 class MIRGraph;
 
 MOZ_MUST_USE bool
--- a/js/src/jit/IonBuilder.cpp
+++ b/js/src/jit/IonBuilder.cpp
@@ -15,16 +15,17 @@
 #include "jit/BaselineFrame.h"
 #include "jit/BaselineInspector.h"
 #include "jit/Ion.h"
 #include "jit/IonControlFlow.h"
 #include "jit/IonOptimizationLevels.h"
 #include "jit/JitSpewer.h"
 #include "jit/Lowering.h"
 #include "jit/MIRGraph.h"
+#include "jit/MIRInstruction.h"
 #include "vm/ArgumentsObject.h"
 #include "vm/Opcodes.h"
 #include "vm/RegExpStatics.h"
 #include "vm/TraceLogging.h"
 
 #include "jsopcodeinlines.h"
 #include "jsscriptinlines.h"
 
--- a/js/src/jit/IonBuilder.h
+++ b/js/src/jit/IonBuilder.h
@@ -14,16 +14,17 @@
 
 #include "jit/BaselineInspector.h"
 #include "jit/BytecodeAnalysis.h"
 #include "jit/IonAnalysis.h"
 #include "jit/IonControlFlow.h"
 #include "jit/IonOptimizationLevels.h"
 #include "jit/MIR.h"
 #include "jit/MIRGenerator.h"
+#include "jit/MIRInstruction.h"
 #include "jit/MIRGraph.h"
 #include "jit/OptimizationTracking.h"
 
 namespace js {
 namespace jit {
 
 class CodeGenerator;
 class CallInfo;
--- a/js/src/jit/JSONSpewer.cpp
+++ b/js/src/jit/JSONSpewer.cpp
@@ -10,16 +10,17 @@
 
 
 #include <stdarg.h>
 
 #include "jit/BacktrackingAllocator.h"
 #include "jit/LIR.h"
 #include "jit/MIR.h"
 #include "jit/MIRGraph.h"
+#include "jit/MIRInstruction.h"
 #include "jit/RangeAnalysis.h"
 
 using namespace js;
 using namespace js::jit;
 
 void
 JSONSpewer::beginFunction(JSScript* script)
 {
--- a/js/src/jit/LoopUnroller.cpp
+++ b/js/src/jit/LoopUnroller.cpp
@@ -2,16 +2,17 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/LoopUnroller.h"
 
 #include "jit/MIRGraph.h"
+#include "jit/MIRInstruction.h"
 
 using namespace js;
 using namespace js::jit;
 
 using mozilla::ArrayLength;
 
 namespace {
 
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -7,16 +7,17 @@
 #include "jit/Lowering.h"
 
 #include "mozilla/DebugOnly.h"
 
 #include "jit/JitSpewer.h"
 #include "jit/LIR.h"
 #include "jit/MIR.h"
 #include "jit/MIRGraph.h"
+#include "jit/MIRInstruction.h"
 #include "wasm/WasmSignalHandlers.h"
 
 #include "jsobjinlines.h"
 #include "jsopcodeinlines.h"
 
 #include "jit/shared/Lowering-shared-inl.h"
 
 using namespace js;
--- a/js/src/jit/Lowering.h
+++ b/js/src/jit/Lowering.h
@@ -6,16 +6,17 @@
 
 #ifndef jit_Lowering_h
 #define jit_Lowering_h
 
 // This file declares the structures that are used for attaching LIR to a
 // MIRGraph.
 
 #include "jit/LIR.h"
+#include "jit/MIRInstruction.h"
 #if defined(JS_CODEGEN_X86)
 # include "jit/x86/Lowering-x86.h"
 #elif defined(JS_CODEGEN_X64)
 # include "jit/x64/Lowering-x64.h"
 #elif defined(JS_CODEGEN_ARM)
 # include "jit/arm/Lowering-arm.h"
 #elif defined(JS_CODEGEN_ARM64)
 # include "jit/arm64/Lowering-arm64.h"
--- a/js/src/jit/MCallOptimize.cpp
+++ b/js/src/jit/MCallOptimize.cpp
@@ -17,16 +17,17 @@
 #include "builtin/TestingFunctions.h"
 #include "builtin/TypedObject.h"
 #include "jit/BaselineInspector.h"
 #include "jit/InlinableNatives.h"
 #include "jit/IonBuilder.h"
 #include "jit/Lowering.h"
 #include "jit/MIR.h"
 #include "jit/MIRGraph.h"
+#include "jit/MIRInstruction.h"
 #include "vm/ArgumentsObject.h"
 #include "vm/ProxyObject.h"
 #include "vm/SelfHosting.h"
 #include "vm/TypedArrayObject.h"
 
 #include "jsscriptinlines.h"
 
 #include "jit/shared/Lowering-shared-inl.h"
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -17,16 +17,17 @@
 #include "jsstr.h"
 
 #include "builtin/RegExp.h"
 #include "jit/AtomicOperations.h"
 #include "jit/BaselineInspector.h"
 #include "jit/IonBuilder.h"
 #include "jit/JitSpewer.h"
 #include "jit/MIRGraph.h"
+#include "jit/MIRInstruction.h"
 #include "jit/RangeAnalysis.h"
 #include "js/Conversions.h"
 
 #include "jsatominlines.h"
 #include "jsboolinlines.h"
 #include "jsobjinlines.h"
 #include "jsscriptinlines.h"
 #include "vm/UnboxedObject-inl.h"
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -32,16 +32,98 @@
 #include "vm/EnvironmentObject.h"
 #include "vm/RegExpObject.h"
 #include "vm/SharedMem.h"
 #include "vm/TypedArrayObject.h"
 #include "vm/UnboxedObject.h"
 
 namespace js {
 
+// Declared and defined in MIRInstruction.h
+class MConstant;
+class MWasmFloatConstant;
+class MSimdConstant;
+class MBail;
+class MEncodeSnapshot;
+class MRandom;
+class MConstantElements;
+class MSimdSplat;
+class MSimdConvert;
+class MReinterpretCast;
+class MSimdExtractElement;
+class MSimdAllTrue;
+class MSimdAnyTrue;
+class MSimdSwizzle;
+class MSimdUnaryArith;
+class MNewArray;
+class MNewObject;
+class MSimdBox;
+class MBox;
+class MUnbox;
+class MExtendInt32ToInt64;
+class MToInt32;
+class MTruncateToInt32;
+class MToString;
+class MToObject;
+class MToObjectOrNull;
+class MBitNot;
+class MSignExtendInt32;
+class MSignExtendInt64;
+class MAbs;
+class MSqrt;
+class MPowHalf;
+class MMathFunction;
+class MFromCharCode;
+class MFromCodePoint;
+class MSinCos;
+class MNaNToZero;
+class MSlots;
+class MElements;
+class MInitializedLength;
+class MArrayLength;
+class MTypedArrayElements;
+class MTypedObjectElements;
+class MNot;
+class MConvertUnboxedObjectToNative;
+class MArrayPopShift;
+class MClampToUint8;
+class MLoadFixedSlot;
+class MGetPropertyPolymorphic;
+class MGuardReceiverPolymorphic;
+class MGuardClass;
+class MLoadSlot;
+class MStringLength;
+class MFloor;
+class MCeil;
+class MRound;
+class MNearbyInt;
+class MTypeBarrier;
+class MNewCallObjectBase;
+class MAtomicIsLockFree;
+class MWasmReinterpret;
+class MGetDynamicName;
+class MCompare;
+class MBinaryBitInstruction;
+class MBinaryArithInstruction;
+class MMinMax;
+class MCopySign;
+class MAtan2;
+class MPow;
+class MConcat;
+class MCharCodeAt;
+class MSimdInsertElement;
+class MSimdShuffle;
+class MSimdBinaryComp;
+class MSimdBinaryArith;
+class MSimdBinarySaturating;
+class MSimdBinaryBitwise;
+class MSimdShift;
+class MSetInitializedLength;
+class MBoundsCheck;
+
 class StringObject;
 
 namespace jit {
 
 class BaselineInspector;
 class Range;
 
 template <typename T>
@@ -1425,33 +1507,16 @@ class MTernaryInstruction : public MAryI
         initOperand(0, first);
         initOperand(1, second);
         initOperand(2, third);
     }
 
     HashNumber valueHash() const override;
 };
 
-class MQuaternaryInstruction : public MAryInstruction<4>
-{
-  protected:
-    MQuaternaryInstruction(Opcode op,
-                           MDefinition* first, MDefinition* second,
-                           MDefinition* third, MDefinition* fourth)
-      : MAryInstruction(op)
-    {
-        initOperand(0, first);
-        initOperand(1, second);
-        initOperand(2, third);
-        initOperand(3, fourth);
-    }
-
-    HashNumber valueHash() const override;
-};
-
 template <class T>
 class MVariadicT : public T
 {
     FixedList<MUse> operands_;
 
   protected:
     explicit MVariadicT(typename T::Opcode op)
       : T(op)
@@ -1485,687 +1550,16 @@ class MVariadicT : public T
     }
     void replaceOperand(size_t index, MDefinition* operand) final override {
         operands_[index].replaceProducer(operand);
     }
 };
 
 typedef MVariadicT<MInstruction> MVariadicInstruction;
 
-// Generates an LSnapshot without further effect.
-class MStart : public MNullaryInstruction
-{
-    MStart()
-      : MNullaryInstruction(classOpcode)
-    { }
-
-  public:
-    INSTRUCTION_HEADER(Start)
-    TRIVIAL_NEW_WRAPPERS
-};
-
-// Instruction marking on entrypoint for on-stack replacement.
-// OSR may occur at loop headers (at JSOP_TRACE).
-// There is at most one MOsrEntry per MIRGraph.
-class MOsrEntry : public MNullaryInstruction
-{
-  protected:
-    MOsrEntry()
-      : MNullaryInstruction(classOpcode)
-    {
-        setResultType(MIRType::Pointer);
-    }
-
-  public:
-    INSTRUCTION_HEADER(OsrEntry)
-    TRIVIAL_NEW_WRAPPERS
-};
-
-// No-op instruction. This cannot be moved or eliminated, and is intended for
-// anchoring resume points at arbitrary points in a block.
-class MNop : public MNullaryInstruction
-{
-  protected:
-    MNop()
-      : MNullaryInstruction(classOpcode)
-    { }
-
-  public:
-    INSTRUCTION_HEADER(Nop)
-    TRIVIAL_NEW_WRAPPERS
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    ALLOW_CLONE(MNop)
-};
-
-// Truncation barrier. This is intended for protecting its input against
-// follow-up truncation optimizations.
-class MLimitedTruncate
-  : public MUnaryInstruction,
-    public ConvertToInt32Policy<0>::Data
-{
-  public:
-    TruncateKind truncate_;
-    TruncateKind truncateLimit_;
-
-  protected:
-    MLimitedTruncate(MDefinition* input, TruncateKind limit)
-      : MUnaryInstruction(classOpcode, input),
-        truncate_(NoTruncate),
-        truncateLimit_(limit)
-    {
-        setResultType(MIRType::Int32);
-        setResultTypeSet(input->resultTypeSet());
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(LimitedTruncate)
-    TRIVIAL_NEW_WRAPPERS
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    void computeRange(TempAllocator& alloc) override;
-    bool needTruncation(TruncateKind kind) override;
-    TruncateKind operandTruncateKind(size_t index) const override;
-    TruncateKind truncateKind() const {
-        return truncate_;
-    }
-    void setTruncateKind(TruncateKind kind) {
-        truncate_ = kind;
-    }
-};
-
-// A constant js::Value.
-class MConstant : public MNullaryInstruction
-{
-    struct Payload {
-        union {
-            bool b;
-            int32_t i32;
-            int64_t i64;
-            float f;
-            double d;
-            JSString* str;
-            JS::Symbol* sym;
-            JSObject* obj;
-            uint64_t asBits;
-        };
-        Payload() : asBits(0) {}
-    };
-
-    Payload payload_;
-
-    static_assert(sizeof(Payload) == sizeof(uint64_t),
-                  "asBits must be big enough for all payload bits");
-
-#ifdef DEBUG
-    void assertInitializedPayload() const;
-#else
-    void assertInitializedPayload() const {}
-#endif
-
-  protected:
-    MConstant(TempAllocator& alloc, const Value& v, CompilerConstraintList* constraints);
-    explicit MConstant(JSObject* obj);
-    explicit MConstant(float f);
-    explicit MConstant(int64_t i);
-
-  public:
-    INSTRUCTION_HEADER(Constant)
-    static MConstant* New(TempAllocator& alloc, const Value& v,
-                          CompilerConstraintList* constraints = nullptr);
-    static MConstant* New(TempAllocator::Fallible alloc, const Value& v,
-                          CompilerConstraintList* constraints = nullptr);
-    static MConstant* New(TempAllocator& alloc, const Value& v, MIRType type);
-    static MConstant* NewFloat32(TempAllocator& alloc, double d);
-    static MConstant* NewInt64(TempAllocator& alloc, int64_t i);
-    static MConstant* NewConstraintlessObject(TempAllocator& alloc, JSObject* v);
-    static MConstant* Copy(TempAllocator& alloc, MConstant* src) {
-        return new(alloc) MConstant(*src);
-    }
-
-    // Try to convert this constant to boolean, similar to js::ToBoolean.
-    // Returns false if the type is MIRType::Magic*.
-    bool MOZ_MUST_USE valueToBoolean(bool* res) const;
-
-    // Like valueToBoolean, but returns the result directly instead of using
-    // an outparam. Should not be used if this constant might be a magic value.
-    bool valueToBooleanInfallible() const {
-        bool res;
-        MOZ_ALWAYS_TRUE(valueToBoolean(&res));
-        return res;
-    }
-
-    void printOpcode(GenericPrinter& out) const override;
-
-    HashNumber valueHash() const override;
-    bool congruentTo(const MDefinition* ins) const override;
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    MOZ_MUST_USE bool updateForReplacement(MDefinition* def) override {
-        MConstant* c = def->toConstant();
-        // During constant folding, we don't want to replace a float32
-        // value by a double value.
-        if (type() == MIRType::Float32)
-            return c->type() == MIRType::Float32;
-        if (type() == MIRType::Double)
-            return c->type() != MIRType::Float32;
-        return true;
-    }
-
-    void computeRange(TempAllocator& alloc) override;
-    bool needTruncation(TruncateKind kind) override;
-    void truncate() override;
-
-    bool canProduceFloat32() const override;
-
-    ALLOW_CLONE(MConstant)
-
-    bool equals(const MConstant* other) const {
-        assertInitializedPayload();
-        return type() == other->type() && payload_.asBits == other->payload_.asBits;
-    }
-
-    bool toBoolean() const {
-        MOZ_ASSERT(type() == MIRType::Boolean);
-        return payload_.b;
-    }
-    int32_t toInt32() const {
-        MOZ_ASSERT(type() == MIRType::Int32);
-        return payload_.i32;
-    }
-    int64_t toInt64() const {
-        MOZ_ASSERT(type() == MIRType::Int64);
-        return payload_.i64;
-    }
-    bool isInt32(int32_t i) const {
-        return type() == MIRType::Int32 && payload_.i32 == i;
-    }
-    const double& toDouble() const {
-        MOZ_ASSERT(type() == MIRType::Double);
-        return payload_.d;
-    }
-    const float& toFloat32() const {
-        MOZ_ASSERT(type() == MIRType::Float32);
-        return payload_.f;
-    }
-    JSString* toString() const {
-        MOZ_ASSERT(type() == MIRType::String);
-        return payload_.str;
-    }
-    JS::Symbol* toSymbol() const {
-        MOZ_ASSERT(type() == MIRType::Symbol);
-        return payload_.sym;
-    }
-    JSObject& toObject() const {
-        MOZ_ASSERT(type() == MIRType::Object);
-        return *payload_.obj;
-    }
-    JSObject* toObjectOrNull() const {
-        if (type() == MIRType::Object)
-            return payload_.obj;
-        MOZ_ASSERT(type() == MIRType::Null);
-        return nullptr;
-    }
-
-    bool isTypeRepresentableAsDouble() const {
-        return IsTypeRepresentableAsDouble(type());
-    }
-    double numberToDouble() const {
-        MOZ_ASSERT(isTypeRepresentableAsDouble());
-        if (type() == MIRType::Int32)
-            return toInt32();
-        if (type() == MIRType::Double)
-            return toDouble();
-        return toFloat32();
-    }
-
-    // Convert this constant to a js::Value. Float32 constants will be stored
-    // as DoubleValue and NaNs are canonicalized. Callers must be careful: not
-    // all constants can be represented by js::Value (wasm supports int64).
-    Value toJSValue() const;
-
-    bool appendRoots(MRootList& roots) const override;
-};
-
-// Floating-point value as created by wasm. Just a constant value, used to
-// effectively inhibite all the MIR optimizations. This uses the same LIR nodes
-// as a MConstant of the same type would.
-class MWasmFloatConstant : public MNullaryInstruction
-{
-    union {
-        float f32_;
-        double f64_;
-        uint64_t bits_;
-    } u;
-
-    explicit MWasmFloatConstant(MIRType type)
-      : MNullaryInstruction(classOpcode)
-    {
-        u.bits_ = 0;
-        setResultType(type);
-    }
-
-  public:
-    INSTRUCTION_HEADER(WasmFloatConstant)
-
-    static MWasmFloatConstant* NewDouble(TempAllocator& alloc, double d) {
-        auto* ret = new(alloc) MWasmFloatConstant(MIRType::Double);
-        ret->u.f64_ = d;
-        return ret;
-    }
-
-    static MWasmFloatConstant* NewFloat32(TempAllocator& alloc, float f) {
-        auto* ret = new(alloc) MWasmFloatConstant(MIRType::Float32);
-        ret->u.f32_ = f;
-        return ret;
-    }
-
-    HashNumber valueHash() const override;
-    bool congruentTo(const MDefinition* ins) const override;
-    AliasSet getAliasSet() const override { return AliasSet::None(); }
-
-    const double& toDouble() const {
-        MOZ_ASSERT(type() == MIRType::Double);
-        return u.f64_;
-    }
-    const float& toFloat32() const {
-        MOZ_ASSERT(type() == MIRType::Float32);
-        return u.f32_;
-    }
-};
-
-// Generic constructor of SIMD valuesX4.
-class MSimdValueX4
-  : public MQuaternaryInstruction,
-    public MixPolicy<SimdScalarPolicy<0>, SimdScalarPolicy<1>,
-                     SimdScalarPolicy<2>, SimdScalarPolicy<3> >::Data
-{
-  protected:
-    MSimdValueX4(MIRType type, MDefinition* x, MDefinition* y, MDefinition* z, MDefinition* w)
-      : MQuaternaryInstruction(classOpcode, x, y, z, w)
-    {
-        MOZ_ASSERT(IsSimdType(type));
-        MOZ_ASSERT(SimdTypeToLength(type) == 4);
-
-        setMovable();
-        setResultType(type);
-    }
-
-  public:
-    INSTRUCTION_HEADER(SimdValueX4)
-    TRIVIAL_NEW_WRAPPERS
-
-    bool canConsumeFloat32(MUse* use) const override {
-        return SimdTypeToLaneType(type()) == MIRType::Float32;
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-
-    ALLOW_CLONE(MSimdValueX4)
-};
-
-// Generic constructor of SIMD values with identical lanes.
-class MSimdSplat
-  : public MUnaryInstruction,
-    public SimdScalarPolicy<0>::Data
-{
-  protected:
-    MSimdSplat(MDefinition* v, MIRType type)
-      : MUnaryInstruction(classOpcode, v)
-    {
-        MOZ_ASSERT(IsSimdType(type));
-        setMovable();
-        setResultType(type);
-    }
-
-  public:
-    INSTRUCTION_HEADER(SimdSplat)
-    TRIVIAL_NEW_WRAPPERS
-
-    bool canConsumeFloat32(MUse* use) const override {
-        return SimdTypeToLaneType(type()) == MIRType::Float32;
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-
-    ALLOW_CLONE(MSimdSplat)
-};
-
-// A constant SIMD value.
-class MSimdConstant
-  : public MNullaryInstruction
-{
-    SimdConstant value_;
-
-  protected:
-    MSimdConstant(const SimdConstant& v, MIRType type)
-      : MNullaryInstruction(classOpcode),
-        value_(v)
-    {
-        MOZ_ASSERT(IsSimdType(type));
-        setMovable();
-        setResultType(type);
-    }
-
-  public:
-    INSTRUCTION_HEADER(SimdConstant)
-    TRIVIAL_NEW_WRAPPERS
-
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isSimdConstant())
-            return false;
-        // Bool32x4 and Int32x4 share the same underlying SimdConstant representation.
-        if (type() != ins->type())
-            return false;
-        return value() == ins->toSimdConstant()->value();
-    }
-
-    const SimdConstant& value() const {
-        return value_;
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    ALLOW_CLONE(MSimdConstant)
-};
-
-// Converts all lanes of a given vector into the type of another vector
-class MSimdConvert
-  : public MUnaryInstruction,
-    public SimdPolicy<0>::Data
-{
-    // When either fromType or toType is an integer vector, should it be treated
-    // as signed or unsigned. Note that we don't support int-int conversions -
-    // use MSimdReinterpretCast for that.
-    SimdSign sign_;
-    wasm::BytecodeOffset bytecodeOffset_;
-
-    MSimdConvert(MDefinition* obj, MIRType toType, SimdSign sign,
-                 wasm::BytecodeOffset bytecodeOffset)
-      : MUnaryInstruction(classOpcode, obj), sign_(sign), bytecodeOffset_(bytecodeOffset)
-    {
-        MIRType fromType = obj->type();
-        MOZ_ASSERT(IsSimdType(fromType));
-        MOZ_ASSERT(IsSimdType(toType));
-        // All conversions are int <-> float, so signedness is required.
-        MOZ_ASSERT(sign != SimdSign::NotApplicable);
-
-        setResultType(toType);
-        specialization_ = fromType; // expects fromType as input
-
-        setMovable();
-        if (IsFloatingPointSimdType(fromType) && IsIntegerSimdType(toType)) {
-            // Does the extra range check => do not remove
-            setGuard();
-        }
-    }
-
-    static MSimdConvert* New(TempAllocator& alloc, MDefinition* obj, MIRType toType, SimdSign sign,
-                             wasm::BytecodeOffset bytecodeOffset)
-    {
-        return new (alloc) MSimdConvert(obj, toType, sign, bytecodeOffset);
-    }
-
-  public:
-    INSTRUCTION_HEADER(SimdConvert)
-
-    // Create a MSimdConvert instruction and add it to the basic block.
-    // Possibly create and add an equivalent sequence of instructions instead if
-    // the current target doesn't support the requested conversion directly.
-    // Return the inserted MInstruction that computes the converted value.
-    static MInstruction* AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* obj,
-                                      MIRType toType, SimdSign sign,
-                                      wasm::BytecodeOffset bytecodeOffset = wasm::BytecodeOffset());
-
-    SimdSign signedness() const {
-        return sign_;
-    }
-    wasm::BytecodeOffset bytecodeOffset() const {
-        return bytecodeOffset_;
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!congruentIfOperandsEqual(ins))
-            return false;
-        const MSimdConvert* other = ins->toSimdConvert();
-        return sign_ == other->sign_;
-    }
-    ALLOW_CLONE(MSimdConvert)
-};
-
-// Casts bits of a vector input to another SIMD type (doesn't generate code).
-class MSimdReinterpretCast
-  : public MUnaryInstruction,
-    public SimdPolicy<0>::Data
-{
-    MSimdReinterpretCast(MDefinition* obj, MIRType toType)
-      : MUnaryInstruction(classOpcode, obj)
-    {
-        MIRType fromType = obj->type();
-        MOZ_ASSERT(IsSimdType(fromType));
-        MOZ_ASSERT(IsSimdType(toType));
-        setMovable();
-        setResultType(toType);
-        specialization_ = fromType; // expects fromType as input
-    }
-
-  public:
-    INSTRUCTION_HEADER(SimdReinterpretCast)
-    TRIVIAL_NEW_WRAPPERS
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    ALLOW_CLONE(MSimdReinterpretCast)
-};
-
-// Extracts a lane element from a given vector type, given by its lane symbol.
-//
-// For integer SIMD types, a SimdSign must be provided so the lane value can be
-// converted to a scalar correctly.
-class MSimdExtractElement
-  : public MUnaryInstruction,
-    public SimdPolicy<0>::Data
-{
-  protected:
-    unsigned lane_;
-    SimdSign sign_;
-
-    MSimdExtractElement(MDefinition* obj, MIRType laneType, unsigned lane, SimdSign sign)
-      : MUnaryInstruction(classOpcode, obj), lane_(lane), sign_(sign)
-    {
-        MIRType vecType = obj->type();
-        MOZ_ASSERT(IsSimdType(vecType));
-        MOZ_ASSERT(lane < SimdTypeToLength(vecType));
-        MOZ_ASSERT(!IsSimdType(laneType));
-        MOZ_ASSERT((sign != SimdSign::NotApplicable) == IsIntegerSimdType(vecType),
-                   "Signedness must be specified for integer SIMD extractLanes");
-        // The resulting type should match the lane type.
-        // Allow extracting boolean lanes directly into an Int32 (for wasm).
-        // Allow extracting Uint32 lanes into a double.
-        //
-        // We also allow extracting Uint32 lanes into a MIRType::Int32. This is
-        // equivalent to extracting the Uint32 lane to a double and then
-        // applying MTruncateToInt32, but it bypasses the conversion to/from
-        // double.
-        MOZ_ASSERT(SimdTypeToLaneType(vecType) == laneType ||
-                   (IsBooleanSimdType(vecType) && laneType == MIRType::Int32) ||
-                   (vecType == MIRType::Int32x4 && laneType == MIRType::Double &&
-                    sign == SimdSign::Unsigned));
-
-        setMovable();
-        specialization_ = vecType;
-        setResultType(laneType);
-    }
-
-  public:
-    INSTRUCTION_HEADER(SimdExtractElement)
-    TRIVIAL_NEW_WRAPPERS
-
-    unsigned lane() const {
-        return lane_;
-    }
-
-    SimdSign signedness() const {
-        return sign_;
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isSimdExtractElement())
-            return false;
-        const MSimdExtractElement* other = ins->toSimdExtractElement();
-        if (other->lane_ != lane_ || other->sign_ != sign_)
-            return false;
-        return congruentIfOperandsEqual(other);
-    }
-    ALLOW_CLONE(MSimdExtractElement)
-};
-
-// Replaces the datum in the given lane by a scalar value of the same type.
-class MSimdInsertElement
-  : public MBinaryInstruction,
-    public MixPolicy< SimdSameAsReturnedTypePolicy<0>, SimdScalarPolicy<1> >::Data
-{
-  private:
-    unsigned lane_;
-
-    MSimdInsertElement(MDefinition* vec, MDefinition* val, unsigned lane)
-      : MBinaryInstruction(classOpcode, vec, val), lane_(lane)
-    {
-        MIRType type = vec->type();
-        MOZ_ASSERT(IsSimdType(type));
-        MOZ_ASSERT(lane < SimdTypeToLength(type));
-        setMovable();
-        setResultType(type);
-    }
-
-  public:
-    INSTRUCTION_HEADER(SimdInsertElement)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, vector), (1, value))
-
-    unsigned lane() const {
-        return lane_;
-    }
-
-    bool canConsumeFloat32(MUse* use) const override {
-        return use == getUseFor(1) && SimdTypeToLaneType(type()) == MIRType::Float32;
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return binaryCongruentTo(ins) && lane_ == ins->toSimdInsertElement()->lane();
-    }
-
-    void printOpcode(GenericPrinter& out) const override;
-
-    ALLOW_CLONE(MSimdInsertElement)
-};
-
-// Returns true if all lanes are true.
-class MSimdAllTrue
-  : public MUnaryInstruction,
-    public SimdPolicy<0>::Data
-{
-  protected:
-    explicit MSimdAllTrue(MDefinition* obj, MIRType result)
-      : MUnaryInstruction(classOpcode, obj)
-    {
-        MIRType simdType = obj->type();
-        MOZ_ASSERT(IsBooleanSimdType(simdType));
-        MOZ_ASSERT(result == MIRType::Boolean || result == MIRType::Int32);
-        setResultType(result);
-        specialization_ = simdType;
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(SimdAllTrue)
-    TRIVIAL_NEW_WRAPPERS
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    ALLOW_CLONE(MSimdAllTrue)
-};
-
-// Returns true if any lane is true.
-class MSimdAnyTrue
-  : public MUnaryInstruction,
-    public SimdPolicy<0>::Data
-{
-  protected:
-    explicit MSimdAnyTrue(MDefinition* obj, MIRType result)
-      : MUnaryInstruction(classOpcode, obj)
-    {
-        MIRType simdType = obj->type();
-        MOZ_ASSERT(IsBooleanSimdType(simdType));
-        MOZ_ASSERT(result == MIRType::Boolean || result == MIRType::Int32);
-        setResultType(result);
-        specialization_ = simdType;
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(SimdAnyTrue)
-    TRIVIAL_NEW_WRAPPERS
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-
-    ALLOW_CLONE(MSimdAnyTrue)
-};
-
 // Base for the MSimdSwizzle and MSimdShuffle classes.
 class MSimdShuffleBase
 {
   protected:
     // As of now, there are at most 16 lanes. For each lane, we need to know
     // which input we choose and which of the lanes we choose.
     mozilla::Array<uint8_t, 16> lane_;
     uint32_t arity_;
@@ -2193,54 +1587,16 @@ class MSimdShuffleBase
     }
 
     bool lanesMatch(uint32_t x, uint32_t y, uint32_t z, uint32_t w) const {
         return arity_ == 4 && lane(0) == x && lane(1) == y && lane(2) == z &&
                lane(3) == w;
     }
 };
 
-// Applies a swizzle operation to the input, putting the input lanes as
-// indicated in the output register's lanes. This implements the SIMD.js
-// "swizzle" function, that takes one vector and an array of lane indexes.
-class MSimdSwizzle
-  : public MUnaryInstruction,
-    public MSimdShuffleBase,
-    public NoTypePolicy::Data
-{
-  protected:
-    MSimdSwizzle(MDefinition* obj, const uint8_t lanes[])
-      : MUnaryInstruction(classOpcode, obj), MSimdShuffleBase(lanes, obj->type())
-    {
-        for (unsigned i = 0; i < arity_; i++)
-            MOZ_ASSERT(lane(i) < arity_);
-        setResultType(obj->type());
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(SimdSwizzle)
-    TRIVIAL_NEW_WRAPPERS
-
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isSimdSwizzle())
-            return false;
-        const MSimdSwizzle* other = ins->toSimdSwizzle();
-        return sameLanes(other) && congruentIfOperandsEqual(other);
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-
-    ALLOW_CLONE(MSimdSwizzle)
-};
-
 // A "general shuffle" is a swizzle or a shuffle with non-constant lane
 // indices.  This is the one that Ion inlines and it can be folded into a
 // MSimdSwizzle/MSimdShuffle if lane indices are constant.  Performance of
 // general swizzle/shuffle does not really matter, as we expect to get
 // constant indices most of the time.
 class MSimdGeneralShuffle :
     public MVariadicInstruction,
     public SimdShufflePolicy::Data
@@ -2303,612 +1659,16 @@ class MSimdGeneralShuffle :
 
     MDefinition* foldsTo(TempAllocator& alloc) override;
 
     AliasSet getAliasSet() const override {
         return AliasSet::None();
     }
 };
 
-// Applies a shuffle operation to the inputs. The lane indexes select a source
-// lane from the concatenation of the two input vectors.
-class MSimdShuffle
-  : public MBinaryInstruction,
-    public MSimdShuffleBase,
-    public NoTypePolicy::Data
-{
-    MSimdShuffle(MDefinition* lhs, MDefinition* rhs, const uint8_t lanes[])
-      : MBinaryInstruction(classOpcode, lhs, rhs), MSimdShuffleBase(lanes, lhs->type())
-    {
-        MOZ_ASSERT(IsSimdType(lhs->type()));
-        MOZ_ASSERT(IsSimdType(rhs->type()));
-        MOZ_ASSERT(lhs->type() == rhs->type());
-        for (unsigned i = 0; i < arity_; i++)
-            MOZ_ASSERT(lane(i) < 2 * arity_);
-        setResultType(lhs->type());
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(SimdShuffle)
-
-    static MInstruction* New(TempAllocator& alloc, MDefinition* lhs, MDefinition* rhs,
-                             const uint8_t lanes[])
-    {
-        unsigned arity = SimdTypeToLength(lhs->type());
-
-        // Swap operands so that new lanes come from LHS in majority.
-        // In the balanced case, swap operands if needs be, in order to be able
-        // to do only one vshufps on x86.
-        unsigned lanesFromLHS = 0;
-        for (unsigned i = 0; i < arity; i++) {
-            if (lanes[i] < arity)
-                lanesFromLHS++;
-        }
-
-        if (lanesFromLHS < arity / 2 ||
-            (arity == 4 && lanesFromLHS == 2 && lanes[0] >= 4 && lanes[1] >= 4)) {
-            mozilla::Array<uint8_t, 16> newLanes;
-            for (unsigned i = 0; i < arity; i++)
-                newLanes[i] = (lanes[i] + arity) % (2 * arity);
-            return New(alloc, rhs, lhs, &newLanes[0]);
-        }
-
-        // If all lanes come from the same vector, just use swizzle instead.
-        if (lanesFromLHS == arity)
-            return MSimdSwizzle::New(alloc, lhs, lanes);
-
-        return new(alloc) MSimdShuffle(lhs, rhs, lanes);
-    }
-
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isSimdShuffle())
-            return false;
-        const MSimdShuffle* other = ins->toSimdShuffle();
-        return sameLanes(other) && binaryCongruentTo(other);
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    ALLOW_CLONE(MSimdShuffle)
-};
-
-class MSimdUnaryArith
-  : public MUnaryInstruction,
-    public SimdSameAsReturnedTypePolicy<0>::Data
-{
-  public:
-    enum Operation {
-#define OP_LIST_(OP) OP,
-        FOREACH_FLOAT_SIMD_UNOP(OP_LIST_)
-        neg,
-        not_
-#undef OP_LIST_
-    };
-
-    static const char* OperationName(Operation op) {
-        switch (op) {
-          case abs:                         return "abs";
-          case neg:                         return "neg";
-          case not_:                        return "not";
-          case reciprocalApproximation:     return "reciprocalApproximation";
-          case reciprocalSqrtApproximation: return "reciprocalSqrtApproximation";
-          case sqrt:                        return "sqrt";
-        }
-        MOZ_CRASH("unexpected operation");
-    }
-
-  private:
-    Operation operation_;
-
-    MSimdUnaryArith(MDefinition* def, Operation op)
-      : MUnaryInstruction(classOpcode, def), operation_(op)
-    {
-        MIRType type = def->type();
-        MOZ_ASSERT(IsSimdType(type));
-        MOZ_ASSERT_IF(IsIntegerSimdType(type), op == neg || op == not_);
-        setResultType(type);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(SimdUnaryArith)
-    TRIVIAL_NEW_WRAPPERS
-
-    Operation operation() const { return operation_; }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins) && ins->toSimdUnaryArith()->operation() == operation();
-    }
-
-    void printOpcode(GenericPrinter& out) const override;
-
-    ALLOW_CLONE(MSimdUnaryArith);
-};
-
-// Compares each value of a SIMD vector to each corresponding lane's value of
-// another SIMD vector, and returns a boolean vector containing the results of
-// the comparison: all bits are set to 1 if the comparison is true, 0 otherwise.
-// When comparing integer vectors, a SimdSign must be provided to request signed
-// or unsigned comparison.
-class MSimdBinaryComp
-  : public MBinaryInstruction,
-    public SimdAllPolicy::Data
-{
-  public:
-    enum Operation {
-#define NAME_(x) x,
-        FOREACH_COMP_SIMD_OP(NAME_)
-#undef NAME_
-    };
-
-    static const char* OperationName(Operation op) {
-        switch (op) {
-#define NAME_(x) case x: return #x;
-        FOREACH_COMP_SIMD_OP(NAME_)
-#undef NAME_
-        }
-        MOZ_CRASH("unexpected operation");
-    }
-
-  private:
-    Operation operation_;
-    SimdSign sign_;
-
-    MSimdBinaryComp(MDefinition* left, MDefinition* right, Operation op, SimdSign sign)
-      : MBinaryInstruction(classOpcode, left, right), operation_(op), sign_(sign)
-    {
-        MOZ_ASSERT(left->type() == right->type());
-        MIRType opType = left->type();
-        MOZ_ASSERT(IsSimdType(opType));
-        MOZ_ASSERT((sign != SimdSign::NotApplicable) == IsIntegerSimdType(opType),
-                   "Signedness must be specified for integer SIMD compares");
-        setResultType(MIRTypeToBooleanSimdType(opType));
-        specialization_ = opType;
-        setMovable();
-        if (op == equal || op == notEqual)
-            setCommutative();
-    }
-
-    static MSimdBinaryComp* New(TempAllocator& alloc, MDefinition* left, MDefinition* right,
-                                Operation op, SimdSign sign)
-    {
-        return new (alloc) MSimdBinaryComp(left, right, op, sign);
-    }
-
-  public:
-    INSTRUCTION_HEADER(SimdBinaryComp)
-
-    // Create a MSimdBinaryComp or an equivalent sequence of instructions
-    // supported by the current target.
-    // Add all instructions to the basic block |addTo|.
-    static MInstruction* AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
-                                      MDefinition* right, Operation op, SimdSign sign);
-
-    AliasSet getAliasSet() const override
-    {
-        return AliasSet::None();
-    }
-
-    Operation operation() const { return operation_; }
-    SimdSign signedness() const { return sign_; }
-    MIRType specialization() const { return specialization_; }
-
-    // Swap the operands and reverse the comparison predicate.
-    void reverse() {
-        switch (operation()) {
-          case greaterThan:        operation_ = lessThan; break;
-          case greaterThanOrEqual: operation_ = lessThanOrEqual; break;
-          case lessThan:           operation_ = greaterThan; break;
-          case lessThanOrEqual:    operation_ = greaterThanOrEqual; break;
-          case equal:
-          case notEqual:
-            break;
-          default: MOZ_CRASH("Unexpected compare operation");
-        }
-        swapOperands();
-    }
-
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!binaryCongruentTo(ins))
-            return false;
-        const MSimdBinaryComp* other = ins->toSimdBinaryComp();
-        return specialization_ == other->specialization() &&
-               operation_ == other->operation() &&
-               sign_ == other->signedness();
-    }
-
-    void printOpcode(GenericPrinter& out) const override;
-
-    ALLOW_CLONE(MSimdBinaryComp)
-};
-
-class MSimdBinaryArith
-  : public MBinaryInstruction,
-    public MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdSameAsReturnedTypePolicy<1> >::Data
-{
-  public:
-    enum Operation {
-#define OP_LIST_(OP) Op_##OP,
-        FOREACH_NUMERIC_SIMD_BINOP(OP_LIST_)
-        FOREACH_FLOAT_SIMD_BINOP(OP_LIST_)
-#undef OP_LIST_
-    };
-
-    static const char* OperationName(Operation op) {
-        switch (op) {
-#define OP_CASE_LIST_(OP) case Op_##OP: return #OP;
-          FOREACH_NUMERIC_SIMD_BINOP(OP_CASE_LIST_)
-          FOREACH_FLOAT_SIMD_BINOP(OP_CASE_LIST_)
-#undef OP_CASE_LIST_
-        }
-        MOZ_CRASH("unexpected operation");
-    }
-
-  private:
-    Operation operation_;
-
-    MSimdBinaryArith(MDefinition* left, MDefinition* right, Operation op)
-      : MBinaryInstruction(classOpcode, left, right), operation_(op)
-    {
-        MOZ_ASSERT(left->type() == right->type());
-        MIRType type = left->type();
-        MOZ_ASSERT(IsSimdType(type));
-        MOZ_ASSERT_IF(IsIntegerSimdType(type), op == Op_add || op == Op_sub || op == Op_mul);
-        setResultType(type);
-        setMovable();
-        if (op == Op_add || op == Op_mul || op == Op_min || op == Op_max)
-            setCommutative();
-    }
-
-    static MSimdBinaryArith* New(TempAllocator& alloc, MDefinition* left, MDefinition* right,
-                                 Operation op)
-    {
-        return new (alloc) MSimdBinaryArith(left, right, op);
-    }
-
-  public:
-    INSTRUCTION_HEADER(SimdBinaryArith)
-
-    // Create an MSimdBinaryArith instruction and add it to the basic block. Possibly
-    // create and add an equivalent sequence of instructions instead if the
-    // current target doesn't support the requested shift operation directly.
-    static MInstruction* AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
-                                      MDefinition* right, Operation op);
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    Operation operation() const { return operation_; }
-
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!binaryCongruentTo(ins))
-            return false;
-        return operation_ == ins->toSimdBinaryArith()->operation();
-    }
-
-    void printOpcode(GenericPrinter& out) const override;
-
-    ALLOW_CLONE(MSimdBinaryArith)
-};
-
-class MSimdBinarySaturating
-  : public MBinaryInstruction,
-    public MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdSameAsReturnedTypePolicy<1>>::Data
-{
-  public:
-    enum Operation
-    {
-        add,
-        sub,
-    };
-
-    static const char* OperationName(Operation op)
-    {
-        switch (op) {
-          case add:
-            return "add";
-          case sub:
-            return "sub";
-        }
-        MOZ_CRASH("unexpected operation");
-    }
-
-  private:
-    Operation operation_;
-    SimdSign sign_;
-
-    MSimdBinarySaturating(MDefinition* left, MDefinition* right, Operation op, SimdSign sign)
-      : MBinaryInstruction(classOpcode, left, right)
-      , operation_(op)
-      , sign_(sign)
-    {
-        MOZ_ASSERT(left->type() == right->type());
-        MIRType type = left->type();
-        MOZ_ASSERT(type == MIRType::Int8x16 || type == MIRType::Int16x8);
-        setResultType(type);
-        setMovable();
-        if (op == add)
-            setCommutative();
-    }
-
-  public:
-    INSTRUCTION_HEADER(SimdBinarySaturating)
-    TRIVIAL_NEW_WRAPPERS
-
-    AliasSet getAliasSet() const override { return AliasSet::None(); }
-
-    Operation operation() const { return operation_; }
-    SimdSign signedness() const { return sign_; }
-
-    bool congruentTo(const MDefinition* ins) const override
-    {
-        if (!binaryCongruentTo(ins))
-            return false;
-        return operation_ == ins->toSimdBinarySaturating()->operation() &&
-               sign_ == ins->toSimdBinarySaturating()->signedness();
-    }
-
-    void printOpcode(GenericPrinter& out) const override;
-
-    ALLOW_CLONE(MSimdBinarySaturating)
-};
-
-class MSimdBinaryBitwise
-  : public MBinaryInstruction,
-    public MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdSameAsReturnedTypePolicy<1> >::Data
-{
-  public:
-    enum Operation {
-        and_,
-        or_,
-        xor_
-    };
-
-    static const char* OperationName(Operation op) {
-        switch (op) {
-          case and_: return "and";
-          case or_:  return "or";
-          case xor_: return "xor";
-        }
-        MOZ_CRASH("unexpected operation");
-    }
-
-  private:
-    Operation operation_;
-
-    MSimdBinaryBitwise(MDefinition* left, MDefinition* right, Operation op)
-      : MBinaryInstruction(classOpcode, left, right), operation_(op)
-    {
-        MOZ_ASSERT(left->type() == right->type());
-        MIRType type = left->type();
-        MOZ_ASSERT(IsSimdType(type));
-        setResultType(type);
-        setMovable();
-        setCommutative();
-    }
-
-  public:
-    INSTRUCTION_HEADER(SimdBinaryBitwise)
-    TRIVIAL_NEW_WRAPPERS
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    Operation operation() const { return operation_; }
-
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!binaryCongruentTo(ins))
-            return false;
-        return operation_ == ins->toSimdBinaryBitwise()->operation();
-    }
-
-    void printOpcode(GenericPrinter& out) const override;
-
-    ALLOW_CLONE(MSimdBinaryBitwise)
-};
-
-class MSimdShift
-  : public MBinaryInstruction,
-    public MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdScalarPolicy<1> >::Data
-{
-  public:
-    enum Operation {
-        lsh,
-        rsh,
-        ursh
-    };
-
-  private:
-    Operation operation_;
-
-    MSimdShift(MDefinition* left, MDefinition* right, Operation op)
-      : MBinaryInstruction(classOpcode, left, right), operation_(op)
-    {
-        MIRType type = left->type();
-        MOZ_ASSERT(IsIntegerSimdType(type));
-        setResultType(type);
-        setMovable();
-    }
-
-    static MSimdShift* New(TempAllocator& alloc, MDefinition* left, MDefinition* right,
-                           Operation op)
-    {
-        return new (alloc) MSimdShift(left, right, op);
-    }
-
-  public:
-    INSTRUCTION_HEADER(SimdShift)
-
-    // Create an MSimdShift instruction and add it to the basic block. Possibly
-    // create and add an equivalent sequence of instructions instead if the
-    // current target doesn't support the requested shift operation directly.
-    // Return the inserted MInstruction that computes the shifted value.
-    static MInstruction* AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
-                                      MDefinition* right, Operation op);
-
-    // Get the relevant right shift operation given the signedness of a type.
-    static Operation rshForSign(SimdSign sign) {
-        return sign == SimdSign::Unsigned ? ursh : rsh;
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    Operation operation() const { return operation_; }
-
-    static const char* OperationName(Operation op) {
-        switch (op) {
-          case lsh:  return "lsh";
-          case rsh:  return "rsh-arithmetic";
-          case ursh: return "rsh-logical";
-        }
-        MOZ_CRASH("unexpected operation");
-    }
-
-    void printOpcode(GenericPrinter& out) const override;
-
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!binaryCongruentTo(ins))
-            return false;
-        return operation_ == ins->toSimdShift()->operation();
-    }
-
-    ALLOW_CLONE(MSimdShift)
-};
-
-class MSimdSelect
-  : public MTernaryInstruction,
-    public SimdSelectPolicy::Data
-{
-    MSimdSelect(MDefinition* mask, MDefinition* lhs, MDefinition* rhs)
-      : MTernaryInstruction(classOpcode, mask, lhs, rhs)
-    {
-        MOZ_ASSERT(IsBooleanSimdType(mask->type()));
-        MOZ_ASSERT(lhs->type() == lhs->type());
-        MIRType type = lhs->type();
-        MOZ_ASSERT(IsSimdType(type));
-        setResultType(type);
-        specialization_ = type;
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(SimdSelect)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, mask))
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-
-    ALLOW_CLONE(MSimdSelect)
-};
-
-// Deep clone a constant JSObject.
-class MCloneLiteral
-  : public MUnaryInstruction,
-    public ObjectPolicy<0>::Data
-{
-  protected:
-    explicit MCloneLiteral(MDefinition* obj)
-      : MUnaryInstruction(classOpcode, obj)
-    {
-        setResultType(MIRType::Object);
-    }
-
-  public:
-    INSTRUCTION_HEADER(CloneLiteral)
-    TRIVIAL_NEW_WRAPPERS
-};
-
-class MParameter : public MNullaryInstruction
-{
-    int32_t index_;
-
-    MParameter(int32_t index, TemporaryTypeSet* types)
-      : MNullaryInstruction(classOpcode),
-        index_(index)
-    {
-        setResultType(MIRType::Value);
-        setResultTypeSet(types);
-    }
-
-  public:
-    INSTRUCTION_HEADER(Parameter)
-    TRIVIAL_NEW_WRAPPERS
-
-    static const int32_t THIS_SLOT = -1;
-    int32_t index() const {
-        return index_;
-    }
-    void printOpcode(GenericPrinter& out) const override;
-
-    HashNumber valueHash() const override;
-    bool congruentTo(const MDefinition* ins) const override;
-};
-
-class MCallee : public MNullaryInstruction
-{
-  public:
-    MCallee()
-      : MNullaryInstruction(classOpcode)
-    {
-        setResultType(MIRType::Object);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(Callee)
-    TRIVIAL_NEW_WRAPPERS
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
-class MIsConstructing : public MNullaryInstruction
-{
-  public:
-    MIsConstructing()
-      : MNullaryInstruction(classOpcode)
-    {
-        setResultType(MIRType::Boolean);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(IsConstructing)
-    TRIVIAL_NEW_WRAPPERS
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
 class MControlInstruction : public MInstruction
 {
   protected:
     explicit MControlInstruction(Opcode op)
       : MInstruction(op)
     { }
 
   public:
@@ -4063,98 +2823,16 @@ class MArgumentState
     bool congruentTo(const MDefinition* ins) const override {
         return congruentIfOperandsEqual(ins);
     }
     AliasSet getAliasSet() const override {
         return AliasSet::None();
     }
 };
 
-// Setting __proto__ in an object literal.
-class MMutateProto
-  : public MBinaryInstruction,
-    public MixPolicy<ObjectPolicy<0>, BoxPolicy<1> >::Data
-{
-  protected:
-    MMutateProto(MDefinition* obj, MDefinition* value)
-      : MBinaryInstruction(classOpcode, obj, value)
-    {
-        setResultType(MIRType::None);
-    }
-
-  public:
-    INSTRUCTION_HEADER(MutateProto)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, getObject), (1, getValue))
-
-    bool possiblyCalls() const override {
-        return true;
-    }
-};
-
-class MInitPropGetterSetter
-  : public MBinaryInstruction,
-    public MixPolicy<ObjectPolicy<0>, ObjectPolicy<1> >::Data
-{
-    CompilerPropertyName name_;
-
-    MInitPropGetterSetter(MDefinition* obj, PropertyName* name, MDefinition* value)
-      : MBinaryInstruction(classOpcode, obj, value),
-        name_(name)
-    { }
-
-  public:
-    INSTRUCTION_HEADER(InitPropGetterSetter)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object), (1, value))
-
-    PropertyName* name() const {
-        return name_;
-    }
-
-    bool appendRoots(MRootList& roots) const override {
-        return roots.append(name_);
-    }
-};
-
-class MInitElem
-  : public MTernaryInstruction,
-    public MixPolicy<ObjectPolicy<0>, BoxPolicy<1>, BoxPolicy<2> >::Data
-{
-    MInitElem(MDefinition* obj, MDefinition* id, MDefinition* value)
-      : MTernaryInstruction(classOpcode, obj, id, value)
-    {
-        setResultType(MIRType::None);
-    }
-
-  public:
-    INSTRUCTION_HEADER(InitElem)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, getObject), (1, getId), (2, getValue))
-
-    bool possiblyCalls() const override {
-        return true;
-    }
-};
-
-class MInitElemGetterSetter
-  : public MTernaryInstruction,
-    public MixPolicy<ObjectPolicy<0>, BoxPolicy<1>, ObjectPolicy<2> >::Data
-{
-    MInitElemGetterSetter(MDefinition* obj, MDefinition* id, MDefinition* value)
-      : MTernaryInstruction(classOpcode, obj, id, value)
-    { }
-
-  public:
-    INSTRUCTION_HEADER(InitElemGetterSetter)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object), (1, idValue), (2, value))
-
-};
-
 // WrappedFunction wraps a JSFunction so it can safely be used off-thread.
 // In particular, a function's flags can be modified on the active thread as
 // functions are relazified and delazified, so we must be careful not to access
 // these flags off-thread.
 class WrappedFunction : public TempObject
 {
     CompilerFunction fun_;
     uint16_t nargs_;
@@ -4340,124 +3018,16 @@ class MCallDOMNative : public MCall
 
     virtual bool isCallDOMNative() const override {
         return true;
     }
 
     virtual void computeMovable() override;
 };
 
-// fun.apply(self, arguments)
-class MApplyArgs
-  : public MTernaryInstruction,
-    public MixPolicy<ObjectPolicy<0>, IntPolicy<1>, BoxPolicy<2> >::Data
-{
-  protected:
-    // Monomorphic cache of single target from TI, or nullptr.
-    WrappedFunction* target_;
-
-    MApplyArgs(WrappedFunction* target, MDefinition* fun, MDefinition* argc, MDefinition* self)
-      : MTernaryInstruction(classOpcode, fun, argc, self),
-        target_(target)
-    {
-        setResultType(MIRType::Value);
-    }
-
-  public:
-    INSTRUCTION_HEADER(ApplyArgs)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, getFunction), (1, getArgc), (2, getThis))
-
-    // For TI-informed monomorphic callsites.
-    WrappedFunction* getSingleTarget() const {
-        return target_;
-    }
-
-    bool possiblyCalls() const override {
-        return true;
-    }
-
-    bool appendRoots(MRootList& roots) const override {
-        if (target_)
-            return target_->appendRoots(roots);
-        return true;
-    }
-};
-
-// fun.apply(fn, array)
-class MApplyArray
-  : public MTernaryInstruction,
-    public MixPolicy<ObjectPolicy<0>, ObjectPolicy<1>, BoxPolicy<2> >::Data
-{
-  protected:
-    // Monomorphic cache of single target from TI, or nullptr.
-    WrappedFunction* target_;
-
-    MApplyArray(WrappedFunction* target, MDefinition* fun, MDefinition* elements, MDefinition* self)
-      : MTernaryInstruction(classOpcode, fun, elements, self),
-        target_(target)
-    {
-        setResultType(MIRType::Value);
-    }
-
-  public:
-    INSTRUCTION_HEADER(ApplyArray)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, getFunction), (1, getElements), (2, getThis))
-
-    // For TI-informed monomorphic callsites.
-    WrappedFunction* getSingleTarget() const {
-        return target_;
-    }
-
-    bool possiblyCalls() const override {
-        return true;
-    }
-
-    bool appendRoots(MRootList& roots) const override {
-        if (target_)
-            return target_->appendRoots(roots);
-        return true;
-    }
-};
-
-class MBail : public MNullaryInstruction
-{
-  protected:
-    explicit MBail(BailoutKind kind)
-      : MNullaryInstruction(classOpcode)
-    {
-        bailoutKind_ = kind;
-        setGuard();
-    }
-
-  private:
-    BailoutKind bailoutKind_;
-
-  public:
-    INSTRUCTION_HEADER(Bail)
-
-    static MBail*
-    New(TempAllocator& alloc, BailoutKind kind) {
-        return new(alloc) MBail(kind);
-    }
-    static MBail*
-    New(TempAllocator& alloc) {
-        return new(alloc) MBail(Bailout_Inevitable);
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    BailoutKind bailoutKind() const {
-        return bailoutKind_;
-    }
-};
-
 class MUnreachable
   : public MAryControlInstruction<0, 0>,
     public NoTypePolicy::Data
 {
     MUnreachable()
       : MAryControlInstruction(classOpcode)
     { }
 
@@ -4465,858 +3035,28 @@ class MUnreachable
     INSTRUCTION_HEADER(Unreachable)
     TRIVIAL_NEW_WRAPPERS
 
     AliasSet getAliasSet() const override {
         return AliasSet::None();
     }
 };
 
-// This class serve as a way to force the encoding of a snapshot, even if there
-// is no resume point using it.  This is useful to run MAssertRecoveredOnBailout
-// assertions.
-class MEncodeSnapshot : public MNullaryInstruction
-{
-  protected:
-    MEncodeSnapshot()
-      : MNullaryInstruction(classOpcode)
-    {
-        setGuard();
-    }
-
-  public:
-    INSTRUCTION_HEADER(EncodeSnapshot)
-
-    static MEncodeSnapshot*
-    New(TempAllocator& alloc) {
-        return new(alloc) MEncodeSnapshot();
-    }
-};
-
-class MAssertRecoveredOnBailout
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-  protected:
-    bool mustBeRecovered_;
-
-    MAssertRecoveredOnBailout(MDefinition* ins, bool mustBeRecovered)
-      : MUnaryInstruction(classOpcode, ins), mustBeRecovered_(mustBeRecovered)
-    {
-        setResultType(MIRType::Value);
-        setRecoveredOnBailout();
-        setGuard();
-    }
-
-  public:
-    INSTRUCTION_HEADER(AssertRecoveredOnBailout)
-    TRIVIAL_NEW_WRAPPERS
-
-    // Needed to assert that float32 instructions are correctly recovered.
-    bool canConsumeFloat32(MUse* use) const override { return true; }
-
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        return true;
-    }
-};
-
-class MAssertFloat32
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-  protected:
-    bool mustBeFloat32_;
-
-    MAssertFloat32(MDefinition* value, bool mustBeFloat32)
-      : MUnaryInstruction(classOpcode, value), mustBeFloat32_(mustBeFloat32)
-    {
-    }
-
-  public:
-    INSTRUCTION_HEADER(AssertFloat32)
-    TRIVIAL_NEW_WRAPPERS
-
-    bool canConsumeFloat32(MUse* use) const override { return true; }
-
-    bool mustBeFloat32() const { return mustBeFloat32_; }
-};
-
-class MGetDynamicName
-  : public MBinaryInstruction,
-    public MixPolicy<ObjectPolicy<0>, ConvertToStringPolicy<1> >::Data
-{
-  protected:
-    MGetDynamicName(MDefinition* envChain, MDefinition* name)
-      : MBinaryInstruction(classOpcode, envChain, name)
-    {
-        setResultType(MIRType::Value);
-    }
-
-  public:
-    INSTRUCTION_HEADER(GetDynamicName)
-    NAMED_OPERANDS((0, getEnvironmentChain), (1, getName))
-
-    static MGetDynamicName*
-    New(TempAllocator& alloc, MDefinition* envChain, MDefinition* name) {
-        return new(alloc) MGetDynamicName(envChain, name);
-    }
-
-    bool possiblyCalls() const override {
-        return true;
-    }
-};
-
-class MCallDirectEval
-  : public MTernaryInstruction,
-    public MixPolicy<ObjectPolicy<0>,
-                     StringPolicy<1>,
-                     BoxPolicy<2> >::Data
-{
-  protected:
-    MCallDirectEval(MDefinition* envChain, MDefinition* string,
-                    MDefinition* newTargetValue, jsbytecode* pc)
-      : MTernaryInstruction(classOpcode, envChain, string, newTargetValue),
-        pc_(pc)
-    {
-        setResultType(MIRType::Value);
-    }
-
-  public:
-    INSTRUCTION_HEADER(CallDirectEval)
-    NAMED_OPERANDS((0, getEnvironmentChain), (1, getString), (2, getNewTargetValue))
-
-    static MCallDirectEval*
-    New(TempAllocator& alloc, MDefinition* envChain, MDefinition* string,
-        MDefinition* newTargetValue, jsbytecode* pc)
-    {
-        return new(alloc) MCallDirectEval(envChain, string, newTargetValue, pc);
-    }
-
-    jsbytecode* pc() const {
-        return pc_;
-    }
-
-    bool possiblyCalls() const override {
-        return true;
-    }
-
-  private:
-    jsbytecode* pc_;
-};
-
-class MCompare
-  : public MBinaryInstruction,
-    public ComparePolicy::Data
-{
-  public:
-    enum CompareType {
-
-        // Anything compared to Undefined
-        Compare_Undefined,
-
-        // Anything compared to Null
-        Compare_Null,
-
-        // Undefined compared to Boolean
-        // Null      compared to Boolean
-        // Double    compared to Boolean
-        // String    compared to Boolean
-        // Symbol    compared to Boolean
-        // Object    compared to Boolean
-        // Value     compared to Boolean
-        Compare_Boolean,
-
-        // Int32   compared to Int32
-        // Boolean compared to Boolean
-        Compare_Int32,
-        Compare_Int32MaybeCoerceBoth,
-        Compare_Int32MaybeCoerceLHS,
-        Compare_Int32MaybeCoerceRHS,
-
-        // Int32 compared as unsigneds
-        Compare_UInt32,
-
-        // Int64 compared to Int64.
-        Compare_Int64,
-
-        // Int64 compared as unsigneds.
-        Compare_UInt64,
-
-        // Double compared to Double
-        Compare_Double,
-
-        Compare_DoubleMaybeCoerceLHS,
-        Compare_DoubleMaybeCoerceRHS,
-
-        // Float compared to Float
-        Compare_Float32,
-
-        // String compared to String
-        Compare_String,
-
-        // Symbol compared to Symbol
-        Compare_Symbol,
-
-        // Undefined compared to String
-        // Null      compared to String
-        // Boolean   compared to String
-        // Int32     compared to String
-        // Double    compared to String
-        // Object    compared to String
-        // Value     compared to String
-        Compare_StrictString,
-
-        // Object compared to Object
-        Compare_Object,
-
-        // Compare 2 values bitwise
-        Compare_Bitwise,
-
-        // All other possible compares
-        Compare_Unknown
-    };
-
-  private:
-    CompareType compareType_;
-    JSOp jsop_;
-    bool operandMightEmulateUndefined_;
-    bool operandsAreNeverNaN_;
-
-    // When a floating-point comparison is converted to an integer comparison
-    // (when range analysis proves it safe), we need to convert the operands
-    // to integer as well.
-    bool truncateOperands_;
-
-    MCompare(MDefinition* left, MDefinition* right, JSOp jsop)
-      : MBinaryInstruction(classOpcode, left, right),
-        compareType_(Compare_Unknown),
-        jsop_(jsop),
-        operandMightEmulateUndefined_(true),
-        operandsAreNeverNaN_(false),
-        truncateOperands_(false)
-    {
-        setResultType(MIRType::Boolean);
-        setMovable();
-    }
-
-    MCompare(MDefinition* left, MDefinition* right, JSOp jsop, CompareType compareType)
-      : MCompare(left, right, jsop)
-    {
-        MOZ_ASSERT(compareType == Compare_Int32 || compareType == Compare_UInt32 ||
-                   compareType == Compare_Int64 || compareType == Compare_UInt64 ||
-                   compareType == Compare_Double || compareType == Compare_Float32);
-        compareType_ = compareType;
-        operandMightEmulateUndefined_ = false;
-        setResultType(MIRType::Int32);
-    }
-
-  public:
-    INSTRUCTION_HEADER(Compare)
-    TRIVIAL_NEW_WRAPPERS
-
-    MOZ_MUST_USE bool tryFold(bool* result);
-    MOZ_MUST_USE bool evaluateConstantOperands(TempAllocator& alloc, bool* result);
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-    void filtersUndefinedOrNull(bool trueBranch, MDefinition** subject, bool* filtersUndefined,
-                                bool* filtersNull);
-
-    CompareType compareType() const {
-        return compareType_;
-    }
-    bool isInt32Comparison() const {
-        return compareType() == Compare_Int32 ||
-               compareType() == Compare_Int32MaybeCoerceBoth ||
-               compareType() == Compare_Int32MaybeCoerceLHS ||
-               compareType() == Compare_Int32MaybeCoerceRHS;
-    }
-    bool isDoubleComparison() const {
-        return compareType() == Compare_Double ||
-               compareType() == Compare_DoubleMaybeCoerceLHS ||
-               compareType() == Compare_DoubleMaybeCoerceRHS;
-    }
-    bool isFloat32Comparison() const {
-        return compareType() == Compare_Float32;
-    }
-    bool isNumericComparison() const {
-        return isInt32Comparison() ||
-               isDoubleComparison() ||
-               isFloat32Comparison();
-    }
-    void setCompareType(CompareType type) {
-        compareType_ = type;
-    }
-    MIRType inputType();
-
-    JSOp jsop() const {
-        return jsop_;
-    }
-    void markNoOperandEmulatesUndefined() {
-        operandMightEmulateUndefined_ = false;
-    }
-    bool operandMightEmulateUndefined() const {
-        return operandMightEmulateUndefined_;
-    }
-    bool operandsAreNeverNaN() const {
-        return operandsAreNeverNaN_;
-    }
-    AliasSet getAliasSet() const override {
-        // Strict equality is never effectful.
-        if (jsop_ == JSOP_STRICTEQ || jsop_ == JSOP_STRICTNE)
-            return AliasSet::None();
-        if (compareType_ == Compare_Unknown)
-            return AliasSet::Store(AliasSet::Any);
-        MOZ_ASSERT(compareType_ <= Compare_Bitwise);
-        return AliasSet::None();
-    }
-
-    void printOpcode(GenericPrinter& out) const override;
-    void collectRangeInfoPreTrunc() override;
-
-    void trySpecializeFloat32(TempAllocator& alloc) override;
-    bool isFloat32Commutative() const override { return true; }
-    bool needTruncation(TruncateKind kind) override;
-    void truncate() override;
-    TruncateKind operandTruncateKind(size_t index) const override;
-
-    static CompareType determineCompareType(JSOp op, MDefinition* left, MDefinition* right);
-    void cacheOperandMightEmulateUndefined(CompilerConstraintList* constraints);
-
-# ifdef DEBUG
-    bool isConsistentFloat32Use(MUse* use) const override {
-        // Both sides of the compare can be Float32
-        return compareType_ == Compare_Float32;
-    }
-# endif
-
-    ALLOW_CLONE(MCompare)
-
-  protected:
-    MOZ_MUST_USE bool tryFoldEqualOperands(bool* result);
-    MOZ_MUST_USE bool tryFoldTypeOf(bool* result);
-
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!binaryCongruentTo(ins))
-            return false;
-        return compareType() == ins->toCompare()->compareType() &&
-               jsop() == ins->toCompare()->jsop();
-    }
-};
-
-// Takes a typed value and returns an untyped value.
-class MBox
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-    MBox(TempAllocator& alloc, MDefinition* ins)
-      : MUnaryInstruction(classOpcode, ins)
-    {
-        setResultType(MIRType::Value);
-        if (ins->resultTypeSet()) {
-            setResultTypeSet(ins->resultTypeSet());
-        } else if (ins->type() != MIRType::Value) {
-            TypeSet::Type ntype = ins->type() == MIRType::Object
-                                  ? TypeSet::AnyObjectType()
-                                  : TypeSet::PrimitiveType(ValueTypeFromMIRType(ins->type()));
-            setResultTypeSet(alloc.lifoAlloc()->new_<TemporaryTypeSet>(alloc.lifoAlloc(), ntype));
-        }
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(Box)
-    static MBox* New(TempAllocator& alloc, MDefinition* ins)
-    {
-        // Cannot box a box.
-        MOZ_ASSERT(ins->type() != MIRType::Value);
-
-        return new(alloc) MBox(alloc, ins);
-    }
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    ALLOW_CLONE(MBox)
-};
+
 
 // Note: the op may have been inverted during lowering (to put constants in a
 // position where they can be immediates), so it is important to use the
 // lir->jsop() instead of the mir->jsop() when it is present.
 static inline Assembler::Condition
 JSOpToCondition(MCompare::CompareType compareType, JSOp op)
 {
     bool isSigned = (compareType != MCompare::Compare_UInt32);
     return JSOpToCondition(op, isSigned);
 }
 
-// Takes a typed value and checks if it is a certain type. If so, the payload
-// is unpacked and returned as that type. Otherwise, it is considered a
-// deoptimization.
-class MUnbox final : public MUnaryInstruction, public BoxInputsPolicy::Data
-{
-  public:
-    enum Mode {
-        Fallible,       // Check the type, and deoptimize if unexpected.
-        Infallible,     // Type guard is not necessary.
-        TypeBarrier     // Guard on the type, and act like a TypeBarrier on failure.
-    };
-
-  private:
-    Mode mode_;
-    BailoutKind bailoutKind_;
-
-    MUnbox(MDefinition* ins, MIRType type, Mode mode, BailoutKind kind, TempAllocator& alloc)
-      : MUnaryInstruction(classOpcode, ins),
-        mode_(mode)
-    {
-        // Only allow unboxing a non MIRType::Value when input and output types
-        // don't match. This is often used to force a bailout. Boxing happens
-        // during type analysis.
-        MOZ_ASSERT_IF(ins->type() != MIRType::Value, type != ins->type());
-
-        MOZ_ASSERT(type == MIRType::Boolean ||
-                   type == MIRType::Int32   ||
-                   type == MIRType::Double  ||
-                   type == MIRType::String  ||
-                   type == MIRType::Symbol  ||
-                   type == MIRType::Object);
-
-        TemporaryTypeSet* resultSet = ins->resultTypeSet();
-        if (resultSet && type == MIRType::Object)
-            resultSet = resultSet->cloneObjectsOnly(alloc.lifoAlloc());
-
-        setResultType(type);
-        setResultTypeSet(resultSet);
-        setMovable();
-
-        if (mode_ == TypeBarrier || mode_ == Fallible)
-            setGuard();
-
-        bailoutKind_ = kind;
-    }
-  public:
-    INSTRUCTION_HEADER(Unbox)
-    static MUnbox* New(TempAllocator& alloc, MDefinition* ins, MIRType type, Mode mode)
-    {
-        // Unless we were given a specific BailoutKind, pick a default based on
-        // the type we expect.
-        BailoutKind kind;
-        switch (type) {
-          case MIRType::Boolean:
-            kind = Bailout_NonBooleanInput;
-            break;
-          case MIRType::Int32:
-            kind = Bailout_NonInt32Input;
-            break;
-          case MIRType::Double:
-            kind = Bailout_NonNumericInput; // Int32s are fine too
-            break;
-          case MIRType::String:
-            kind = Bailout_NonStringInput;
-            break;
-          case MIRType::Symbol:
-            kind = Bailout_NonSymbolInput;
-            break;
-          case MIRType::Object:
-            kind = Bailout_NonObjectInput;
-            break;
-          default:
-            MOZ_CRASH("Given MIRType cannot be unboxed.");
-        }
-
-        return new(alloc) MUnbox(ins, type, mode, kind, alloc);
-    }
-
-    static MUnbox* New(TempAllocator& alloc, MDefinition* ins, MIRType type, Mode mode,
-                       BailoutKind kind)
-    {
-        return new(alloc) MUnbox(ins, type, mode, kind, alloc);
-    }
-
-    Mode mode() const {
-        return mode_;
-    }
-    BailoutKind bailoutKind() const {
-        // If infallible, no bailout should be generated.
-        MOZ_ASSERT(fallible());
-        return bailoutKind_;
-    }
-    bool fallible() const {
-        return mode() != Infallible;
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isUnbox() || ins->toUnbox()->mode() != mode())
-            return false;
-        return congruentIfOperandsEqual(ins);
-    }
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    void printOpcode(GenericPrinter& out) const override;
-    void makeInfallible() {
-        // Should only be called if we're already Infallible or TypeBarrier
-        MOZ_ASSERT(mode() != Fallible);
-        mode_ = Infallible;
-    }
-
-    ALLOW_CLONE(MUnbox)
-};
-
-class MGuardObject
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    explicit MGuardObject(MDefinition* ins)
-      : MUnaryInstruction(classOpcode, ins)
-    {
-        setGuard();
-        setMovable();
-        setResultType(MIRType::Object);
-        setResultTypeSet(ins->resultTypeSet());
-    }
-
-  public:
-    INSTRUCTION_HEADER(GuardObject)
-    TRIVIAL_NEW_WRAPPERS
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
-class MGuardString
-  : public MUnaryInstruction,
-    public StringPolicy<0>::Data
-{
-    explicit MGuardString(MDefinition* ins)
-      : MUnaryInstruction(classOpcode, ins)
-    {
-        setGuard();
-        setMovable();
-        setResultType(MIRType::String);
-    }
-
-  public:
-    INSTRUCTION_HEADER(GuardString)
-    TRIVIAL_NEW_WRAPPERS
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
-class MPolyInlineGuard
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    explicit MPolyInlineGuard(MDefinition* ins)
-      : MUnaryInstruction(classOpcode, ins)
-    {
-        setGuard();
-        setResultType(MIRType::Object);
-        setResultTypeSet(ins->resultTypeSet());
-    }
-
-  public:
-    INSTRUCTION_HEADER(PolyInlineGuard)
-    TRIVIAL_NEW_WRAPPERS
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
-class MAssertRange
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-    // This is the range checked by the assertion. Don't confuse this with the
-    // range_ member or the range() accessor. Since MAssertRange doesn't return
-    // a value, it doesn't use those.
-    const Range* assertedRange_;
-
-    MAssertRange(MDefinition* ins, const Range* assertedRange)
-      : MUnaryInstruction(classOpcode, ins), assertedRange_(assertedRange)
-    {
-        setGuard();
-        setResultType(MIRType::None);
-    }
-
-  public:
-    INSTRUCTION_HEADER(AssertRange)
-    TRIVIAL_NEW_WRAPPERS
-
-    const Range* assertedRange() const {
-        return assertedRange_;
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    void printOpcode(GenericPrinter& out) const override;
-};
-
-// Caller-side allocation of |this| for |new|:
-// Given a templateobject, construct |this| for JSOP_NEW
-class MCreateThisWithTemplate
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-    gc::InitialHeap initialHeap_;
-
-    MCreateThisWithTemplate(TempAllocator& alloc, CompilerConstraintList* constraints,
-                            MConstant* templateConst, gc::InitialHeap initialHeap)
-      : MUnaryInstruction(classOpcode, templateConst),
-        initialHeap_(initialHeap)
-    {
-        setResultType(MIRType::Object);
-        setResultTypeSet(MakeSingletonTypeSet(alloc, constraints, templateObject()));
-    }
-
-  public:
-    INSTRUCTION_HEADER(CreateThisWithTemplate)
-    TRIVIAL_NEW_WRAPPERS_WITH_ALLOC
-
-    // Template for |this|, provided by TI.
-    JSObject* templateObject() const {
-        return &getOperand(0)->toConstant()->toObject();
-    }
-
-    gc::InitialHeap initialHeap() const {
-        return initialHeap_;
-    }
-
-    // Although creation of |this| modifies global state, it is safely repeatable.
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override;
-};
-
-// Caller-side allocation of |this| for |new|:
-// Given a prototype operand, construct |this| for JSOP_NEW.
-class MCreateThisWithProto
-  : public MTernaryInstruction,
-    public MixPolicy<ObjectPolicy<0>, ObjectPolicy<1>, ObjectPolicy<2> >::Data
-{
-    MCreateThisWithProto(MDefinition* callee, MDefinition* newTarget, MDefinition* prototype)
-      : MTernaryInstruction(classOpcode, callee, newTarget, prototype)
-    {
-        setResultType(MIRType::Object);
-    }
-
-  public:
-    INSTRUCTION_HEADER(CreateThisWithProto)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, getCallee), (1, getNewTarget), (2, getPrototype))
-
-    // Although creation of |this| modifies global state, it is safely repeatable.
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    bool possiblyCalls() const override {
-        return true;
-    }
-};
-
-// Caller-side allocation of |this| for |new|:
-// Constructs |this| when possible, else MagicValue(JS_IS_CONSTRUCTING).
-class MCreateThis
-  : public MBinaryInstruction,
-    public MixPolicy<ObjectPolicy<0>, ObjectPolicy<1> >::Data
-{
-    explicit MCreateThis(MDefinition* callee, MDefinition* newTarget)
-      : MBinaryInstruction(classOpcode, callee, newTarget)
-    {
-        setResultType(MIRType::Value);
-    }
-
-  public:
-    INSTRUCTION_HEADER(CreateThis)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, getCallee), (1, getNewTarget))
-
-    // Although creation of |this| modifies global state, it is safely repeatable.
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    bool possiblyCalls() const override {
-        return true;
-    }
-};
-
-// Eager initialization of arguments object.
-class MCreateArgumentsObject
-  : public MUnaryInstruction,
-    public ObjectPolicy<0>::Data
-{
-    CompilerGCPointer<ArgumentsObject*> templateObj_;
-
-    MCreateArgumentsObject(MDefinition* callObj, ArgumentsObject* templateObj)
-      : MUnaryInstruction(classOpcode, callObj),
-        templateObj_(templateObj)
-    {
-        setResultType(MIRType::Object);
-        setGuard();
-    }
-
-  public:
-    INSTRUCTION_HEADER(CreateArgumentsObject)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, getCallObject))
-
-    ArgumentsObject* templateObject() const {
-        return templateObj_;
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    bool possiblyCalls() const override {
-        return true;
-    }
-
-    bool appendRoots(MRootList& roots) const override {
-        return roots.append(templateObj_);
-    }
-};
-
-class MGetArgumentsObjectArg
-  : public MUnaryInstruction,
-    public ObjectPolicy<0>::Data
-{
-    size_t argno_;
-
-    MGetArgumentsObjectArg(MDefinition* argsObject, size_t argno)
-      : MUnaryInstruction(classOpcode, argsObject),
-        argno_(argno)
-    {
-        setResultType(MIRType::Value);
-    }
-
-  public:
-    INSTRUCTION_HEADER(GetArgumentsObjectArg)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, getArgsObject))
-
-    size_t argno() const {
-        return argno_;
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::Any);
-    }
-};
-
-class MSetArgumentsObjectArg
-  : public MBinaryInstruction,
-    public MixPolicy<ObjectPolicy<0>, BoxPolicy<1> >::Data
-{
-    size_t argno_;
-
-    MSetArgumentsObjectArg(MDefinition* argsObj, size_t argno, MDefinition* value)
-      : MBinaryInstruction(classOpcode, argsObj, value),
-        argno_(argno)
-    {
-    }
-
-  public:
-    INSTRUCTION_HEADER(SetArgumentsObjectArg)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, getArgsObject), (1, getValue))
-
-    size_t argno() const {
-        return argno_;
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::Store(AliasSet::Any);
-    }
-};
-
-class MRunOncePrologue
-  : public MNullaryInstruction
-{
-  protected:
-    MRunOncePrologue()
-      : MNullaryInstruction(classOpcode)
-    {
-        setGuard();
-    }
-
-  public:
-    INSTRUCTION_HEADER(RunOncePrologue)
-    TRIVIAL_NEW_WRAPPERS
-
-    bool possiblyCalls() const override {
-        return true;
-    }
-};
-
-// Given a MIRType::Value A and a MIRType::Object B:
-// If the Value may be safely unboxed to an Object, return Object(A).
-// Otherwise, return B.
-// Used to implement return behavior for inlined constructors.
-class MReturnFromCtor
-  : public MBinaryInstruction,
-    public MixPolicy<BoxPolicy<0>, ObjectPolicy<1> >::Data
-{
-    MReturnFromCtor(MDefinition* value, MDefinition* object)
-      : MBinaryInstruction(classOpcode, value, object)
-    {
-        setResultType(MIRType::Object);
-    }
-
-  public:
-    INSTRUCTION_HEADER(ReturnFromCtor)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, getValue), (1, getObject))
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
-class MToFPInstruction
-  : public MUnaryInstruction,
-    public ToDoublePolicy::Data
-{
-  public:
-    // Types of values which can be converted.
-    enum ConversionKind {
-        NonStringPrimitives,
-        NonNullNonStringPrimitives,
-        NumbersOnly
-    };
-
-  private:
-    ConversionKind conversion_;
-
-  protected:
-    MToFPInstruction(Opcode op, MDefinition* def, ConversionKind conversion = NonStringPrimitives)
-      : MUnaryInstruction(op, def), conversion_(conversion)
-    { }
-
-  public:
-    ConversionKind conversion() const {
-        return conversion_;
-    }
-};
-
 // Converts a primitive (either typed or untyped) to a double. If the input is
 // not primitive at runtime, a bailout occurs.
 class MToDouble
   : public MToFPInstruction
 {
   private:
     TruncateKind implicitTruncate_;
 
@@ -5426,656 +3166,16 @@ class MToFloat32
     MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
     bool canRecoverOnBailout() const override {
         return true;
     }
 
     ALLOW_CLONE(MToFloat32)
 };
 
-// Converts a uint32 to a double (coming from wasm).
-class MWasmUnsignedToDouble
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-    explicit MWasmUnsignedToDouble(MDefinition* def)
-      : MUnaryInstruction(classOpcode, def)
-    {
-        setResultType(MIRType::Double);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(WasmUnsignedToDouble)
-    TRIVIAL_NEW_WRAPPERS
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
-// Converts a uint32 to a float32 (coming from wasm).
-class MWasmUnsignedToFloat32
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-    explicit MWasmUnsignedToFloat32(MDefinition* def)
-      : MUnaryInstruction(classOpcode, def)
-    {
-        setResultType(MIRType::Float32);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(WasmUnsignedToFloat32)
-    TRIVIAL_NEW_WRAPPERS
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    bool canProduceFloat32() const override { return true; }
-};
-
-class MWrapInt64ToInt32
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-    bool bottomHalf_;
-
-    explicit MWrapInt64ToInt32(MDefinition* def, bool bottomHalf = true)
-      : MUnaryInstruction(classOpcode, def),
-        bottomHalf_(bottomHalf)
-    {
-        setResultType(MIRType::Int32);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(WrapInt64ToInt32)
-    TRIVIAL_NEW_WRAPPERS
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isWrapInt64ToInt32())
-            return false;
-        if (ins->toWrapInt64ToInt32()->bottomHalf() != bottomHalf())
-            return false;
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    bool bottomHalf() const {
-        return bottomHalf_;
-    }
-};
-
-class MExtendInt32ToInt64
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-    bool isUnsigned_;
-
-    MExtendInt32ToInt64(MDefinition* def, bool isUnsigned)
-      : MUnaryInstruction(classOpcode, def),
-        isUnsigned_(isUnsigned)
-    {
-        setResultType(MIRType::Int64);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(ExtendInt32ToInt64)
-    TRIVIAL_NEW_WRAPPERS
-
-    bool isUnsigned() const { return isUnsigned_; }
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isExtendInt32ToInt64())
-            return false;
-        if (ins->toExtendInt32ToInt64()->isUnsigned_ != isUnsigned_)
-            return false;
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
-class MWasmTruncateToInt64
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-    bool isUnsigned_;
-    wasm::BytecodeOffset bytecodeOffset_;
-
-    MWasmTruncateToInt64(MDefinition* def, bool isUnsigned, wasm::BytecodeOffset bytecodeOffset)
-      : MUnaryInstruction(classOpcode, def),
-        isUnsigned_(isUnsigned),
-        bytecodeOffset_(bytecodeOffset)
-    {
-        setResultType(MIRType::Int64);
-        setGuard(); // neither removable nor movable because of possible side-effects.
-    }
-
-  public:
-    INSTRUCTION_HEADER(WasmTruncateToInt64)
-    TRIVIAL_NEW_WRAPPERS
-
-    bool isUnsigned() const { return isUnsigned_; }
-    wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins) &&
-               ins->toWasmTruncateToInt64()->isUnsigned() == isUnsigned_;
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
-// Truncate a value to an int32, with wasm semantics: this will trap when the
-// value is out of range.
-class MWasmTruncateToInt32
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-    bool isUnsigned_;
-    wasm::BytecodeOffset bytecodeOffset_;
-
-    explicit MWasmTruncateToInt32(MDefinition* def, bool isUnsigned,
-                                  wasm::BytecodeOffset bytecodeOffset)
-      : MUnaryInstruction(classOpcode, def),
-        isUnsigned_(isUnsigned), bytecodeOffset_(bytecodeOffset)
-    {
-        setResultType(MIRType::Int32);
-        setGuard(); // neither removable nor movable because of possible side-effects.
-    }
-
-  public:
-    INSTRUCTION_HEADER(WasmTruncateToInt32)
-    TRIVIAL_NEW_WRAPPERS
-
-    bool isUnsigned() const {
-        return isUnsigned_;
-    }
-    wasm::BytecodeOffset bytecodeOffset() const {
-        return bytecodeOffset_;
-    }
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins) &&
-               ins->toWasmTruncateToInt32()->isUnsigned() == isUnsigned_;
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
-class MInt64ToFloatingPoint
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-    bool isUnsigned_;
-    wasm::BytecodeOffset bytecodeOffset_;
-
-    MInt64ToFloatingPoint(MDefinition* def, MIRType type, wasm::BytecodeOffset bytecodeOffset,
-                          bool isUnsigned)
-      : MUnaryInstruction(classOpcode, def),
-        isUnsigned_(isUnsigned),
-        bytecodeOffset_(bytecodeOffset)
-    {
-        MOZ_ASSERT(IsFloatingPointType(type));
-        setResultType(type);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(Int64ToFloatingPoint)
-    TRIVIAL_NEW_WRAPPERS
-
-    bool isUnsigned() const { return isUnsigned_; }
-    wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
-
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isInt64ToFloatingPoint())
-            return false;
-        if (ins->toInt64ToFloatingPoint()->isUnsigned_ != isUnsigned_)
-            return false;
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
-// Converts a primitive (either typed or untyped) to an int32. If the input is
-// not primitive at runtime, a bailout occurs. If the input cannot be converted
-// to an int32 without loss (i.e. "5.5" or undefined) then a bailout occurs.
-class MToInt32
-  : public MUnaryInstruction,
-    public ToInt32Policy::Data
-{
-    bool canBeNegativeZero_;
-    MacroAssembler::IntConversionInputKind conversion_;
-
-    explicit MToInt32(MDefinition* def, MacroAssembler::IntConversionInputKind conversion =
-                                            MacroAssembler::IntConversion_Any)
-      : MUnaryInstruction(classOpcode, def),
-        canBeNegativeZero_(true),
-        conversion_(conversion)
-    {
-        setResultType(MIRType::Int32);
-        setMovable();
-
-        // An object might have "valueOf", which means it is effectful.
-        // ToNumber(symbol) throws.
-        if (def->mightBeType(MIRType::Object) || def->mightBeType(MIRType::Symbol))
-            setGuard();
-    }
-
-  public:
-    INSTRUCTION_HEADER(ToInt32)
-    TRIVIAL_NEW_WRAPPERS
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-
-    // this only has backwards information flow.
-    void analyzeEdgeCasesBackward() override;
-
-    bool canBeNegativeZero() const {
-        return canBeNegativeZero_;
-    }
-    void setCanBeNegativeZero(bool negativeZero) {
-        canBeNegativeZero_ = negativeZero;
-    }
-
-    MacroAssembler::IntConversionInputKind conversion() const {
-        return conversion_;
-    }
-
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isToInt32() || ins->toToInt32()->conversion() != conversion())
-            return false;
-        return congruentIfOperandsEqual(ins);
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    void computeRange(TempAllocator& alloc) override;
-    void collectRangeInfoPreTrunc() override;
-
-#ifdef DEBUG
-    bool isConsistentFloat32Use(MUse* use) const override { return true; }
-#endif
-
-    ALLOW_CLONE(MToInt32)
-};
-
-// Converts a value or typed input to a truncated int32, for use with bitwise
-// operations. This is an infallible ValueToECMAInt32.
-class MTruncateToInt32
-  : public MUnaryInstruction,
-    public ToInt32Policy::Data
-{
-    wasm::BytecodeOffset bytecodeOffset_;
-
-    explicit MTruncateToInt32(MDefinition* def,
-                              wasm::BytecodeOffset bytecodeOffset = wasm::BytecodeOffset())
-      : MUnaryInstruction(classOpcode, def),
-        bytecodeOffset_(bytecodeOffset)
-    {
-        setResultType(MIRType::Int32);
-        setMovable();
-
-        // An object might have "valueOf", which means it is effectful.
-        // ToInt32(symbol) throws.
-        if (def->mightBeType(MIRType::Object) || def->mightBeType(MIRType::Symbol))
-            setGuard();
-    }
-
-  public:
-    INSTRUCTION_HEADER(TruncateToInt32)
-    TRIVIAL_NEW_WRAPPERS
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    void computeRange(TempAllocator& alloc) override;
-    TruncateKind operandTruncateKind(size_t index) const override;
-# ifdef DEBUG
-    bool isConsistentFloat32Use(MUse* use) const override {
-        return true;
-    }
-#endif
-
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        return input()->type() < MIRType::Symbol;
-    }
-
-    wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
-
-    ALLOW_CLONE(MTruncateToInt32)
-};
-
-// Converts any type to a string
-class MToString :
-  public MUnaryInstruction,
-  public ToStringPolicy::Data
-{
-    explicit MToString(MDefinition* def)
-      : MUnaryInstruction(classOpcode, def)
-    {
-        setResultType(MIRType::String);
-        setMovable();
-
-        // Objects might override toString and Symbols throw. We bailout in
-        // those cases and run side-effects in baseline instead.
-        if (def->mightBeType(MIRType::Object) || def->mightBeType(MIRType::Symbol))
-            setGuard();
-    }
-
-  public:
-    INSTRUCTION_HEADER(ToString)
-    TRIVIAL_NEW_WRAPPERS
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    bool fallible() const {
-        return input()->mightBeType(MIRType::Object) ||
-               input()->mightBeType(MIRType::Symbol);
-    }
-
-    ALLOW_CLONE(MToString)
-};
-
-// Converts any type to an object, throwing on null or undefined.
-class MToObject :
-  public MUnaryInstruction,
-  public BoxInputsPolicy::Data
-{
-    explicit MToObject(MDefinition* def)
-      : MUnaryInstruction(classOpcode, def)
-    {
-        setResultType(MIRType::Object);
-        setGuard(); // Throws on null or undefined.
-    }
-
-  public:
-    INSTRUCTION_HEADER(ToObject)
-    TRIVIAL_NEW_WRAPPERS
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    ALLOW_CLONE(MToObject)
-};
-
-// Converts any type to an object or null value, throwing on undefined.
-class MToObjectOrNull :
-  public MUnaryInstruction,
-  public BoxInputsPolicy::Data
-{
-    explicit MToObjectOrNull(MDefinition* def)
-      : MUnaryInstruction(classOpcode, def)
-    {
-        setResultType(MIRType::ObjectOrNull);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(ToObjectOrNull)
-    TRIVIAL_NEW_WRAPPERS
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    ALLOW_CLONE(MToObjectOrNull)
-};
-
-class MBitNot
-  : public MUnaryInstruction,
-    public BitwisePolicy::Data
-{
-  protected:
-    explicit MBitNot(MDefinition* input)
-      : MUnaryInstruction(classOpcode, input)
-    {
-        specialization_ = MIRType::None;
-        setResultType(MIRType::Int32);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(BitNot)
-    TRIVIAL_NEW_WRAPPERS
-
-    static MBitNot* NewInt32(TempAllocator& alloc, MDefinition* input);
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-    void setSpecialization(MIRType type) {
-        specialization_ = type;
-        setResultType(type);
-    }
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        if (specialization_ == MIRType::None)
-            return AliasSet::Store(AliasSet::Any);
-        return AliasSet::None();
-    }
-    void computeRange(TempAllocator& alloc) override;
-
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        return specialization_ != MIRType::None;
-    }
-
-    ALLOW_CLONE(MBitNot)
-};
-
-class MTypeOf
-  : public MUnaryInstruction,
-    public BoxInputsPolicy::Data
-{
-    MIRType inputType_;
-    bool inputMaybeCallableOrEmulatesUndefined_;
-
-    MTypeOf(MDefinition* def, MIRType inputType)
-      : MUnaryInstruction(classOpcode, def), inputType_(inputType),
-        inputMaybeCallableOrEmulatesUndefined_(true)
-    {
-        setResultType(MIRType::String);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(TypeOf)
-    TRIVIAL_NEW_WRAPPERS
-
-    MIRType inputType() const {
-        return inputType_;
-    }
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-    void cacheInputMaybeCallableOrEmulatesUndefined(CompilerConstraintList* constraints);
-
-    bool inputMaybeCallableOrEmulatesUndefined() const {
-        return inputMaybeCallableOrEmulatesUndefined_;
-    }
-    void markInputNotCallableOrEmulatesUndefined() {
-        inputMaybeCallableOrEmulatesUndefined_ = false;
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isTypeOf())
-            return false;
-        if (inputType() != ins->toTypeOf()->inputType())
-            return false;
-        if (inputMaybeCallableOrEmulatesUndefined() !=
-            ins->toTypeOf()->inputMaybeCallableOrEmulatesUndefined())
-        {
-            return false;
-        }
-        return congruentIfOperandsEqual(ins);
-    }
-
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        return true;
-    }
-};
-
-class MToAsync
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    explicit MToAsync(MDefinition* unwrapped)
-      : MUnaryInstruction(classOpcode, unwrapped)
-    {
-        setResultType(MIRType::Object);
-    }
-
-  public:
-    INSTRUCTION_HEADER(ToAsync)
-    TRIVIAL_NEW_WRAPPERS
-};
-
-class MToAsyncGen
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    explicit MToAsyncGen(MDefinition* unwrapped)
-      : MUnaryInstruction(classOpcode, unwrapped)
-    {
-        setResultType(MIRType::Object);
-    }
-
-  public:
-    INSTRUCTION_HEADER(ToAsyncGen)
-    TRIVIAL_NEW_WRAPPERS
-};
-
-class MToAsyncIter
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    explicit MToAsyncIter(MDefinition* unwrapped)
-      : MUnaryInstruction(classOpcode, unwrapped)
-    {
-        setResultType(MIRType::Object);
-    }
-
-  public:
-    INSTRUCTION_HEADER(ToAsyncIter)
-    TRIVIAL_NEW_WRAPPERS
-};
-
-class MToId
-  : public MUnaryInstruction,
-    public BoxInputsPolicy::Data
-{
-    explicit MToId(MDefinition* index)
-      : MUnaryInstruction(classOpcode, index)
-    {
-        setResultType(MIRType::Value);
-    }
-
-  public:
-    INSTRUCTION_HEADER(ToId)
-    TRIVIAL_NEW_WRAPPERS
-};
-
-class MBinaryBitwiseInstruction
-  : public MBinaryInstruction,
-    public BitwisePolicy::Data
-{
-  protected:
-    MBinaryBitwiseInstruction(Opcode op, MDefinition* left, MDefinition* right, MIRType type)
-      : MBinaryInstruction(op, left, right), maskMatchesLeftRange(false),
-        maskMatchesRightRange(false)
-    {
-        MOZ_ASSERT(type == MIRType::Int32 || type == MIRType::Int64);
-        setResultType(type);
-        setMovable();
-    }
-
-    void specializeAs(MIRType type);
-    bool maskMatchesLeftRange;
-    bool maskMatchesRightRange;
-
-  public:
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-    MDefinition* foldUnnecessaryBitop();
-    virtual MDefinition* foldIfZero(size_t operand) = 0;
-    virtual MDefinition* foldIfNegOne(size_t operand) = 0;
-    virtual MDefinition* foldIfEqual()  = 0;
-    virtual MDefinition* foldIfAllBitsSet(size_t operand)  = 0;
-    virtual void infer(BaselineInspector* inspector, jsbytecode* pc);
-    void collectRangeInfoPreTrunc() override;
-
-    void setInt32Specialization() {
-        specialization_ = MIRType::Int32;
-        setResultType(MIRType::Int32);
-    }
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return binaryCongruentTo(ins);
-    }
-    AliasSet getAliasSet() const override {
-        if (specialization_ >= MIRType::Object)
-            return AliasSet::Store(AliasSet::Any);
-        return AliasSet::None();
-    }
-
-    TruncateKind operandTruncateKind(size_t index) const override;
-};
-
 class MBitAnd : public MBinaryBitwiseInstruction
 {
     MBitAnd(MDefinition* left, MDefinition* right, MIRType type)
       : MBinaryBitwiseInstruction(classOpcode, left, right, type)
     { }
 
   public:
     INSTRUCTION_HEADER(BitAnd)
@@ -6281,485 +3381,16 @@ class MUrsh : public MShiftInstruction
     MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
     bool canRecoverOnBailout() const override {
         return specialization_ < MIRType::Object;
     }
 
     ALLOW_CLONE(MUrsh)
 };
 
-class MSignExtendInt32
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-  public:
-    enum Mode {
-        Byte,
-        Half
-    };
-
-  private:
-    Mode mode_;
-
-    MSignExtendInt32(MDefinition* op, Mode mode)
-      : MUnaryInstruction(classOpcode, op), mode_(mode)
-    {
-        setResultType(MIRType::Int32);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(SignExtendInt32)
-    TRIVIAL_NEW_WRAPPERS
-
-    Mode mode() const { return mode_; }
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!congruentIfOperandsEqual(ins))
-            return false;
-        return ins->isSignExtendInt32() && ins->toSignExtendInt32()->mode_ == mode_;
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        return true;
-    }
-
-    ALLOW_CLONE(MSignExtendInt32)
-};
-
-class MSignExtendInt64
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-  public:
-    enum Mode {
-        Byte,
-        Half,
-        Word
-    };
-
-  private:
-    Mode mode_;
-
-    MSignExtendInt64(MDefinition* op, Mode mode)
-      : MUnaryInstruction(classOpcode, op), mode_(mode)
-    {
-        setResultType(MIRType::Int64);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(SignExtendInt64)
-    TRIVIAL_NEW_WRAPPERS
-
-    Mode mode() const { return mode_; }
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!congruentIfOperandsEqual(ins))
-            return false;
-        return ins->isSignExtendInt64() && ins->toSignExtendInt64()->mode_ == mode_;
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    ALLOW_CLONE(MSignExtendInt64)
-};
-
-class MBinaryArithInstruction
-  : public MBinaryInstruction,
-    public ArithPolicy::Data
-{
-    // Implicit truncate flag is set by the truncate backward range analysis
-    // optimization phase, and by wasm pre-processing. It is used in
-    // NeedNegativeZeroCheck to check if the result of a multiplication needs to
-    // produce -0 double value, and for avoiding overflow checks.
-
-    // This optimization happens when the multiplication cannot be truncated
-    // even if all uses are truncating its result, such as when the range
-    // analysis detect a precision loss in the multiplication.
-    TruncateKind implicitTruncate_;
-
-    // Whether we must preserve NaN semantics, and in particular not fold
-    // (x op id) or (id op x) to x, or replace a division by a multiply of the
-    // exact reciprocal.
-    bool mustPreserveNaN_;
-
-  public:
-    MBinaryArithInstruction(Opcode op, MDefinition* left, MDefinition* right)
-      : MBinaryInstruction(op, left, right),
-        implicitTruncate_(NoTruncate),
-        mustPreserveNaN_(false)
-    {
-        specialization_ = MIRType::None;
-        setMovable();
-    }
-
-    static MBinaryArithInstruction* New(TempAllocator& alloc, Opcode op,
-                                        MDefinition* left, MDefinition* right);
-
-    bool constantDoubleResult(TempAllocator& alloc);
-
-    void setMustPreserveNaN(bool b) { mustPreserveNaN_ = b; }
-    bool mustPreserveNaN() const { return mustPreserveNaN_; }
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-    void printOpcode(GenericPrinter& out) const override;
-
-    virtual double getIdentity() = 0;
-
-    void setSpecialization(MIRType type) {
-        specialization_ = type;
-        setResultType(type);
-    }
-    void setInt32Specialization() {
-        specialization_ = MIRType::Int32;
-        setResultType(MIRType::Int32);
-    }
-    void setNumberSpecialization(TempAllocator& alloc, BaselineInspector* inspector, jsbytecode* pc);
-
-    virtual void trySpecializeFloat32(TempAllocator& alloc) override;
-
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!binaryCongruentTo(ins))
-            return false;
-        const auto* other = static_cast<const MBinaryArithInstruction*>(ins);
-        return other->mustPreserveNaN_ == mustPreserveNaN_;
-    }
-    AliasSet getAliasSet() const override {
-        if (specialization_ >= MIRType::Object)
-            return AliasSet::Store(AliasSet::Any);
-        return AliasSet::None();
-    }
-
-    bool isTruncated() const {
-        return implicitTruncate_ == Truncate;
-    }
-    TruncateKind truncateKind() const {
-        return implicitTruncate_;
-    }
-    void setTruncateKind(TruncateKind kind) {
-        implicitTruncate_ = Max(implicitTruncate_, kind);
-    }
-};
-
-class MMinMax
-  : public MBinaryInstruction,
-    public ArithPolicy::Data
-{
-    bool isMax_;
-
-    MMinMax(MDefinition* left, MDefinition* right, MIRType type, bool isMax)
-      : MBinaryInstruction(classOpcode, left, right),
-        isMax_(isMax)
-    {
-        MOZ_ASSERT(IsNumberType(type));
-        setResultType(type);
-        setMovable();
-        specialization_ = type;
-    }
-
-  public:
-    INSTRUCTION_HEADER(MinMax)
-    TRIVIAL_NEW_WRAPPERS
-
-    static MMinMax* NewWasm(TempAllocator& alloc, MDefinition* left, MDefinition* right,
-                            MIRType type, bool isMax)
-    {
-        return New(alloc, left, right, type, isMax);
-    }
-
-    bool isMax() const {
-        return isMax_;
-    }
-
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!congruentIfOperandsEqual(ins))
-            return false;
-        const MMinMax* other = ins->toMinMax();
-        return other->isMax() == isMax();
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-    void computeRange(TempAllocator& alloc) override;
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        return true;
-    }
-
-    bool isFloat32Commutative() const override { return true; }
-    void trySpecializeFloat32(TempAllocator& alloc) override;
-
-    ALLOW_CLONE(MMinMax)
-};
-
-class MAbs
-  : public MUnaryInstruction,
-    public ArithPolicy::Data
-{
-    bool implicitTruncate_;
-
-    MAbs(MDefinition* num, MIRType type)
-      : MUnaryInstruction(classOpcode, num),
-        implicitTruncate_(false)
-    {
-        MOZ_ASSERT(IsNumberType(type));
-        setResultType(type);
-        setMovable();
-        specialization_ = type;
-    }
-
-  public:
-    INSTRUCTION_HEADER(Abs)
-    TRIVIAL_NEW_WRAPPERS
-
-    static MAbs* NewWasm(TempAllocator& alloc, MDefinition* num, MIRType type) {
-        auto* ins = new(alloc) MAbs(num, type);
-        if (type == MIRType::Int32)
-            ins->implicitTruncate_ = true;
-        return ins;
-    }
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    bool fallible() const;
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    void computeRange(TempAllocator& alloc) override;
-    bool isFloat32Commutative() const override { return true; }
-    void trySpecializeFloat32(TempAllocator& alloc) override;
-
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        return true;
-    }
-
-    ALLOW_CLONE(MAbs)
-};
-
-class MClz
-  : public MUnaryInstruction
-  , public BitwisePolicy::Data
-{
-    bool operandIsNeverZero_;
-
-    explicit MClz(MDefinition* num, MIRType type)
-      : MUnaryInstruction(classOpcode, num),
-        operandIsNeverZero_(false)
-    {
-        MOZ_ASSERT(IsIntType(type));
-        MOZ_ASSERT(IsNumberType(num->type()));
-        specialization_ = type;
-        setResultType(type);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(Clz)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, num))
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    bool operandIsNeverZero() const {
-        return operandIsNeverZero_;
-    }
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-    void computeRange(TempAllocator& alloc) override;
-    void collectRangeInfoPreTrunc() override;
-};
-
-class MCtz
-  : public MUnaryInstruction
-  , public BitwisePolicy::Data
-{
-    bool operandIsNeverZero_;
-
-    explicit MCtz(MDefinition* num, MIRType type)
-      : MUnaryInstruction(classOpcode, num),
-        operandIsNeverZero_(false)
-    {
-        MOZ_ASSERT(IsIntType(type));
-        MOZ_ASSERT(IsNumberType(num->type()));
-        specialization_ = type;
-        setResultType(type);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(Ctz)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, num))
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    bool operandIsNeverZero() const {
-        return operandIsNeverZero_;
-    }
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-    void computeRange(TempAllocator& alloc) override;
-    void collectRangeInfoPreTrunc() override;
-};
-
-class MPopcnt
-  : public MUnaryInstruction
-  , public BitwisePolicy::Data
-{
-    explicit MPopcnt(MDefinition* num, MIRType type)
-      : MUnaryInstruction(classOpcode, num)
-    {
-        MOZ_ASSERT(IsNumberType(num->type()));
-        MOZ_ASSERT(IsIntType(type));
-        specialization_ = type;
-        setResultType(type);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(Popcnt)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, num))
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-    void computeRange(TempAllocator& alloc) override;
-};
-
-// Inline implementation of Math.sqrt().
-class MSqrt
-  : public MUnaryInstruction,
-    public FloatingPointPolicy<0>::Data
-{
-    MSqrt(MDefinition* num, MIRType type)
-      : MUnaryInstruction(classOpcode, num)
-    {
-        setResultType(type);
-        specialization_ = type;
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(Sqrt)
-    TRIVIAL_NEW_WRAPPERS
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    void computeRange(TempAllocator& alloc) override;
-
-    bool isFloat32Commutative() const override { return true; }
-    void trySpecializeFloat32(TempAllocator& alloc) override;
-
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        return true;
-    }
-
-    ALLOW_CLONE(MSqrt)
-};
-
-class MCopySign
-  : public MBinaryInstruction,
-    public NoTypePolicy::Data
-{
-    MCopySign(MDefinition* lhs, MDefinition* rhs, MIRType type)
-      : MBinaryInstruction(classOpcode, lhs, rhs)
-    {
-        setResultType(type);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(CopySign)
-    TRIVIAL_NEW_WRAPPERS
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    ALLOW_CLONE(MCopySign)
-};
-
-// Inline implementation of atan2 (arctangent of y/x).
-class MAtan2
-  : public MBinaryInstruction,
-    public MixPolicy<DoublePolicy<0>, DoublePolicy<1> >::Data
-{
-    MAtan2(MDefinition* y, MDefinition* x)
-      : MBinaryInstruction(classOpcode, y, x)
-    {
-        setResultType(MIRType::Double);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(Atan2)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, y), (1, x))
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    bool possiblyCalls() const override {
-        return true;
-    }
-
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        return true;
-    }
-
-    ALLOW_CLONE(MAtan2)
-};
-
 // Inline implementation of Math.hypot().
 class MHypot
   : public MVariadicInstruction,
     public AllDoublePolicy::Data
 {
     MHypot()
       : MVariadicInstruction(classOpcode)
     {
@@ -6793,254 +3424,16 @@ class MHypot
     }
 
     MInstruction* clone(TempAllocator& alloc,
                         const MDefinitionVector& inputs) const override {
        return MHypot::New(alloc, inputs);
     }
 };
 
-// Inline implementation of Math.pow().
-class MPow
-  : public MBinaryInstruction,
-    public PowPolicy::Data
-{
-    MPow(MDefinition* input, MDefinition* power, MIRType powerType)
-      : MBinaryInstruction(classOpcode, input, power)
-    {
-        MOZ_ASSERT(powerType == MIRType::Double ||
-                   powerType == MIRType::Int32 ||
-                   powerType == MIRType::None);
-        specialization_ = powerType;
-        if (powerType == MIRType::None)
-            setResultType(MIRType::Value);
-        else
-            setResultType(MIRType::Double);
-        setMovable();
-    }
-
-    // Helpers for `foldsTo`
-    MDefinition* foldsConstant(TempAllocator &alloc);
-    MDefinition* foldsConstantPower(TempAllocator &alloc);
-
-  public:
-    INSTRUCTION_HEADER(Pow)
-    TRIVIAL_NEW_WRAPPERS
-
-    MDefinition* input() const {
-        return lhs();
-    }
-    MDefinition* power() const {
-        return rhs();
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        if (specialization_ == MIRType::None)
-            return AliasSet::Store(AliasSet::Any);
-        return AliasSet::None();
-    }
-    bool possiblyCalls() const override {
-        return true;
-    }
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        return specialization_ != MIRType::None;
-    }
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-
-    ALLOW_CLONE(MPow)
-};
-
-// Inline implementation of Math.pow(x, 0.5), which subtly differs from Math.sqrt(x).
-class MPowHalf
-  : public MUnaryInstruction,
-    public DoublePolicy<0>::Data
-{
-    bool operandIsNeverNegativeInfinity_;
-    bool operandIsNeverNegativeZero_;
-    bool operandIsNeverNaN_;
-
-    explicit MPowHalf(MDefinition* input)
-      : MUnaryInstruction(classOpcode, input),
-        operandIsNeverNegativeInfinity_(false),
-        operandIsNeverNegativeZero_(false),
-        operandIsNeverNaN_(false)
-    {
-        setResultType(MIRType::Double);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(PowHalf)
-    TRIVIAL_NEW_WRAPPERS
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    bool operandIsNeverNegativeInfinity() const {
-        return operandIsNeverNegativeInfinity_;
-    }
-    bool operandIsNeverNegativeZero() const {
-        return operandIsNeverNegativeZero_;
-    }
-    bool operandIsNeverNaN() const {
-        return operandIsNeverNaN_;
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    void collectRangeInfoPreTrunc() override;
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        return true;
-    }
-
-    ALLOW_CLONE(MPowHalf)
-};
-
-// Inline implementation of Math.random().
-class MRandom : public MNullaryInstruction
-{
-    MRandom()
-      : MNullaryInstruction(classOpcode)
-    {
-        setResultType(MIRType::Double);
-    }
-
-  public:
-    INSTRUCTION_HEADER(Random)
-    TRIVIAL_NEW_WRAPPERS
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    bool possiblyCalls() const override {
-        return true;
-    }
-
-    void computeRange(TempAllocator& alloc) override;
-
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-
-    bool canRecoverOnBailout() const override {
-#ifdef JS_MORE_DETERMINISTIC
-        return false;
-#else
-        return true;
-#endif
-    }
-
-    ALLOW_CLONE(MRandom)
-};
-
-class MMathFunction
-  : public MUnaryInstruction,
-    public FloatingPointPolicy<0>::Data
-{
-  public:
-    enum Function {
-        Log,
-        Sin,
-        Cos,
-        Exp,
-        Tan,
-        ACos,
-        ASin,
-        ATan,
-        Log10,
-        Log2,
-        Log1P,
-        ExpM1,
-        CosH,
-        SinH,
-        TanH,
-        ACosH,
-        ASinH,
-        ATanH,
-        Sign,
-        Trunc,
-        Cbrt,
-        Floor,
-        Ceil,
-        Round
-    };
-
-  private:
-    Function function_;
-    const MathCache* cache_;
-
-    // A nullptr cache means this function will neither access nor update the cache.
-    MMathFunction(MDefinition* input, Function function, const MathCache* cache)
-      : MUnaryInstruction(classOpcode, input), function_(function), cache_(cache)
-    {
-        setResultType(MIRType::Double);
-        specialization_ = MIRType::Double;
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(MathFunction)
-    TRIVIAL_NEW_WRAPPERS
-
-    Function function() const {
-        return function_;
-    }
-    const MathCache* cache() const {
-        return cache_;
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isMathFunction())
-            return false;
-        if (ins->toMathFunction()->function() != function())
-            return false;
-        return congruentIfOperandsEqual(ins);
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    bool possiblyCalls() const override {
-        return true;
-    }
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-
-    void printOpcode(GenericPrinter& out) const override;
-
-    static const char* FunctionName(Function function);
-
-    bool isFloat32Commutative() const override {
-        return function_ == Floor || function_ == Ceil || function_ == Round;
-    }
-    void trySpecializeFloat32(TempAllocator& alloc) override;
-    void computeRange(TempAllocator& alloc) override;
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        if (input()->type() == MIRType::SinCosDouble)
-            return false;
-        switch(function_) {
-          case Sin:
-          case Log:
-          case Ceil:
-          case Floor:
-          case Round:
-            return true;
-          default:
-            return false;
-        }
-    }
-
-    ALLOW_CLONE(MMathFunction)
-};
-
 class MAdd : public MBinaryArithInstruction
 {
     MAdd(MDefinition* left, MDefinition* right)
       : MBinaryArithInstruction(classOpcode, left, right)
     {
         setResultType(MIRType::Value);
     }
 
@@ -7477,302 +3870,16 @@ class MMod : public MBinaryArithInstruct
 
     bool possiblyCalls() const override {
         return type() == MIRType::Double;
     }
 
     ALLOW_CLONE(MMod)
 };
 
-class MConcat
-  : public MBinaryInstruction,
-    public MixPolicy<ConvertToStringPolicy<0>, ConvertToStringPolicy<1> >::Data
-{
-    MConcat(MDefinition* left, MDefinition* right)
-      : MBinaryInstruction(classOpcode, left, right)
-    {
-        // At least one input should be definitely string
-        MOZ_ASSERT(left->type() == MIRType::String || right->type() == MIRType::String);
-
-        setMovable();
-        setResultType(MIRType::String);
-    }
-
-  public:
-    INSTRUCTION_HEADER(Concat)
-    TRIVIAL_NEW_WRAPPERS
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        return true;
-    }
-
-    ALLOW_CLONE(MConcat)
-};
-
-class MCharCodeAt
-  : public MBinaryInstruction,
-    public MixPolicy<StringPolicy<0>, IntPolicy<1> >::Data
-{
-    MCharCodeAt(MDefinition* str, MDefinition* index)
-        : MBinaryInstruction(classOpcode, str, index)
-    {
-        setMovable();
-        setResultType(MIRType::Int32);
-    }
-
-  public:
-    INSTRUCTION_HEADER(CharCodeAt)
-    TRIVIAL_NEW_WRAPPERS
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-
-    virtual AliasSet getAliasSet() const override {
-        // Strings are immutable, so there is no implicit dependency.
-        return AliasSet::None();
-    }
-
-    void computeRange(TempAllocator& alloc) override;
-
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        return true;
-    }
-
-    ALLOW_CLONE(MCharCodeAt)
-};
-
-class MFromCharCode
-  : public MUnaryInstruction,
-    public IntPolicy<0>::Data
-{
-    explicit MFromCharCode(MDefinition* code)
-      : MUnaryInstruction(classOpcode, code)
-    {
-        setMovable();
-        setResultType(MIRType::String);
-    }
-
-  public:
-    INSTRUCTION_HEADER(FromCharCode)
-    TRIVIAL_NEW_WRAPPERS
-
-    virtual AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        return true;
-    }
-
-    ALLOW_CLONE(MFromCharCode)
-};
-
-class MFromCodePoint
-  : public MUnaryInstruction,
-    public IntPolicy<0>::Data
-{
-    explicit MFromCodePoint(MDefinition* codePoint)
-      : MUnaryInstruction(classOpcode, codePoint)
-    {
-        setGuard(); // throws on invalid code point
-        setMovable();
-        setResultType(MIRType::String);
-    }
-
-  public:
-    INSTRUCTION_HEADER(FromCodePoint)
-    TRIVIAL_NEW_WRAPPERS
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-
-    ALLOW_CLONE(MFromCodePoint)
-};
-
-class MStringConvertCase
-  : public MUnaryInstruction,
-    public StringPolicy<0>::Data
-{
-  public:
-    enum Mode { LowerCase, UpperCase };
-
-  private:
-    Mode mode_;
-
-    MStringConvertCase(MDefinition* string, Mode mode)
-      : MUnaryInstruction(classOpcode, string), mode_(mode)
-    {
-        setResultType(MIRType::String);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(StringConvertCase)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, string))
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins) && ins->toStringConvertCase()->mode() == mode();
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    bool possiblyCalls() const override {
-        return true;
-    }
-    Mode mode() const {
-        return mode_;
-    }
-};
-
-class MSinCos
-  : public MUnaryInstruction,
-    public FloatingPointPolicy<0>::Data
-{
-    const MathCache* cache_;
-
-    MSinCos(MDefinition *input, const MathCache *cache)
-      : MUnaryInstruction(classOpcode, input),
-        cache_(cache)
-    {
-        setResultType(MIRType::SinCosDouble);
-        specialization_ = MIRType::Double;
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(SinCos)
-
-    static MSinCos *New(TempAllocator &alloc, MDefinition *input, const MathCache *cache)
-    {
-        return new (alloc) MSinCos(input, cache);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    bool congruentTo(const MDefinition *ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    bool possiblyCalls() const override {
-        return true;
-    }
-    const MathCache* cache() const {
-        return cache_;
-    }
-};
-
-class MStringSplit
-  : public MBinaryInstruction,
-    public MixPolicy<StringPolicy<0>, StringPolicy<1> >::Data
-{
-    CompilerObjectGroup group_;
-
-    MStringSplit(TempAllocator& alloc, CompilerConstraintList* constraints, MDefinition* string,
-                 MDefinition* sep, ObjectGroup* group)
-      : MBinaryInstruction(classOpcode, string, sep),
-        group_(group)
-    {
-        setResultType(MIRType::Object);
-        TemporaryTypeSet* types = MakeSingletonTypeSet(alloc, constraints, group);
-        setResultTypeSet(types);
-    }
-
-  public:
-    INSTRUCTION_HEADER(StringSplit)
-    TRIVIAL_NEW_WRAPPERS_WITH_ALLOC
-    NAMED_OPERANDS((0, string), (1, separator))
-
-    ObjectGroup* group() const {
-        return group_;
-    }
-    bool possiblyCalls() const override {
-        return true;
-    }
-    virtual AliasSet getAliasSet() const override {
-        // Although this instruction returns a new array, we don't have to mark
-        // it as store instruction, see also MNewArray.
-        return AliasSet::None();
-    }
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        return true;
-    }
-    bool appendRoots(MRootList& roots) const override {
-        return roots.append(group_);
-    }
-};
-
-// Returns the value to use as |this| value. See also ComputeThis and
-// BoxNonStrictThis in Interpreter.h.
-class MComputeThis
-  : public MUnaryInstruction,
-    public BoxPolicy<0>::Data
-{
-    explicit MComputeThis(MDefinition* def)
-      : MUnaryInstruction(classOpcode, def)
-    {
-        setResultType(MIRType::Value);
-    }
-
-  public:
-    INSTRUCTION_HEADER(ComputeThis)
-    TRIVIAL_NEW_WRAPPERS
-
-    bool possiblyCalls() const override {
-        return true;
-    }
-
-    // Note: don't override getAliasSet: the thisValue hook can be effectful.
-};
-
-// Load an arrow function's |new.target| value.
-class MArrowNewTarget
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    explicit MArrowNewTarget(MDefinition* callee)
-      : MUnaryInstruction(classOpcode, callee)
-    {
-        setResultType(MIRType::Value);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(ArrowNewTarget)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, callee))
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        // An arrow function's lexical |this| value is immutable.
-        return AliasSet::None();
-    }
-};
-
 class MPhi final
   : public MDefinition,
     public InlineListNode<MPhi>,
     public NoTypePolicy::Data
 {
     using InputVector = js::Vector<MUse, 2, JitAllocPolicy>;
     InputVector inputs_;
 
@@ -7951,273 +4058,16 @@ class MPhi final
         canConsumeFloat32_ = can;
     }
 
     TruncateKind operandTruncateKind(size_t index) const override;
     bool needTruncation(TruncateKind kind) override;
     void truncate() override;
 };
 
-// The goal of a Beta node is to split a def at a conditionally taken
-// branch, so that uses dominated by it have a different name.
-class MBeta
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-  private:
-    // This is the range induced by a comparison and branch in a preceding
-    // block. Note that this does not reflect any range constraints from
-    // the input value itself, so this value may differ from the range()
-    // range after it is computed.
-    const Range* comparison_;
-
-    MBeta(MDefinition* val, const Range* comp)
-        : MUnaryInstruction(classOpcode, val),
-          comparison_(comp)
-    {
-        setResultType(val->type());
-        setResultTypeSet(val->resultTypeSet());
-    }
-
-  public:
-    INSTRUCTION_HEADER(Beta)
-    TRIVIAL_NEW_WRAPPERS
-
-    void printOpcode(GenericPrinter& out) const override;
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    void computeRange(TempAllocator& alloc) override;
-};
-
-// If input evaluates to false (i.e. it's NaN, 0 or -0), 0 is returned, else the input is returned
-class MNaNToZero
-  : public MUnaryInstruction,
-    public DoublePolicy<0>::Data
-{
-    bool operandIsNeverNaN_;
-    bool operandIsNeverNegativeZero_;
-
-    explicit MNaNToZero(MDefinition* input)
-      : MUnaryInstruction(classOpcode, input),
-        operandIsNeverNaN_(false),
-        operandIsNeverNegativeZero_(false)
-    {
-        setResultType(MIRType::Double);
-        setMovable();
-    }
-  public:
-    INSTRUCTION_HEADER(NaNToZero)
-    TRIVIAL_NEW_WRAPPERS
-
-    bool operandIsNeverNaN() const {
-        return operandIsNeverNaN_;
-    }
-
-    bool operandIsNeverNegativeZero() const {
-        return operandIsNeverNegativeZero_;
-    }
-
-    void collectRangeInfoPreTrunc() override;
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    void computeRange(TempAllocator& alloc) override;
-
-    bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        return true;
-    }
-
-    ALLOW_CLONE(MNaNToZero)
-};
-
-// MIR representation of a Value on the OSR BaselineFrame.
-// The Value is indexed off of OsrFrameReg.
-class MOsrValue
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-  private:
-    ptrdiff_t frameOffset_;
-
-    MOsrValue(MOsrEntry* entry, ptrdiff_t frameOffset)
-      : MUnaryInstruction(classOpcode, entry),
-        frameOffset_(frameOffset)
-    {
-        setResultType(MIRType::Value);
-    }
-
-  public:
-    INSTRUCTION_HEADER(OsrValue)
-    TRIVIAL_NEW_WRAPPERS
-
-    ptrdiff_t frameOffset() const {
-        return frameOffset_;
-    }
-
-    MOsrEntry* entry() {
-        return getOperand(0)->toOsrEntry();
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
-// MIR representation of a JSObject scope chain pointer on the OSR BaselineFrame.
-// The pointer is indexed off of OsrFrameReg.
-class MOsrEnvironmentChain
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-  private:
-    explicit MOsrEnvironmentChain(MOsrEntry* entry)
-      : MUnaryInstruction(classOpcode, entry)
-    {
-        setResultType(MIRType::Object);
-    }
-
-  public:
-    INSTRUCTION_HEADER(OsrEnvironmentChain)
-    TRIVIAL_NEW_WRAPPERS
-
-    MOsrEntry* entry() {
-        return getOperand(0)->toOsrEntry();
-    }
-};
-
-// MIR representation of a JSObject ArgumentsObject pointer on the OSR BaselineFrame.
-// The pointer is indexed off of OsrFrameReg.
-class MOsrArgumentsObject
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-  private:
-    explicit MOsrArgumentsObject(MOsrEntry* entry)
-      : MUnaryInstruction(classOpcode, entry)
-    {
-        setResultType(MIRType::Object);
-    }
-
-  public:
-    INSTRUCTION_HEADER(OsrArgumentsObject)
-    TRIVIAL_NEW_WRAPPERS
-
-    MOsrEntry* entry() {
-        return getOperand(0)->toOsrEntry();
-    }
-};
-
-// MIR representation of the return value on the OSR BaselineFrame.
-// The Value is indexed off of OsrFrameReg.
-class MOsrReturnValue
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-  private:
-    explicit MOsrReturnValue(MOsrEntry* entry)
-      : MUnaryInstruction(classOpcode, entry)
-    {
-        setResultType(MIRType::Value);
-    }
-
-  public:
-    INSTRUCTION_HEADER(OsrReturnValue)
-    TRIVIAL_NEW_WRAPPERS
-
-    MOsrEntry* entry() {
-        return getOperand(0)->toOsrEntry();
-    }
-};
-
-class MBinarySharedStub
-  : public MBinaryInstruction,
-    public MixPolicy<BoxPolicy<0>, BoxPolicy<1> >::Data
-{
-  protected:
-    explicit MBinarySharedStub(MDefinition* left, MDefinition* right)
-      : MBinaryInstruction(classOpcode, left, right)
-    {
-        setResultType(MIRType::Value);
-    }
-
-  public:
-    INSTRUCTION_HEADER(BinarySharedStub)
-    TRIVIAL_NEW_WRAPPERS
-};
-
-class MUnarySharedStub
-  : public MUnaryInstruction,
-    public BoxPolicy<0>::Data
-{
-    explicit MUnarySharedStub(MDefinition* input)
-      : MUnaryInstruction(classOpcode, input)
-    {
-        setResultType(MIRType::Value);
-    }
-
-  public:
-    INSTRUCTION_HEADER(UnarySharedStub)
-    TRIVIAL_NEW_WRAPPERS
-};
-
-class MNullarySharedStub
-  : public MNullaryInstruction
-{
-    explicit MNullarySharedStub()
-      : MNullaryInstruction(classOpcode)
-    {
-        setResultType(MIRType::Value);
-    }
-
-  public:
-    INSTRUCTION_HEADER(NullarySharedStub)
-    TRIVIAL_NEW_WRAPPERS
-};
-
-// Check the current frame for over-recursion past the global stack limit.
-class MCheckOverRecursed
-  : public MNullaryInstruction
-{
-    MCheckOverRecursed()
-      : MNullaryInstruction(classOpcode)
-    { }
-
-  public:
-    INSTRUCTION_HEADER(CheckOverRecursed)
-    TRIVIAL_NEW_WRAPPERS
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
-// Check whether we need to fire the interrupt handler.
-class MInterruptCheck : public MNullaryInstruction
-{
-    MInterruptCheck()
-      : MNullaryInstruction(classOpcode)
-    {
-        setGuard();
-    }
-
-  public:
-    INSTRUCTION_HEADER(InterruptCheck)
-    TRIVIAL_NEW_WRAPPERS
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
 // Directly jumps to the indicated trap, leaving Wasm code and reporting a
 // runtime error.
 
 class MWasmTrap
   : public MAryControlInstruction<0, 0>,
     public NoTypePolicy::Data
 {
     wasm::Trap trap_;
@@ -8236,478 +4086,16 @@ class MWasmTrap
     AliasSet getAliasSet() const override {
         return AliasSet::None();
     }
 
     wasm::Trap trap() const { return trap_; }
     wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
 };
 
-// Checks if a value is JS_UNINITIALIZED_LEXICAL, bailout out if so, leaving
-// it to baseline to throw at the correct pc.
-class MLexicalCheck
-  : public MUnaryInstruction,
-    public BoxPolicy<0>::Data
-{
-    BailoutKind kind_;
-    explicit MLexicalCheck(MDefinition* input, BailoutKind kind = Bailout_UninitializedLexical)
-      : MUnaryInstruction(classOpcode, input),
-        kind_(kind)
-    {
-        setResultType(MIRType::Value);
-        setResultTypeSet(input->resultTypeSet());
-        setMovable();
-        setGuard();
-    }
-
-  public:
-    INSTRUCTION_HEADER(LexicalCheck)
-    TRIVIAL_NEW_WRAPPERS
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    BailoutKind bailoutKind() const {
-        return kind_;
-    }
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-};
-
-// Unconditionally throw an uninitialized let error.
-class MThrowRuntimeLexicalError : public MNullaryInstruction
-{
-    unsigned errorNumber_;
-
-    explicit MThrowRuntimeLexicalError(unsigned errorNumber)
-      : MNullaryInstruction(classOpcode),
-        errorNumber_(errorNumber)
-    {
-        setGuard();
-        setResultType(MIRType::None);
-    }
-
-  public:
-    INSTRUCTION_HEADER(ThrowRuntimeLexicalError)
-    TRIVIAL_NEW_WRAPPERS
-
-    unsigned errorNumber() const {
-        return errorNumber_;
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
-// In the prologues of global and eval scripts, check for redeclarations.
-class MGlobalNameConflictsCheck : public MNullaryInstruction
-{
-    MGlobalNameConflictsCheck()
-      : MNullaryInstruction(classOpcode)
-    {
-        setGuard();
-    }
-
-  public:
-    INSTRUCTION_HEADER(GlobalNameConflictsCheck)
-    TRIVIAL_NEW_WRAPPERS
-};
-
-// If not defined, set a global variable to |undefined|.
-class MDefVar
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-    CompilerPropertyName name_; // Target name to be defined.
-    unsigned attrs_; // Attributes to be set.
-
-  private:
-    MDefVar(PropertyName* name, unsigned attrs, MDefinition* envChain)
-      : MUnaryInstruction(classOpcode, envChain),
-        name_(name),
-        attrs_(attrs)
-    {
-    }
-
-  public:
-    INSTRUCTION_HEADER(DefVar)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, environmentChain))
-
-    PropertyName* name() const {
-        return name_;
-    }
-    unsigned attrs() const {
-        return attrs_;
-    }
-
-    bool possiblyCalls() const override {
-        return true;
-    }
-    bool appendRoots(MRootList& roots) const override {
-        return roots.append(name_);
-    }
-};
-
-class MDefLexical
-  : public MNullaryInstruction
-{
-    CompilerPropertyName name_; // Target name to be defined.
-    unsigned attrs_; // Attributes to be set.
-
-  private:
-    MDefLexical(PropertyName* name, unsigned attrs)
-      : MNullaryInstruction(classOpcode),
-        name_(name),
-        attrs_(attrs)
-    { }
-
-  public:
-    INSTRUCTION_HEADER(DefLexical)
-    TRIVIAL_NEW_WRAPPERS
-
-    PropertyName* name() const {
-        return name_;
-    }
-    unsigned attrs() const {
-        return attrs_;
-    }
-    bool appendRoots(MRootList& roots) const override {
-        return roots.append(name_);
-    }
-};
-
-class MDefFun
-  : public MBinaryInstruction,
-    public ObjectPolicy<0>::Data
-{
-  private:
-    MDefFun(MDefinition* fun, MDefinition* envChain)
-      : MBinaryInstruction(classOpcode, fun, envChain)
-    {}
-
-  public:
-    INSTRUCTION_HEADER(DefFun)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, fun), (1, environmentChain))
-
-    bool possiblyCalls() const override {
-        return true;
-    }
-};
-
-class MRegExp : public MNullaryInstruction
-{
-    CompilerGCPointer<RegExpObject*> source_;
-    bool mustClone_;
-    bool hasShared_;
-
-    MRegExp(TempAllocator& alloc, CompilerConstraintList* constraints, RegExpObject* source,
-            bool hasShared)
-      : MNullaryInstruction(classOpcode),
-        source_(source),
-        mustClone_(true),
-        hasShared_(hasShared)
-    {
-        setResultType(MIRType::Object);
-        setResultTypeSet(MakeSingletonTypeSet(alloc, constraints, source));
-    }
-
-  public:
-    INSTRUCTION_HEADER(RegExp)
-    TRIVIAL_NEW_WRAPPERS_WITH_ALLOC
-
-    void setDoNotClone() {
-        mustClone_ = false;
-    }
-    bool mustClone() const {
-        return mustClone_;
-    }
-    bool hasShared() const {
-        return hasShared_;
-    }
-    RegExpObject* source() const {
-        return source_;
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    bool possiblyCalls() const override {
-        return true;
-    }
-    bool appendRoots(MRootList& roots) const override {
-        return roots.append(source_);
-    }
-};
-
-class MRegExpMatcher
-  : public MTernaryInstruction,
-    public MixPolicy<ObjectPolicy<0>,
-                     StringPolicy<1>,
-                     IntPolicy<2> >::Data
-{
-  private:
-
-    MRegExpMatcher(MDefinition* regexp, MDefinition* string, MDefinition* lastIndex)
-      : MTernaryInstruction(classOpcode, regexp, string, lastIndex)
-    {
-        setMovable();
-        // May be object or null.
-        setResultType(MIRType::Value);
-    }
-
-  public:
-    INSTRUCTION_HEADER(RegExpMatcher)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, regexp), (1, string), (2, lastIndex))
-
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-
-    bool canRecoverOnBailout() const override {
-        return true;
-    }
-
-    bool possiblyCalls() const override {
-        return true;
-    }
-};
-
-class MRegExpSearcher
-  : public MTernaryInstruction,
-    public MixPolicy<ObjectPolicy<0>,
-                     StringPolicy<1>,
-                     IntPolicy<2> >::Data
-{
-  private:
-
-    MRegExpSearcher(MDefinition* regexp, MDefinition* string, MDefinition* lastIndex)
-      : MTernaryInstruction(classOpcode, regexp, string, lastIndex)
-    {
-        setMovable();
-        setResultType(MIRType::Int32);
-    }
-
-  public:
-    INSTRUCTION_HEADER(RegExpSearcher)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, regexp), (1, string), (2, lastIndex))
-
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-
-    bool canRecoverOnBailout() const override {
-        return true;
-    }
-
-    bool possiblyCalls() const override {
-        return true;
-    }
-};
-
-class MRegExpTester
-  : public MTernaryInstruction,
-    public MixPolicy<ObjectPolicy<0>,
-                     StringPolicy<1>,
-                     IntPolicy<2> >::Data
-{
-  private:
-
-    MRegExpTester(MDefinition* regexp, MDefinition* string, MDefinition* lastIndex)
-      : MTernaryInstruction(classOpcode, regexp, string, lastIndex)
-    {
-        setMovable();
-        setResultType(MIRType::Int32);
-    }
-
-  public:
-    INSTRUCTION_HEADER(RegExpTester)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, regexp), (1, string), (2, lastIndex))
-
-    bool possiblyCalls() const override {
-        return true;
-    }
-
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        return true;
-    }
-};
-
-class MRegExpPrototypeOptimizable
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    explicit MRegExpPrototypeOptimizable(MDefinition* object)
-      : MUnaryInstruction(classOpcode, object)
-    {
-        setResultType(MIRType::Boolean);
-    }
-
-  public:
-    INSTRUCTION_HEADER(RegExpPrototypeOptimizable)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object))
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
-class MRegExpInstanceOptimizable
-  : public MBinaryInstruction,
-    public MixPolicy<ObjectPolicy<0>, ObjectPolicy<1> >::Data
-{
-    explicit MRegExpInstanceOptimizable(MDefinition* object, MDefinition* proto)
-      : MBinaryInstruction(classOpcode, object, proto)
-    {
-        setResultType(MIRType::Boolean);
-    }
-
-  public:
-    INSTRUCTION_HEADER(RegExpInstanceOptimizable)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object), (1, proto))
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
-class MGetFirstDollarIndex
-  : public MUnaryInstruction,
-    public StringPolicy<0>::Data
-{
-    explicit MGetFirstDollarIndex(MDefinition* str)
-      : MUnaryInstruction(classOpcode, str)
-    {
-        setResultType(MIRType::Int32);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(GetFirstDollarIndex)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, str))
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-};
-
-class MStringReplace
-  : public MTernaryInstruction,
-    public MixPolicy<StringPolicy<0>, StringPolicy<1>, StringPolicy<2> >::Data
-{
-  private:
-
-    bool isFlatReplacement_;
-
-    MStringReplace(MDefinition* string, MDefinition* pattern, MDefinition* replacement)
-      : MTernaryInstruction(classOpcode, string, pattern, replacement),
-        isFlatReplacement_(false)
-    {
-        setMovable();
-        setResultType(MIRType::String);
-    }
-
-  public:
-    INSTRUCTION_HEADER(StringReplace)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, string), (1, pattern), (2, replacement))
-
-    void setFlatReplacement() {
-        MOZ_ASSERT(!isFlatReplacement_);
-        isFlatReplacement_ = true;
-    }
-
-    bool isFlatReplacement() const {
-        return isFlatReplacement_;
-    }
-
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isStringReplace())
-            return false;
-        if (isFlatReplacement_ != ins->toStringReplace()->isFlatReplacement())
-            return false;
-        return congruentIfOperandsEqual(ins);
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        if (isFlatReplacement_) {
-            MOZ_ASSERT(!pattern()->isRegExp());
-            return true;
-        }
-        return false;
-    }
-
-    bool possiblyCalls() const override {
-        return true;
-    }
-};
-
-class MSubstr
-  : public MTernaryInstruction,
-    public MixPolicy<StringPolicy<0>, IntPolicy<1>, IntPolicy<2>>::Data
-{
-  private:
-
-    MSubstr(MDefinition* string, MDefinition* begin, MDefinition* length)
-      : MTernaryInstruction(classOpcode, string, begin, length)
-    {
-        setResultType(MIRType::String);
-    }
-
-  public:
-    INSTRUCTION_HEADER(Substr)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, string), (1, begin), (2, length))
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
-class MClassConstructor : public MNullaryInstruction
-{
-    jsbytecode* pc_;
-
-    explicit MClassConstructor(jsbytecode* pc)
-      : MNullaryInstruction(classOpcode),
-        pc_(pc)
-    {
-        setResultType(MIRType::Object);
-    }
-
-  public:
-    INSTRUCTION_HEADER(ClassConstructor)
-    TRIVIAL_NEW_WRAPPERS
-
-    jsbytecode* pc() const {
-      return pc_;
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
 struct LambdaFunctionInfo
 {
     // The functions used in lambdas are the canonical original function in
     // the script, and are immutable except for delazification. Record this
     // information while still on the active thread to avoid races.
     CompilerFunction fun;
     uint16_t flags;
     uint16_t nargs;
@@ -8732,1027 +4120,27 @@ struct LambdaFunctionInfo
         return roots.append(fun->lazyScript());
     }
 
   private:
     LambdaFunctionInfo(const LambdaFunctionInfo&) = delete;
     void operator=(const LambdaFunctionInfo&) = delete;
 };
 
-class MLambda
-  : public MBinaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    const LambdaFunctionInfo info_;
-
-    MLambda(TempAllocator& alloc, CompilerConstraintList* constraints, MDefinition* envChain,
-            MConstant* cst)
-      : MBinaryInstruction(classOpcode, envChain, cst),
-        info_(&cst->toObject().as<JSFunction>())
-    {
-        setResultType(MIRType::Object);
-        if (!info().fun->isSingleton() && !ObjectGroup::useSingletonForClone(info().fun))
-            setResultTypeSet(MakeSingletonTypeSet(alloc, constraints, info().fun));
-    }
-
-  public:
-    INSTRUCTION_HEADER(Lambda)
-    TRIVIAL_NEW_WRAPPERS_WITH_ALLOC
-    NAMED_OPERANDS((0, environmentChain))
-
-    MConstant* functionOperand() const {
-        return getOperand(1)->toConstant();
-    }
-    const LambdaFunctionInfo& info() const {
-        return info_;
-    }
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        return true;
-    }
-    bool appendRoots(MRootList& roots) const override {
-        return info_.appendRoots(roots);
-    }
-};
-
-class MLambdaArrow
-  : public MTernaryInstruction,
-    public MixPolicy<ObjectPolicy<0>, BoxPolicy<1>, ObjectPolicy<2>>::Data
-{
-    const LambdaFunctionInfo info_;
-
-    MLambdaArrow(TempAllocator& alloc, CompilerConstraintList* constraints, MDefinition* envChain,
-                 MDefinition* newTarget, MConstant* cst)
-      : MTernaryInstruction(classOpcode, envChain, newTarget, cst),
-        info_(&cst->toObject().as<JSFunction>())
-    {
-        setResultType(MIRType::Object);
-        MOZ_ASSERT(!ObjectGroup::useSingletonForClone(info().fun));
-        if (!info().fun->isSingleton())
-            setResultTypeSet(MakeSingletonTypeSet(alloc, constraints, info().fun));
-    }
-
-  public:
-    INSTRUCTION_HEADER(LambdaArrow)
-    TRIVIAL_NEW_WRAPPERS_WITH_ALLOC
-    NAMED_OPERANDS((0, environmentChain), (1, newTargetDef))
-
-    MConstant* functionOperand() const {
-        return getOperand(2)->toConstant();
-    }
-    const LambdaFunctionInfo& info() const {
-        return info_;
-    }
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        return true;
-    }
-    bool appendRoots(MRootList& roots) const override {
-        return info_.appendRoots(roots);
-    }
-};
-
-class MSetFunName
-  : public MBinaryInstruction,
-    public MixPolicy<ObjectPolicy<0>, BoxPolicy<1> >::Data
-{
-    uint8_t prefixKind_;
-
-    explicit MSetFunName(MDefinition* fun, MDefinition* name, uint8_t prefixKind)
-      : MBinaryInstruction(classOpcode, fun, name),
-        prefixKind_(prefixKind)
-    {
-        setResultType(MIRType::None);
-    }
-
-  public:
-    INSTRUCTION_HEADER(SetFunName)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, fun), (1, name))
-
-    uint8_t prefixKind() const {
-        return prefixKind_;
-    }
-
-    bool possiblyCalls() const override {
-        return true;
-    }
-};
-
-// Returns obj->slots.
-class MSlots
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    explicit MSlots(MDefinition* object)
-      : MUnaryInstruction(classOpcode, object)
-    {
-        setResultType(MIRType::Slots);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(Slots)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object))
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::ObjectFields);
-    }
-
-    ALLOW_CLONE(MSlots)
-};
-
-// Returns obj->elements.
-class MElements
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    bool unboxed_;
-
-    explicit MElements(MDefinition* object, bool unboxed = false)
-      : MUnaryInstruction(classOpcode, object), unboxed_(unboxed)
-    {
-        setResultType(MIRType::Elements);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(Elements)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object))
-
-    bool unboxed() const {
-        return unboxed_;
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins) &&
-               ins->toElements()->unboxed() == unboxed();
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::ObjectFields);
-    }
-
-    ALLOW_CLONE(MElements)
-};
-
-// A constant value for some object's typed array elements.
-class MConstantElements : public MNullaryInstruction
-{
-    SharedMem<void*> value_;
-
-  protected:
-    explicit MConstantElements(SharedMem<void*> v)
-      : MNullaryInstruction(classOpcode),
-        value_(v)
-    {
-        setResultType(MIRType::Elements);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(ConstantElements)
-    TRIVIAL_NEW_WRAPPERS
-
-    SharedMem<void*> value() const {
-        return value_;
-    }
-
-    void printOpcode(GenericPrinter& out) const override;
-
-    HashNumber valueHash() const override {
-        return (HashNumber)(size_t) value_.asValue();
-    }
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return ins->isConstantElements() && ins->toConstantElements()->value() == value();
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    ALLOW_CLONE(MConstantElements)
-};
-
-// Passes through an object's elements, after ensuring it is entirely doubles.
-class MConvertElementsToDoubles
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-    explicit MConvertElementsToDoubles(MDefinition* elements)
-      : MUnaryInstruction(classOpcode, elements)
-    {
-        setGuard();
-        setMovable();
-        setResultType(MIRType::Elements);
-    }
-
-  public:
-    INSTRUCTION_HEADER(ConvertElementsToDoubles)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, elements))
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        // This instruction can read and write to the elements' contents.
-        // However, it is alright to hoist this from loops which explicitly
-        // read or write to the elements: such reads and writes will use double
-        // values and can be reordered freely wrt this conversion, except that
-        // definite double loads must follow the conversion. The latter
-        // property is ensured by chaining this instruction with the elements
-        // themselves, in the same manner as MBoundsCheck.
-        return AliasSet::None();
-    }
-};
-
-// If |elements| has the CONVERT_DOUBLE_ELEMENTS flag, convert value to
-// double. Else return the original value.
-class MMaybeToDoubleElement
-  : public MBinaryInstruction,
-    public IntPolicy<1>::Data
-{
-    MMaybeToDoubleElement(MDefinition* elements, MDefinition* value)
-      : MBinaryInstruction(classOpcode, elements, value)
-    {
-        MOZ_ASSERT(elements->type() == MIRType::Elements);
-        setMovable();
-        setResultType(MIRType::Value);
-    }
-
-  public:
-    INSTRUCTION_HEADER(MaybeToDoubleElement)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, elements), (1, value))
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::ObjectFields);
-    }
-};
-
-// Passes through an object, after ensuring its elements are not copy on write.
-class MMaybeCopyElementsForWrite
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    bool checkNative_;
-
-    explicit MMaybeCopyElementsForWrite(MDefinition* object, bool checkNative)
-      : MUnaryInstruction(classOpcode, object), checkNative_(checkNative)
-    {
-        setGuard();
-        setMovable();
-        setResultType(MIRType::Object);
-        setResultTypeSet(object->resultTypeSet());
-    }
-
-  public:
-    INSTRUCTION_HEADER(MaybeCopyElementsForWrite)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object))
-
-    bool checkNative() const {
-        return checkNative_;
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins) &&
-               checkNative() == ins->toMaybeCopyElementsForWrite()->checkNative();
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Store(AliasSet::ObjectFields);
-    }
-#ifdef DEBUG
-    bool needsResumePoint() const override {
-        // This instruction is idempotent and does not change observable
-        // behavior, so does not need its own resume point.
-        return false;
-    }
-#endif
-
-};
-
-// Load the initialized length from an elements header.
-class MInitializedLength
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-    explicit MInitializedLength(MDefinition* elements)
-      : MUnaryInstruction(classOpcode, elements)
-    {
-        setResultType(MIRType::Int32);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(InitializedLength)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, elements))
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::ObjectFields);
-    }
-
-    void computeRange(TempAllocator& alloc) override;
-
-    ALLOW_CLONE(MInitializedLength)
-};
-
-// Store to the initialized length in an elements header. Note the input is an
-// *index*, one less than the desired length.
-class MSetInitializedLength
-  : public MBinaryInstruction,
-    public NoTypePolicy::Data
-{
-    MSetInitializedLength(MDefinition* elements, MDefinition* index)
-      : MBinaryInstruction(classOpcode, elements, index)
-    { }
-
-  public:
-    INSTRUCTION_HEADER(SetInitializedLength)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, elements), (1, index))
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::Store(AliasSet::ObjectFields);
-    }
-
-    ALLOW_CLONE(MSetInitializedLength)
-};
-
-// Load the array length from an elements header.
-class MArrayLength
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-    explicit MArrayLength(MDefinition* elements)
-      : MUnaryInstruction(classOpcode, elements)
-    {
-        setResultType(MIRType::Int32);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(ArrayLength)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, elements))
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::ObjectFields);
-    }
-
-    void computeRange(TempAllocator& alloc) override;
-
-    ALLOW_CLONE(MArrayLength)
-};
-
-// Store to the length in an elements header. Note the input is an *index*, one
-// less than the desired length.
-class MSetArrayLength
-  : public MBinaryInstruction,
-    public NoTypePolicy::Data
-{
-    MSetArrayLength(MDefinition* elements, MDefinition* index)
-      : MBinaryInstruction(classOpcode, elements, index)
-    { }
-
-  public:
-    INSTRUCTION_HEADER(SetArrayLength)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, elements), (1, index))
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::Store(AliasSet::ObjectFields);
-    }
-
-    // By default no, unless built as a recovered instruction.
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        return isRecoveredOnBailout();
-    }
-};
-
-class MGetNextEntryForIterator
-  : public MBinaryInstruction,
-    public MixPolicy<ObjectPolicy<0>, ObjectPolicy<1> >::Data
-{
-  public:
-    enum Mode {
-        Map,
-        Set
-    };
-
-  private:
-    Mode mode_;
-
-    explicit MGetNextEntryForIterator(MDefinition* iter, MDefinition* result, Mode mode)
-      : MBinaryInstruction(classOpcode, iter, result), mode_(mode)
-    {
-        setResultType(MIRType::Boolean);
-    }
-
-  public:
-    INSTRUCTION_HEADER(GetNextEntryForIterator)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, iter), (1, result))
-
-    Mode mode() const {
-        return mode_;
-    }
-};
-
-// Read the length of a typed array.
-class MTypedArrayLength
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    explicit MTypedArrayLength(MDefinition* obj)
-      : MUnaryInstruction(classOpcode, obj)
-    {
-        setResultType(MIRType::Int32);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(TypedArrayLength)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object))
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::TypedArrayLength);
-    }
-
-    void computeRange(TempAllocator& alloc) override;
-};
-
-// Load a typed array's elements vector.
-class MTypedArrayElements
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    explicit MTypedArrayElements(MDefinition* object)
-      : MUnaryInstruction(classOpcode, object)
-    {
-        setResultType(MIRType::Elements);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(TypedArrayElements)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object))
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::ObjectFields);
-    }
-
-    ALLOW_CLONE(MTypedArrayElements)
-};
-
-class MSetDisjointTypedElements
-  : public MTernaryInstruction,
-    public NoTypePolicy::Data
-{
-    explicit MSetDisjointTypedElements(MDefinition* target, MDefinition* targetOffset,
-                                       MDefinition* source)
-      : MTernaryInstruction(classOpcode, target, targetOffset, source)
-    {
-        MOZ_ASSERT(target->type() == MIRType::Object);
-        MOZ_ASSERT(targetOffset->type() == MIRType::Int32);
-        MOZ_ASSERT(source->type() == MIRType::Object);
-        setResultType(MIRType::None);
-    }
-
-  public:
-    INSTRUCTION_HEADER(SetDisjointTypedElements)
-    NAMED_OPERANDS((0, target), (1, targetOffset), (2, source))
-
-    static MSetDisjointTypedElements*
-    New(TempAllocator& alloc, MDefinition* target, MDefinition* targetOffset,
-        MDefinition* source)
-    {
-        return new(alloc) MSetDisjointTypedElements(target, targetOffset, source);
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::Store(AliasSet::UnboxedElement);
-    }
-
-    ALLOW_CLONE(MSetDisjointTypedElements)
-};
-
-// Load a binary data object's "elements", which is just its opaque
-// binary data space. Eventually this should probably be
-// unified with `MTypedArrayElements`.
-class MTypedObjectElements
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    bool definitelyOutline_;
-
-  private:
-    explicit MTypedObjectElements(MDefinition* object, bool definitelyOutline)
-      : MUnaryInstruction(classOpcode, object),
-        definitelyOutline_(definitelyOutline)
-    {
-        setResultType(MIRType::Elements);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(TypedObjectElements)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object))
-
-    bool definitelyOutline() const {
-        return definitelyOutline_;
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isTypedObjectElements())
-            return false;
-        const MTypedObjectElements* other = ins->toTypedObjectElements();
-        if (other->definitelyOutline() != definitelyOutline())
-            return false;
-        return congruentIfOperandsEqual(other);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::ObjectFields);
-    }
-};
-
-// Inlined version of the js::SetTypedObjectOffset() intrinsic.
-class MSetTypedObjectOffset
-  : public MBinaryInstruction,
-    public NoTypePolicy::Data
-{
-  private:
-    MSetTypedObjectOffset(MDefinition* object, MDefinition* offset)
-      : MBinaryInstruction(classOpcode, object, offset)
-    {
-        MOZ_ASSERT(object->type() == MIRType::Object);
-        MOZ_ASSERT(offset->type() == MIRType::Int32);
-        setResultType(MIRType::None);
-    }
-
-  public:
-    INSTRUCTION_HEADER(SetTypedObjectOffset)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object), (1, offset))
-
-    AliasSet getAliasSet() const override {
-        // This affects the result of MTypedObjectElements,
-        // which is described as a load of ObjectFields.
-        return AliasSet::Store(AliasSet::ObjectFields);
-    }
-};
-
-class MKeepAliveObject
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    explicit MKeepAliveObject(MDefinition* object)
-      : MUnaryInstruction(classOpcode, object)
-    {
-        setResultType(MIRType::None);
-        setGuard();
-    }
-
-  public:
-    INSTRUCTION_HEADER(KeepAliveObject)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object))
-
-};
-
-// Perform !-operation
-class MNot
-  : public MUnaryInstruction,
-    public TestPolicy::Data
-{
-    bool operandMightEmulateUndefined_;
-    bool operandIsNeverNaN_;
-
-    explicit MNot(MDefinition* input, CompilerConstraintList* constraints = nullptr)
-      : MUnaryInstruction(classOpcode, input),
-        operandMightEmulateUndefined_(true),
-        operandIsNeverNaN_(false)
-    {
-        setResultType(MIRType::Boolean);
-        setMovable();
-        if (constraints)
-            cacheOperandMightEmulateUndefined(constraints);
-    }
-
-    void cacheOperandMightEmulateUndefined(CompilerConstraintList* constraints);
-
-  public:
-    static MNot* NewInt32(TempAllocator& alloc, MDefinition* input) {
-        MOZ_ASSERT(input->type() == MIRType::Int32 || input->type() == MIRType::Int64);
-        auto* ins = new(alloc) MNot(input);
-        ins->setResultType(MIRType::Int32);
-        return ins;
-    }
-
-    INSTRUCTION_HEADER(Not)
-    TRIVIAL_NEW_WRAPPERS
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-
-    void markNoOperandEmulatesUndefined() {
-        operandMightEmulateUndefined_ = false;
-    }
-    bool operandMightEmulateUndefined() const {
-        return operandMightEmulateUndefined_;
-    }
-    bool operandIsNeverNaN() const {
-        return operandIsNeverNaN_;
-    }
-
-    virtual AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    void collectRangeInfoPreTrunc() override;
-
-    void trySpecializeFloat32(TempAllocator& alloc) override;
-    bool isFloat32Commutative() const override { return true; }
-#ifdef DEBUG
-    bool isConsistentFloat32Use(MUse* use) const override {
-        return true;
-    }
-#endif
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        return true;
-    }
-};
-
-// Bailout if index + minimum < 0 or index + maximum >= length. The length used
-// in a bounds check must not be negative, or the wrong result may be computed
-// (unsigned comparisons may be used).
-class MBoundsCheck
-  : public MBinaryInstruction,
-    public MixPolicy<IntPolicy<0>, IntPolicy<1>>::Data
-{
-    // Range over which to perform the bounds check, may be modified by GVN.
-    int32_t minimum_;
-    int32_t maximum_;
-    bool fallible_;
-
-    MBoundsCheck(MDefinition* index, MDefinition* length)
-      : MBinaryInstruction(classOpcode, index, length),
-        minimum_(0), maximum_(0), fallible_(true)
-    {
-        setGuard();
-        setMovable();
-        MOZ_ASSERT(index->type() == MIRType::Int32);
-        MOZ_ASSERT(length->type() == MIRType::Int32);
-
-        // Returns the checked index.
-        setResultType(MIRType::Int32);
-    }
-
-  public:
-    INSTRUCTION_HEADER(BoundsCheck)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, index), (1, length))
-
-    int32_t minimum() const {
-        return minimum_;
-    }
-    void setMinimum(int32_t n) {
-        MOZ_ASSERT(fallible_);
-        minimum_ = n;
-    }
-    int32_t maximum() const {
-        return maximum_;
-    }
-    void setMaximum(int32_t n) {
-        MOZ_ASSERT(fallible_);
-        maximum_ = n;
-    }
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isBoundsCheck())
-            return false;
-        const MBoundsCheck* other = ins->toBoundsCheck();
-        if (minimum() != other->minimum() || maximum() != other->maximum())
-            return false;
-        if (fallible() != other->fallible())
-            return false;
-        return congruentIfOperandsEqual(other);
-    }
-    virtual AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    void computeRange(TempAllocator& alloc) override;
-    bool fallible() const {
-        return fallible_;
-    }
-    void collectRangeInfoPreTrunc() override;
-
-    ALLOW_CLONE(MBoundsCheck)
-};
-
-// Bailout if index < minimum.
-class MBoundsCheckLower
-  : public MUnaryInstruction,
-    public IntPolicy<0>::Data
-{
-    int32_t minimum_;
-    bool fallible_;
-
-    explicit MBoundsCheckLower(MDefinition* index)
-      : MUnaryInstruction(classOpcode, index), minimum_(0), fallible_(true)
-    {
-        setGuard();
-        setMovable();
-        MOZ_ASSERT(index->type() == MIRType::Int32);
-    }
-
-  public:
-    INSTRUCTION_HEADER(BoundsCheckLower)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, index))
-
-    int32_t minimum() const {
-        return minimum_;
-    }
-    void setMinimum(int32_t n) {
-        minimum_ = n;
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    bool fallible() const {
-        return fallible_;
-    }
-    void collectRangeInfoPreTrunc() override;
-};
-
 // Instructions which access an object's elements can either do so on a
 // definition accessing that elements pointer, or on the object itself, if its
 // elements are inline. In the latter case there must be an offset associated
 // with the access.
 static inline bool
 IsValidElementsType(MDefinition* elements, int32_t offsetAdjustment)
 {
     return elements->type() == MIRType::Elements ||
            (elements->type() == MIRType::Object && offsetAdjustment != 0);
 }
 
-// Load a value from a dense array's element vector and does a hole check if the
-// array is not known to be packed.
-class MLoadElement
-  : public MBinaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    bool needsHoleCheck_;
-    bool loadDoubles_;
-    int32_t offsetAdjustment_;
-
-    MLoadElement(MDefinition* elements, MDefinition* index,
-                 bool needsHoleCheck, bool loadDoubles, int32_t offsetAdjustment = 0)
-      : MBinaryInstruction(classOpcode, elements, index),
-        needsHoleCheck_(needsHoleCheck),
-        loadDoubles_(loadDoubles),
-        offsetAdjustment_(offsetAdjustment)
-    {
-        if (needsHoleCheck) {
-            // Uses may be optimized away based on this instruction's result
-            // type. This means it's invalid to DCE this instruction, as we
-            // have to invalidate when we read a hole.
-            setGuard();
-        }
-        setResultType(MIRType::Value);
-        setMovable();
-        MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment));
-        MOZ_ASSERT(index->type() == MIRType::Int32);
-    }
-
-  public:
-    INSTRUCTION_HEADER(LoadElement)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, elements), (1, index))
-
-    bool needsHoleCheck() const {
-        return needsHoleCheck_;
-    }
-    bool loadDoubles() const {
-        return loadDoubles_;
-    }
-    int32_t offsetAdjustment() const {
-        return offsetAdjustment_;
-    }
-    bool fallible() const {
-        return needsHoleCheck();
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isLoadElement())
-            return false;
-        const MLoadElement* other = ins->toLoadElement();
-        if (needsHoleCheck() != other->needsHoleCheck())
-            return false;
-        if (loadDoubles() != other->loadDoubles())
-            return false;
-        if (offsetAdjustment() != other->offsetAdjustment())
-            return false;
-        return congruentIfOperandsEqual(other);
-    }
-    AliasType mightAlias(const MDefinition* store) const override;
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::Element);
-    }
-
-    ALLOW_CLONE(MLoadElement)
-};
-
-// Load a value from the elements vector of a native object. If the index is
-// out-of-bounds, or the indexed slot has a hole, undefined is returned instead.
-class MLoadElementHole
-  : public MTernaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    bool needsNegativeIntCheck_;
-    bool needsHoleCheck_;
-
-    MLoadElementHole(MDefinition* elements, MDefinition* index, MDefinition* initLength,
-                     bool needsHoleCheck)
-      : MTernaryInstruction(classOpcode, elements, index, initLength),
-        needsNegativeIntCheck_(true),
-        needsHoleCheck_(needsHoleCheck)
-    {
-        setResultType(MIRType::Value);
-        setMovable();
-
-        // Set the guard flag to make sure we bail when we see a negative
-        // index. We can clear this flag (and needsNegativeIntCheck_) in
-        // collectRangeInfoPreTrunc.
-        setGuard();
-
-        MOZ_ASSERT(elements->type() == MIRType::Elements);
-        MOZ_ASSERT(index->type() == MIRType::Int32);
-        MOZ_ASSERT(initLength->type() == MIRType::Int32);
-    }
-
-  public:
-    INSTRUCTION_HEADER(LoadElementHole)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, elements), (1, index), (2, initLength))
-
-    bool needsNegativeIntCheck() const {
-        return needsNegativeIntCheck_;
-    }
-    bool needsHoleCheck() const {
-        return needsHoleCheck_;
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isLoadElementHole())
-            return false;
-        const MLoadElementHole* other = ins->toLoadElementHole();
-        if (needsHoleCheck() != other->needsHoleCheck())
-            return false;
-        if (needsNegativeIntCheck() != other->needsNegativeIntCheck())
-            return false;
-        return congruentIfOperandsEqual(other);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::Element);
-    }
-    void collectRangeInfoPreTrunc() override;
-
-    ALLOW_CLONE(MLoadElementHole)
-};
-
-class MLoadUnboxedObjectOrNull
-  : public MBinaryInstruction,
-    public SingleObjectPolicy::Data
-{
-  public:
-    enum NullBehavior {
-        HandleNull,
-        BailOnNull,
-        NullNotPossible
-    };
-
-  private:
-    NullBehavior nullBehavior_;
-    int32_t offsetAdjustment_;
-
-    MLoadUnboxedObjectOrNull(MDefinition* elements, MDefinition* index,
-                             NullBehavior nullBehavior, int32_t offsetAdjustment)
-      : MBinaryInstruction(classOpcode, elements, index),
-        nullBehavior_(nullBehavior),
-        offsetAdjustment_(offsetAdjustment)
-    {
-        if (nullBehavior == BailOnNull) {
-            // Don't eliminate loads which bail out on a null pointer, for the
-            // same reason as MLoadElement.
-            setGuard();
-        }
-        setResultType(nullBehavior == HandleNull ? MIRType::Value : MIRType::Object);
-        setMovable();
-        MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment));
-        MOZ_ASSERT(index->type() == MIRType::Int32);
-    }
-
-  public:
-    INSTRUCTION_HEADER(LoadUnboxedObjectOrNull)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, elements), (1, index))
-
-    NullBehavior nullBehavior() const {
-        return nullBehavior_;
-    }
-    int32_t offsetAdjustment() const {
-        return offsetAdjustment_;
-    }
-    bool fallible() const {
-        return nullBehavior() == BailOnNull;
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isLoadUnboxedObjectOrNull())
-            return false;
-        const MLoadUnboxedObjectOrNull* other = ins->toLoadUnboxedObjectOrNull();
-        if (nullBehavior() != other->nullBehavior())
-            return false;
-        if (offsetAdjustment() != other->offsetAdjustment())
-            return false;
-        return congruentIfOperandsEqual(other);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::UnboxedElement);
-    }
-    AliasType mightAlias(const MDefinition* store) const override;
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-
-    ALLOW_CLONE(MLoadUnboxedObjectOrNull)
-};
-
-class MLoadUnboxedString
-  : public MBinaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    int32_t offsetAdjustment_;
-
-    MLoadUnboxedString(MDefinition* elements, MDefinition* index, int32_t offsetAdjustment = 0)
-      : MBinaryInstruction(classOpcode, elements, index),
-        offsetAdjustment_(offsetAdjustment)
-    {
-        setResultType(MIRType::String);
-        setMovable();
-        MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment));
-        MOZ_ASSERT(index->type() == MIRType::Int32);
-    }
-
-  public:
-    INSTRUCTION_HEADER(LoadUnboxedString)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, elements), (1, index))
-
-    int32_t offsetAdjustment() const {
-        return offsetAdjustment_;
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isLoadUnboxedString())
-            return false;
-        const MLoadUnboxedString* other = ins->toLoadUnboxedString();
-        if (offsetAdjustment() != other->offsetAdjustment())
-            return false;
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::UnboxedElement);
-    }
-
-    ALLOW_CLONE(MLoadUnboxedString)
-};
-
 class MStoreElementCommon
 {
     MIRType elementType_;
     bool needsBarrier_;
 
   protected:
     MStoreElementCommon()
       : elementType_(MIRType::Value),
@@ -9770,418 +4158,16 @@ class MStoreElementCommon
     bool needsBarrier() const {
         return needsBarrier_;
     }
     void setNeedsBarrier() {
         needsBarrier_ = true;
     }
 };
 
-// This instruction is used to load an element of a non-escaped inlined array.
-class MLoadElementFromState
-  : public MBinaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    MLoadElementFromState(MDefinition* array, MDefinition* index)
-      : MBinaryInstruction(classOpcode, array, index)
-    {
-        MOZ_ASSERT(array->isArgumentState());
-        MOZ_ASSERT(index->type() == MIRType::Int32);
-        setResultType(MIRType::Value);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(LoadElementFromState)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, array), (1, index));
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
-// Store a value to a dense array slots vector.
-class MStoreElement
-  : public MTernaryInstruction,
-    public MStoreElementCommon,
-    public MixPolicy<SingleObjectPolicy, NoFloatPolicy<2> >::Data
-{
-    bool needsHoleCheck_;
-    int32_t offsetAdjustment_;
-
-    MStoreElement(MDefinition* elements, MDefinition* index, MDefinition* value,
-                  bool needsHoleCheck, int32_t offsetAdjustment = 0)
-      : MTernaryInstruction(classOpcode, elements, index, value)
-    {
-        needsHoleCheck_ = needsHoleCheck;
-        offsetAdjustment_ = offsetAdjustment;
-        MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment));
-        MOZ_ASSERT(index->type() == MIRType::Int32);
-    }
-
-  public:
-    INSTRUCTION_HEADER(StoreElement)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, elements), (1, index), (2, value))
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::Store(AliasSet::Element);
-    }
-    bool needsHoleCheck() const {
-        return needsHoleCheck_;
-    }
-    int32_t offsetAdjustment() const {
-        return offsetAdjustment_;
-    }
-    bool fallible() const {
-        return needsHoleCheck();
-    }
-
-    ALLOW_CLONE(MStoreElement)
-};
-
-// Like MStoreElement, but supports indexes >= initialized length. The downside
-// is that we cannot hoist the elements vector and bounds check, since this
-// instruction may update the (initialized) length and reallocate the elements
-// vector.
-class MStoreElementHole
-  : public MQuaternaryInstruction,
-    public MStoreElementCommon,
-    public MixPolicy<SingleObjectPolicy, NoFloatPolicy<3> >::Data
-{
-    MStoreElementHole(MDefinition* object, MDefinition* elements,
-                      MDefinition* index, MDefinition* value)
-      : MQuaternaryInstruction(classOpcode, object, elements, index, value)
-    {
-        MOZ_ASSERT(elements->type() == MIRType::Elements);
-        MOZ_ASSERT(index->type() == MIRType::Int32);
-    }
-
-  public:
-    INSTRUCTION_HEADER(StoreElementHole)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object), (1, elements), (2, index), (3, value))
-
-    AliasSet getAliasSet() const override {
-        // StoreElementHole can update the initialized length, the array length
-        // or reallocate obj->elements.
-        return AliasSet::Store(AliasSet::ObjectFields | AliasSet::Element);
-    }
-
-    ALLOW_CLONE(MStoreElementHole)
-};
-
-// Try to store a value to a dense array slots vector. May fail due to the object being frozen.
-// Cannot be used on an object that has extra indexed properties.
-class MFallibleStoreElement
-  : public MQuaternaryInstruction,
-    public MStoreElementCommon,
-    public MixPolicy<SingleObjectPolicy, NoFloatPolicy<3> >::Data
-{
-    bool strict_;
-
-    MFallibleStoreElement(MDefinition* object, MDefinition* elements,
-                          MDefinition* index, MDefinition* value,
-                          bool strict)
-      : MQuaternaryInstruction(classOpcode, object, elements, index, value),
-        strict_(strict)
-    {
-        MOZ_ASSERT(elements->type() == MIRType::Elements);
-        MOZ_ASSERT(index->type() == MIRType::Int32);
-    }
-
-  public:
-    INSTRUCTION_HEADER(FallibleStoreElement)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object), (1, elements), (2, index), (3, value))
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::Store(AliasSet::ObjectFields | AliasSet::Element);
-    }
-    bool strict() const {
-        return strict_;
-    }
-
-    ALLOW_CLONE(MFallibleStoreElement)
-};
-
-
-// Store an unboxed object or null pointer to a v\ector.
-class MStoreUnboxedObjectOrNull
-  : public MQuaternaryInstruction,
-    public StoreUnboxedObjectOrNullPolicy::Data
-{
-    int32_t offsetAdjustment_;
-    bool preBarrier_;
-
-    MStoreUnboxedObjectOrNull(MDefinition* elements, MDefinition* index,
-                              MDefinition* value, MDefinition* typedObj,
-                              int32_t offsetAdjustment = 0, bool preBarrier = true)
-      : MQuaternaryInstruction(classOpcode, elements, index, value, typedObj),
-        offsetAdjustment_(offsetAdjustment),
-        preBarrier_(preBarrier)
-    {
-        MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment));
-        MOZ_ASSERT(index->type() == MIRType::Int32);
-        MOZ_ASSERT(typedObj->type() == MIRType::Object);
-    }
-
-  public:
-    INSTRUCTION_HEADER(StoreUnboxedObjectOrNull)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, elements), (1, index), (2, value), (3, typedObj))
-
-    int32_t offsetAdjustment() const {
-        return offsetAdjustment_;
-    }
-    bool preBarrier() const {
-        return preBarrier_;
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Store(AliasSet::UnboxedElement);
-    }
-
-    // For StoreUnboxedObjectOrNullPolicy.
-    void setValue(MDefinition* def) {
-        replaceOperand(2, def);
-    }
-
-    ALLOW_CLONE(MStoreUnboxedObjectOrNull)
-};
-
-// Store an unboxed object or null pointer to a vector.
-class MStoreUnboxedString
-  : public MTernaryInstruction,
-    public MixPolicy<SingleObjectPolicy, ConvertToStringPolicy<2> >::Data
-{
-    int32_t offsetAdjustment_;
-    bool preBarrier_;
-
-    MStoreUnboxedString(MDefinition* elements, MDefinition* index, MDefinition* value,
-                        int32_t offsetAdjustment = 0, bool preBarrier = true)
-      : MTernaryInstruction(classOpcode, elements, index, value),
-        offsetAdjustment_(offsetAdjustment),
-        preBarrier_(preBarrier)
-    {
-        MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment));
-        MOZ_ASSERT(index->type() == MIRType::Int32);
-    }
-
-  public:
-    INSTRUCTION_HEADER(StoreUnboxedString)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, elements), (1, index), (2, value))
-
-    int32_t offsetAdjustment() const {
-        return offsetAdjustment_;
-    }
-    bool preBarrier() const {
-        return preBarrier_;
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Store(AliasSet::UnboxedElement);
-    }
-
-    ALLOW_CLONE(MStoreUnboxedString)
-};
-
-// Passes through an object, after ensuring it is converted from an unboxed
-// object to a native representation.
-class MConvertUnboxedObjectToNative
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    CompilerObjectGroup group_;
-
-    explicit MConvertUnboxedObjectToNative(MDefinition* obj, ObjectGroup* group)
-      : MUnaryInstruction(classOpcode, obj),
-        group_(group)
-    {
-        setGuard();
-        setMovable();
-        setResultType(MIRType::Object);
-    }
-
-  public:
-    INSTRUCTION_HEADER(ConvertUnboxedObjectToNative)
-    NAMED_OPERANDS((0, object))
-
-    static MConvertUnboxedObjectToNative* New(TempAllocator& alloc, MDefinition* obj,
-                                              ObjectGroup* group);
-
-    ObjectGroup* group() const {
-        return group_;
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!congruentIfOperandsEqual(ins))
-            return false;
-        return ins->toConvertUnboxedObjectToNative()->group() == group();
-    }
-    AliasSet getAliasSet() const override {
-        // This instruction can read and write to all parts of the object, but
-        // is marked as non-effectful so it can be consolidated by LICM and GVN
-        // and avoid inhibiting other optimizations.
-        //
-        // This is valid to do because when unboxed objects might have a native
-        // group they can be converted to, we do not optimize accesses to the
-        // unboxed objects and do not guard on their group or shape (other than
-        // in this opcode).
-        //
-        // Later accesses can assume the object has a native representation
-        // and optimize accordingly. Those accesses cannot be reordered before
-        // this instruction, however. This is prevented by chaining this
-        // instruction with the object itself, in the same way as MBoundsCheck.
-        return AliasSet::None();
-    }
-    bool appendRoots(MRootList& roots) const override {
-        return roots.append(group_);
-    }
-};
-
-// Array.prototype.pop or Array.prototype.shift on a dense array.
-class MArrayPopShift
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-  public:
-    enum Mode {
-        Pop,
-        Shift
-    };
-
-  private:
-    Mode mode_;
-    bool needsHoleCheck_;
-    bool maybeUndefined_;
-
-    MArrayPopShift(MDefinition* object, Mode mode,
-                   bool needsHoleCheck, bool maybeUndefined)
-      : MUnaryInstruction(classOpcode, object), mode_(mode),
-        needsHoleCheck_(needsHoleCheck), maybeUndefined_(maybeUndefined)
-    { }
-
-  public:
-    INSTRUCTION_HEADER(ArrayPopShift)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object))
-
-    bool needsHoleCheck() const {
-        return needsHoleCheck_;
-    }
-    bool maybeUndefined() const {
-        return maybeUndefined_;
-    }
-    bool mode() const {
-        return mode_;
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Store(AliasSet::ObjectFields | AliasSet::Element);
-    }
-
-    ALLOW_CLONE(MArrayPopShift)
-};
-
-// Array.prototype.push on a dense array. Returns the new array length.
-class MArrayPush
-  : public MBinaryInstruction,
-    public MixPolicy<SingleObjectPolicy, NoFloatPolicy<1> >::Data
-{
-    MArrayPush(MDefinition* object, MDefinition* value)
-      : MBinaryInstruction(classOpcode, object, value)
-    {
-        setResultType(MIRType::Int32);
-    }
-
-  public:
-    INSTRUCTION_HEADER(ArrayPush)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object), (1, value))
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::Store(AliasSet::ObjectFields | AliasSet::Element);
-    }
-    void computeRange(TempAllocator& alloc) override;
-
-    ALLOW_CLONE(MArrayPush)
-};
-
-// Array.prototype.slice on a dense array.
-class MArraySlice
-  : public MTernaryInstruction,
-    public MixPolicy<ObjectPolicy<0>, IntPolicy<1>, IntPolicy<2>>::Data
-{
-    CompilerObject templateObj_;
-    gc::InitialHeap initialHeap_;
-
-    MArraySlice(CompilerConstraintList* constraints, MDefinition* obj,
-                MDefinition* begin, MDefinition* end,
-                JSObject* templateObj, gc::InitialHeap initialHeap)
-      : MTernaryInstruction(classOpcode, obj, begin, end),
-        templateObj_(templateObj),
-        initialHeap_(initialHeap)
-    {
-        setResultType(MIRType::Object);
-    }
-
-  public:
-    INSTRUCTION_HEADER(ArraySlice)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object), (1, begin), (2, end))
-
-    JSObject* templateObj() const {
-        return templateObj_;
-    }
-
-    gc::InitialHeap initialHeap() const {
-        return initialHeap_;
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::Store(AliasSet::Element | AliasSet::ObjectFields);
-    }
-    bool possiblyCalls() const override {
-        return true;
-    }
-    bool appendRoots(MRootList& roots) const override {
-        return roots.append(templateObj_);
-    }
-};
-
-class MArrayJoin
-    : public MBinaryInstruction,
-      public MixPolicy<ObjectPolicy<0>, StringPolicy<1> >::Data
-{
-    bool optimizeForArray_;
-
-    MArrayJoin(MDefinition* array, MDefinition* sep, bool optimizeForArray)
-        : MBinaryInstruction(classOpcode, array, sep),
-          optimizeForArray_(optimizeForArray)
-    {
-        setResultType(MIRType::String);
-    }
-  public:
-    INSTRUCTION_HEADER(ArrayJoin)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, array), (1, sep))
-
-    bool optimizeForArray() const {
-        return optimizeForArray_;
-    }
-    bool possiblyCalls() const override {
-        return true;
-    }
-    virtual AliasSet getAliasSet() const override {
-        // Array.join might coerce the elements of the Array to strings.  This
-        // coercion might cause the evaluation of the some JavaScript code.
-        return AliasSet::Store(AliasSet::Any);
-    }
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-};
-
 // All barriered operations - MCompareExchangeTypedArrayElement,
 // MExchangeTypedArrayElement, and MAtomicTypedArrayElementBinop, as
 // well as MLoadUnboxedScalar and MStoreUnboxedScalar when they are
 // marked as requiring a memory barrer - have the following
 // attributes:
 //
 // - Not movable
 // - Not removable
@@ -10196,237 +4182,16 @@ class MArrayJoin
 enum MemoryBarrierRequirement
 {
     DoesNotRequireMemoryBarrier,
     DoesRequireMemoryBarrier
 };
 
 // Also see comments at MMemoryBarrierRequirement, above.
 
-// Load an unboxed scalar value from a typed array or other object.
-class MLoadUnboxedScalar
-  : public MBinaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    Scalar::Type storageType_;
-    Scalar::Type readType_;
-    unsigned numElems_; // used only for SIMD
-    bool requiresBarrier_;
-    int32_t offsetAdjustment_;
-    bool canonicalizeDoubles_;
-
-    MLoadUnboxedScalar(MDefinition* elements, MDefinition* index, Scalar::Type storageType,
-                       MemoryBarrierRequirement requiresBarrier = DoesNotRequireMemoryBarrier,
-                       int32_t offsetAdjustment = 0, bool canonicalizeDoubles = true)
-      : MBinaryInstruction(classOpcode, elements, index),
-        storageType_(storageType),
-        readType_(storageType),
-        numElems_(1),
-        requiresBarrier_(requiresBarrier == DoesRequireMemoryBarrier),
-        offsetAdjustment_(offsetAdjustment),
-        canonicalizeDoubles_(canonicalizeDoubles)
-    {
-        setResultType(MIRType::Value);
-        if (requiresBarrier_)
-            setGuard();         // Not removable or movable
-        else
-            setMovable();
-        MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment));
-        MOZ_ASSERT(index->type() == MIRType::Int32);
-        MOZ_ASSERT(storageType >= 0 && storageType < Scalar::MaxTypedArrayViewType);
-    }
-
-  public:
-    INSTRUCTION_HEADER(LoadUnboxedScalar)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, elements), (1, index))
-
-    void setSimdRead(Scalar::Type type, unsigned numElems) {
-        readType_ = type;
-        numElems_ = numElems;
-    }
-    unsigned numElems() const {
-        return numElems_;
-    }
-    Scalar::Type readType() const {
-        return readType_;
-    }
-
-    Scalar::Type storageType() const {
-        return storageType_;
-    }
-    bool fallible() const {
-        // Bailout if the result does not fit in an int32.
-        return readType_ == Scalar::Uint32 && type() == MIRType::Int32;
-    }
-    bool requiresMemoryBarrier() const {
-        return requiresBarrier_;
-    }
-    bool canonicalizeDoubles() const {
-        return canonicalizeDoubles_;
-    }
-    int32_t offsetAdjustment() const {
-        return offsetAdjustment_;
-    }
-    void setOffsetAdjustment(int32_t offsetAdjustment) {
-        offsetAdjustment_ = offsetAdjustment;
-    }
-    AliasSet getAliasSet() const override {
-        // When a barrier is needed make the instruction effectful by
-        // giving it a "store" effect.
-        if (requiresBarrier_)
-            return AliasSet::Store(AliasSet::UnboxedElement);
-        return AliasSet::Load(AliasSet::UnboxedElement);
-    }
-
-    bool congruentTo(const MDefinition* ins) const override {
-        if (requiresBarrier_)
-            return false;
-        if (!ins->isLoadUnboxedScalar())
-            return false;
-        const MLoadUnboxedScalar* other = ins->toLoadUnboxedScalar();
-        if (storageType_ != other->storageType_)
-            return false;
-        if (readType_ != other->readType_)
-            return false;
-        if (numElems_ != other->numElems_)
-            return false;
-        if (offsetAdjustment() != other->offsetAdjustment())
-            return false;
-        if (canonicalizeDoubles() != other->canonicalizeDoubles())
-            return false;
-        return congruentIfOperandsEqual(other);
-    }
-
-    void printOpcode(GenericPrinter& out) const override;
-
-    void computeRange(TempAllocator& alloc) override;
-
-    bool canProduceFloat32() const override { return storageType_ == Scalar::Float32; }
-
-    ALLOW_CLONE(MLoadUnboxedScalar)
-};
-
-// Load a value from a typed array. Out-of-bounds accesses are handled in-line.
-class MLoadTypedArrayElementHole
-  : public MBinaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    Scalar::Type arrayType_;
-    bool allowDouble_;
-
-    MLoadTypedArrayElementHole(MDefinition* object, MDefinition* index, Scalar::Type arrayType, bool allowDouble)
-      : MBinaryInstruction(classOpcode, object, index),
-        arrayType_(arrayType), allowDouble_(allowDouble)
-    {
-        setResultType(MIRType::Value);
-        setMovable();
-        MOZ_ASSERT(index->type() == MIRType::Int32);
-        MOZ_ASSERT(arrayType >= 0 && arrayType < Scalar::MaxTypedArrayViewType);
-    }
-
-  public:
-    INSTRUCTION_HEADER(LoadTypedArrayElementHole)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object), (1, index))
-
-    Scalar::Type arrayType() const {
-        return arrayType_;
-    }
-    bool allowDouble() const {
-        return allowDouble_;
-    }
-    bool fallible() const {
-        return arrayType_ == Scalar::Uint32 && !allowDouble_;
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isLoadTypedArrayElementHole())
-            return false;
-        const MLoadTypedArrayElementHole* other = ins->toLoadTypedArrayElementHole();
-        if (arrayType() != other->arrayType())
-            return false;
-        if (allowDouble() != other->allowDouble())
-            return false;
-        return congruentIfOperandsEqual(other);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::UnboxedElement);
-    }
-    bool canProduceFloat32() const override { return arrayType_ == Scalar::Float32; }
-
-    ALLOW_CLONE(MLoadTypedArrayElementHole)
-};
-
-// Load a value fallibly or infallibly from a statically known typed array.
-class MLoadTypedArrayElementStatic
-  : public MUnaryInstruction,
-    public ConvertToInt32Policy<0>::Data
-{
-    MLoadTypedArrayElementStatic(JSObject* someTypedArray, MDefinition* ptr,
-                                 int32_t offset = 0, bool needsBoundsCheck = true)
-      : MUnaryInstruction(classOpcode, ptr), someTypedArray_(someTypedArray), offset_(offset),
-        needsBoundsCheck_(needsBoundsCheck), fallible_(true)
-    {
-        int type = accessType();
-        if (type == Scalar::Float32)
-            setResultType(MIRType::Float32);
-        else if (type == Scalar::Float64)
-            setResultType(MIRType::Double);
-        else
-            setResultType(MIRType::Int32);
-    }
-
-    CompilerObject someTypedArray_;
-
-    // An offset to be encoded in the load instruction - taking advantage of the
-    // addressing modes. This is only non-zero when the access is proven to be
-    // within bounds.
-    int32_t offset_;
-    bool needsBoundsCheck_;
-    bool fallible_;
-
-  public:
-    INSTRUCTION_HEADER(LoadTypedArrayElementStatic)
-    TRIVIAL_NEW_WRAPPERS
-
-    Scalar::Type accessType() const {
-        return someTypedArray_->as<TypedArrayObject>().type();
-    }
-    SharedMem<void*> base() const;
-    size_t length() const;
-
-    MDefinition* ptr() const { return getOperand(0); }
-    int32_t offset() const { return offset_; }
-    void setOffset(int32_t offset) { offset_ = offset; }
-    bool congruentTo(const MDefinition* ins) const override;
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::UnboxedElement);
-    }
-
-    bool needsBoundsCheck() const { return needsBoundsCheck_; }
-    void setNeedsBoundsCheck(bool v) { needsBoundsCheck_ = v; }
-
-    bool fallible() const {
-        return fallible_;
-    }
-
-    void setInfallible() {
-        fallible_ = false;
-    }
-
-    void computeRange(TempAllocator& alloc) override;
-    bool needTruncation(TruncateKind kind) override;
-    bool canProduceFloat32() const override { return accessType() == Scalar::Float32; }
-    void collectRangeInfoPreTrunc() override;
-
-    bool appendRoots(MRootList& roots) const override {
-        return roots.append(someTypedArray_);
-    }
-};
-
 // Base class for MIR ops that write unboxed scalar values.
 class StoreUnboxedScalarBase
 {
     Scalar::Type writeType_;
 
   protected:
     explicit StoreUnboxedScalarBase(Scalar::Type writeType)
       : writeType_(writeType)
@@ -10457,399 +4222,16 @@ class StoreUnboxedScalarBase
         return writeType_ == Scalar::Float32 ||
                writeType_ == Scalar::Float64;
     }
     bool isSimdWrite() const {
         return Scalar::isSimdType(writeType());
     }
 };
 
-// Store an unboxed scalar value to a typed array or other object.
-class MStoreUnboxedScalar
-  : public MTernaryInstruction,
-    public StoreUnboxedScalarBase,
-    public StoreUnboxedScalarPolicy::Data
-{
-  public:
-    enum TruncateInputKind {
-        DontTruncateInput,
-        TruncateInput
-    };
-
-  private:
-    Scalar::Type storageType_;
-
-    // Whether this store truncates out of range inputs, for use by range analysis.
-    TruncateInputKind truncateInput_;
-
-    bool requiresBarrier_;
-    int32_t offsetAdjustment_;
-    unsigned numElems_; // used only for SIMD
-
-    MStoreUnboxedScalar(MDefinition* elements, MDefinition* index, MDefinition* value,
-                        Scalar::Type storageType, TruncateInputKind truncateInput,
-                        MemoryBarrierRequirement requiresBarrier = DoesNotRequireMemoryBarrier,
-                        int32_t offsetAdjustment = 0)
-      : MTernaryInstruction(classOpcode, elements, index, value),
-        StoreUnboxedScalarBase(storageType),
-        storageType_(storageType),
-        truncateInput_(truncateInput),
-        requiresBarrier_(requiresBarrier == DoesRequireMemoryBarrier),
-        offsetAdjustment_(offsetAdjustment),
-        numElems_(1)
-    {
-        if (requiresBarrier_)
-            setGuard();         // Not removable or movable
-        else
-            setMovable();
-        MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment));
-        MOZ_ASSERT(index->type() == MIRType::Int32);
-        MOZ_ASSERT(storageType >= 0 && storageType < Scalar::MaxTypedArrayViewType);
-    }
-
-  public:
-    INSTRUCTION_HEADER(StoreUnboxedScalar)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, elements), (1, index), (2, value))
-
-    void setSimdWrite(Scalar::Type writeType, unsigned numElems) {
-        MOZ_ASSERT(Scalar::isSimdType(writeType));
-        setWriteType(writeType);
-        numElems_ = numElems;
-    }
-    unsigned numElems() const {
-        return numElems_;
-    }
-    Scalar::Type storageType() const {
-        return storageType_;
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Store(AliasSet::UnboxedElement);
-    }
-    TruncateInputKind truncateInput() const {
-        return truncateInput_;
-    }
-    bool requiresMemoryBarrier() const {
-        return requiresBarrier_;
-    }
-    int32_t offsetAdjustment() const {
-        return offsetAdjustment_;
-    }
-    TruncateKind operandTruncateKind(size_t index) const override;
-
-    bool canConsumeFloat32(MUse* use) const override {
-        return use == getUseFor(2) && writeType() == Scalar::Float32;
-    }
-
-    ALLOW_CLONE(MStoreUnboxedScalar)
-};
-
-class MStoreTypedArrayElementHole
-  : public MQuaternaryInstruction,
-    public StoreUnboxedScalarBase,
-    public StoreTypedArrayHolePolicy::Data
-{
-    MStoreTypedArrayElementHole(MDefinition* elements, MDefinition* length, MDefinition* index,
-                                MDefinition* value, Scalar::Type arrayType)
-      : MQuaternaryInstruction(classOpcode, elements, length, index, value),
-        StoreUnboxedScalarBase(arrayType)
-    {
-        setMovable();
-        MOZ_ASSERT(elements->type() == MIRType::Elements);
-        MOZ_ASSERT(length->type() == MIRType::Int32);
-        MOZ_ASSERT(index->type() == MIRType::Int32);
-        MOZ_ASSERT(arrayType >= 0 && arrayType < Scalar::MaxTypedArrayViewType);
-    }
-
-  public:
-    INSTRUCTION_HEADER(StoreTypedArrayElementHole)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, elements), (1, length), (2, index), (3, value))
-
-    Scalar::Type arrayType() const {
-        MOZ_ASSERT(!Scalar::isSimdType(writeType()),
-                   "arrayType == writeType iff the write type isn't SIMD");
-        return writeType();
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Store(AliasSet::UnboxedElement);
-    }
-    TruncateKind operandTruncateKind(size_t index) const override;
-
-    bool canConsumeFloat32(MUse* use) const override {
-        return use == getUseFor(3) && arrayType() == Scalar::Float32;
-    }
-
-    ALLOW_CLONE(MStoreTypedArrayElementHole)
-};
-
-// Store a value infallibly to a statically known typed array.
-class MStoreTypedArrayElementStatic :
-    public MBinaryInstruction,
-    public StoreUnboxedScalarBase,
-    public StoreTypedArrayElementStaticPolicy::Data
-{
-    MStoreTypedArrayElementStatic(JSObject* someTypedArray, MDefinition* ptr, MDefinition* v,
-                                  int32_t offset = 0, bool needsBoundsCheck = true)
-        : MBinaryInstruction(classOpcode, ptr, v),
-          StoreUnboxedScalarBase(someTypedArray->as<TypedArrayObject>().type()),
-          someTypedArray_(someTypedArray),
-          offset_(offset), needsBoundsCheck_(needsBoundsCheck)
-    {}
-
-    CompilerObject someTypedArray_;
-
-    // An offset to be encoded in the store instruction - taking advantage of the
-    // addressing modes. This is only non-zero when the access is proven to be
-    // within bounds.
-    int32_t offset_;
-    bool needsBoundsCheck_;
-
-  public:
-    INSTRUCTION_HEADER(StoreTypedArrayElementStatic)
-    TRIVIAL_NEW_WRAPPERS
-
-    Scalar::Type accessType() const {
-        return writeType();
-    }
-
-    SharedMem<void*> base() const;
-    size_t length() const;
-
-    MDefinition* ptr() const { return getOperand(0); }
-    MDefinition* value() const { return getOperand(1); }
-    bool needsBoundsCheck() const { return needsBoundsCheck_; }
-    void setNeedsBoundsCheck(bool v) { needsBoundsCheck_ = v; }
-    int32_t offset() const { return offset_; }
-    void setOffset(int32_t offset) { offset_ = offset; }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Store(AliasSet::UnboxedElement);
-    }
-    TruncateKind operandTruncateKind(size_t index) const override;
-
-    bool canConsumeFloat32(MUse* use) const override {
-        return use == getUseFor(1) && accessType() == Scalar::Float32;
-    }
-    void collectRangeInfoPreTrunc() override;
-
-    bool appendRoots(MRootList& roots) const override {
-        return roots.append(someTypedArray_);
-    }
-};
-
-// Compute an "effective address", i.e., a compound computation of the form:
-//   base + index * scale + displacement
-class MEffectiveAddress
-  : public MBinaryInstruction,
-    public NoTypePolicy::Data
-{
-    MEffectiveAddress(MDefinition* base, MDefinition* index, Scale scale, int32_t displacement)
-      : MBinaryInstruction(classOpcode, base, index),
-        scale_(scale), displacement_(displacement)
-    {
-        MOZ_ASSERT(base->type() == MIRType::Int32);
-        MOZ_ASSERT(index->type() == MIRType::Int32);
-        setMovable();
-        setResultType(MIRType::Int32);
-    }
-
-    Scale scale_;
-    int32_t displacement_;
-
-  public:
-    INSTRUCTION_HEADER(EffectiveAddress)
-    TRIVIAL_NEW_WRAPPERS
-
-    MDefinition* base() const {
-        return lhs();
-    }
-    MDefinition* index() const {
-        return rhs();
-    }
-    Scale scale() const {
-        return scale_;
-    }
-    int32_t displacement() const {
-        return displacement_;
-    }
-
-    ALLOW_CLONE(MEffectiveAddress)
-};
-
-// Clamp input to range [0, 255] for Uint8ClampedArray.
-class MClampToUint8
-  : public MUnaryInstruction,
-    public ClampPolicy::Data
-{
-    explicit MClampToUint8(MDefinition* input)
-      : MUnaryInstruction(classOpcode, input)
-    {
-        setResultType(MIRType::Int32);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(ClampToUint8)
-    TRIVIAL_NEW_WRAPPERS
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    void computeRange(TempAllocator& alloc) override;
-
-    ALLOW_CLONE(MClampToUint8)
-};
-
-class MLoadFixedSlot
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    size_t slot_;
-
-  protected:
-    MLoadFixedSlot(MDefinition* obj, size_t slot)
-      : MUnaryInstruction(classOpcode, obj), slot_(slot)
-    {
-        setResultType(MIRType::Value);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(LoadFixedSlot)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object))
-
-    size_t slot() const {
-        return slot_;
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isLoadFixedSlot())
-            return false;
-        if (slot() != ins->toLoadFixedSlot()->slot())
-            return false;
-        return congruentIfOperandsEqual(ins);
-    }
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::FixedSlot);
-    }
-
-    AliasType mightAlias(const MDefinition* store) const override;
-
-    ALLOW_CLONE(MLoadFixedSlot)
-};
-
-class MLoadFixedSlotAndUnbox
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    size_t slot_;
-    MUnbox::Mode mode_;
-    BailoutKind bailoutKind_;
-  protected:
-    MLoadFixedSlotAndUnbox(MDefinition* obj, size_t slot, MUnbox::Mode mode, MIRType type,
-                           BailoutKind kind)
-      : MUnaryInstruction(classOpcode, obj), slot_(slot), mode_(mode), bailoutKind_(kind)
-    {
-        setResultType(type);
-        setMovable();
-        if (mode_ == MUnbox::TypeBarrier || mode_ == MUnbox::Fallible)
-            setGuard();
-    }
-
-  public:
-    INSTRUCTION_HEADER(LoadFixedSlotAndUnbox)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object))
-
-    size_t slot() const {
-        return slot_;
-    }
-    MUnbox::Mode mode() const {
-        return mode_;
-    }
-    BailoutKind bailoutKind() const {
-        return bailoutKind_;
-    }
-    bool fallible() const {
-        return mode_ != MUnbox::Infallible;
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isLoadFixedSlotAndUnbox() ||
-            slot() != ins->toLoadFixedSlotAndUnbox()->slot() ||
-            mode() != ins->toLoadFixedSlotAndUnbox()->mode())
-        {
-            return false;
-        }
-        return congruentIfOperandsEqual(ins);
-    }
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::FixedSlot);
-    }
-
-    AliasType mightAlias(const MDefinition* store) const override;
-
-    ALLOW_CLONE(MLoadFixedSlotAndUnbox);
-};
-
-class MStoreFixedSlot
-  : public MBinaryInstruction,
-    public MixPolicy<SingleObjectPolicy, NoFloatPolicy<1> >::Data
-{
-    bool needsBarrier_;
-    size_t slot_;
-
-    MStoreFixedSlot(MDefinition* obj, MDefinition* rval, size_t slot, bool barrier)
-      : MBinaryInstruction(classOpcode, obj, rval),
-        needsBarrier_(barrier),
-        slot_(slot)
-    { }
-
-  public:
-    INSTRUCTION_HEADER(StoreFixedSlot)
-    NAMED_OPERANDS((0, object), (1, value))
-
-    static MStoreFixedSlot* New(TempAllocator& alloc, MDefinition* obj, size_t slot,
-                                MDefinition* rval)
-    {
-        return new(alloc) MStoreFixedSlot(obj, rval, slot, false);
-    }
-    static MStoreFixedSlot* NewBarriered(TempAllocator& alloc, MDefinition* obj, size_t slot,
-                                         MDefinition* rval)
-    {
-        return new(alloc) MStoreFixedSlot(obj, rval, slot, true);
-    }
-
-    size_t slot() const {
-        return slot_;
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::Store(AliasSet::FixedSlot);
-    }
-    bool needsBarrier() const {
-        return needsBarrier_;
-    }
-    void setNeedsBarrier(bool needsBarrier = true) {
-        needsBarrier_ = needsBarrier;
-    }
-
-    ALLOW_CLONE(MStoreFixedSlot)
-};
-
 struct InliningTarget
 {
     JSObject* target;
 
     // If target is a singleton, group is nullptr. If target is not a singleton,
     // this is the group we need to guard on when doing a polymorphic inlining
     // dispatch. Note that this can be different from target->group() due to
     // proto mutation.
@@ -10930,281 +4312,28 @@ class InlinePropertyTable : public TempO
     void trimTo(const InliningTargets& targets, const BoolVector& choiceSet);
 
     // Ensure that the InlinePropertyTable's domain is a subset of |targets|.
     void trimToTargets(const InliningTargets& targets);
 
     bool appendRoots(MRootList& roots) const;
 };
 
-class MGetPropertyCache
-  : public MBinaryInstruction,
-    public MixPolicy<BoxExceptPolicy<0, MIRType::Object>, CacheIdPolicy<1>>::Data
-{
-    bool idempotent_ : 1;
-    bool monitoredResult_ : 1;
-
-    InlinePropertyTable* inlinePropertyTable_;
-
-    MGetPropertyCache(MDefinition* obj, MDefinition* id, bool monitoredResult)
-      : MBinaryInstruction(classOpcode, obj, id),
-        idempotent_(false),
-        monitoredResult_(monitoredResult),
-        inlinePropertyTable_(nullptr)
-    {
-        setResultType(MIRType::Value);
-
-        // The cache will invalidate if there are objects with e.g. lookup or
-        // resolve hooks on the proto chain. setGuard ensures this check is not
-        // eliminated.
-        setGuard();
-    }
-
-  public:
-    INSTRUCTION_HEADER(GetPropertyCache)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, value), (1, idval))
-
-    InlinePropertyTable* initInlinePropertyTable(TempAllocator& alloc, jsbytecode* pc) {
-        MOZ_ASSERT(inlinePropertyTable_ == nullptr);
-        inlinePropertyTable_ = new(alloc) InlinePropertyTable(alloc, pc);
-        return inlinePropertyTable_;
-    }
-
-    void clearInlinePropertyTable() {
-        inlinePropertyTable_ = nullptr;
-    }
-
-    InlinePropertyTable* propTable() const {
-        return inlinePropertyTable_;
-    }
-
-    bool idempotent() const {
-        return idempotent_;
-    }
-    void setIdempotent() {
-        idempotent_ = true;
-        setMovable();
-    }
-    bool monitoredResult() const {
-        return monitoredResult_;
-    }
-
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!idempotent_)
-            return false;
-        if (!ins->isGetPropertyCache())
-            return false;
-        return congruentIfOperandsEqual(ins);
-    }
-
-    AliasSet getAliasSet() const override {
-        if (idempotent_) {
-            return AliasSet::Load(AliasSet::ObjectFields |
-                                  AliasSet::FixedSlot |
-                                  AliasSet::DynamicSlot);
-        }
-        return AliasSet::Store(AliasSet::Any);
-    }
-
-    bool allowDoubleResult() const;
-
-    bool appendRoots(MRootList& roots) const override {
-        if (inlinePropertyTable_)
-            return inlinePropertyTable_->appendRoots(roots);
-        return true;
-    }
-};
-
-class MHomeObjectSuperBase
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    explicit MHomeObjectSuperBase(MDefinition* homeObject)
-      : MUnaryInstruction(classOpcode, homeObject)
-    {
-        setResultType(MIRType::Object);
-        setGuard(); // May throw if [[Prototype]] is null
-    }
-
-  public:
-    INSTRUCTION_HEADER(HomeObjectSuperBase)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, homeObject))
-};
-
-class MGetPropSuperCache
-  : public MTernaryInstruction,
-    public MixPolicy<ObjectPolicy<0>, BoxExceptPolicy<1, MIRType::Object>, CacheIdPolicy<2>>::Data
-{
-    MGetPropSuperCache(MDefinition* obj, MDefinition* receiver, MDefinition* id)
-      : MTernaryInstruction(classOpcode, obj, receiver, id)
-    {
-        setResultType(MIRType::Value);
-        setGuard();
-    }
-
-  public:
-    INSTRUCTION_HEADER(GetPropSuperCache)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object), (1, receiver), (2, idval))
-};
-
 struct PolymorphicEntry {
     // The group and/or shape to guard against.
     ReceiverGuard receiver;
 
     // The property to load, null for loads from unboxed properties.
     Shape* shape;
 
     bool appendRoots(MRootList& roots) const {
         return roots.append(receiver) && roots.append(shape);
     }
 };
 
-// Emit code to load a value from an object if it matches one of the receivers
-// observed by the baseline IC, else bails out.
-class MGetPropertyPolymorphic
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    Vector<PolymorphicEntry, 4, JitAllocPolicy> receivers_;
-    CompilerPropertyName name_;
-
-    MGetPropertyPolymorphic(TempAllocator& alloc, MDefinition* obj, PropertyName* name)
-      : MUnaryInstruction(classOpcode, obj),
-        receivers_(alloc),
-        name_(name)
-    {
-        setGuard();
-        setMovable();
-        setResultType(MIRType::Value);
-    }
-
-  public:
-    INSTRUCTION_HEADER(GetPropertyPolymorphic)
-    NAMED_OPERANDS((0, object))
-
-    static MGetPropertyPolymorphic* New(TempAllocator& alloc, MDefinition* obj, PropertyName* name) {
-        return new(alloc) MGetPropertyPolymorphic(alloc, obj, name);
-    }
-
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isGetPropertyPolymorphic())
-            return false;
-        if (name() != ins->toGetPropertyPolymorphic()->name())
-            return false;
-        return congruentIfOperandsEqual(ins);
-    }
-
-    MOZ_MUST_USE bool addReceiver(const ReceiverGuard& receiver, Shape* shape) {
-        PolymorphicEntry entry;
-        entry.receiver = receiver;
-        entry.shape = shape;
-        return receivers_.append(entry);
-    }
-    size_t numReceivers() const {
-        return receivers_.length();
-    }
-    const ReceiverGuard receiver(size_t i) const {
-        return receivers_[i].receiver;
-    }
-    Shape* shape(size_t i) const {
-        return receivers_[i].shape;
-    }
-    PropertyName* name() const {
-        return name_;
-    }
-    AliasSet getAliasSet() const override {
-        bool hasUnboxedLoad = false;
-        for (size_t i = 0; i < numReceivers(); i++) {
-            if (!shape(i)) {
-                hasUnboxedLoad = true;
-                break;
-            }
-        }
-        return AliasSet::Load(AliasSet::ObjectFields |
-                              AliasSet::FixedSlot |
-                              AliasSet::DynamicSlot |
-                              (hasUnboxedLoad ? AliasSet::UnboxedElement : 0));
-    }
-
-    AliasType mightAlias(const MDefinition* store) const override;
-
-    bool appendRoots(MRootList& roots) const override;
-};
-
-// Emit code to store a value to an object's slots if its shape/group matches
-// one of the shapes/groups observed by the baseline IC, else bails out.
-class MSetPropertyPolymorphic
-  : public MBinaryInstruction,
-    public MixPolicy<SingleObjectPolicy, NoFloatPolicy<1> >::Data
-{
-    Vector<PolymorphicEntry, 4, JitAllocPolicy> receivers_;
-    CompilerPropertyName name_;
-    bool needsBarrier_;
-
-    MSetPropertyPolymorphic(TempAllocator& alloc, MDefinition* obj, MDefinition* value,
-                            PropertyName* name)
-      : MBinaryInstruction(classOpcode, obj, value),
-        receivers_(alloc),
-        name_(name),
-        needsBarrier_(false)
-    {
-    }
-
-  public:
-    INSTRUCTION_HEADER(SetPropertyPolymorphic)
-    NAMED_OPERANDS((0, object), (1, value))
-
-    static MSetPropertyPolymorphic* New(TempAllocator& alloc, MDefinition* obj, MDefinition* value,
-                                        PropertyName* name) {
-        return new(alloc) MSetPropertyPolymorphic(alloc, obj, value, name);
-    }
-
-    MOZ_MUST_USE bool addReceiver(const ReceiverGuard& receiver, Shape* shape) {
-        PolymorphicEntry entry;
-        entry.receiver = receiver;
-        entry.shape = shape;
-        return receivers_.append(entry);
-    }
-    size_t numReceivers() const {
-        return receivers_.length();
-    }
-    const ReceiverGuard& receiver(size_t i) const {
-        return receivers_[i].receiver;
-    }
-    Shape* shape(size_t i) const {
-        return receivers_[i].shape;
-    }
-    PropertyName* name() const {
-        return name_;
-    }
-    bool needsBarrier() const {
-        return needsBarrier_;
-    }
-    void setNeedsBarrier() {
-        needsBarrier_ = true;
-    }
-    AliasSet getAliasSet() const override {
-        bool hasUnboxedStore = false;
-        for (size_t i = 0; i < numReceivers(); i++) {
-            if (!shape(i)) {
-                hasUnboxedStore = true;
-                break;
-            }
-        }
-        return AliasSet::Store(AliasSet::ObjectFields |
-                               AliasSet::FixedSlot |
-                               AliasSet::DynamicSlot |
-                               (hasUnboxedStore ? AliasSet::UnboxedElement : 0));
-    }
-    bool appendRoots(MRootList& roots) const override;
-};
-
 class MDispatchInstruction
   : public MControlInstruction,
     public SingleObjectPolicy::Data
 {
     // Map from JSFunction* -> MBasicBlock.
     struct Entry {
         JSFunction* func;
         // If |func| has a singleton group, |funcGroup| is null. Otherwise,
@@ -11353,734 +4482,16 @@ class MFunctionDispatch : public MDispat
     INSTRUCTION_HEADER(FunctionDispatch)
 
     static MFunctionDispatch* New(TempAllocator& alloc, MDefinition* ins) {
         return new(alloc) MFunctionDispatch(alloc, ins);
     }
     bool appendRoots(MRootList& roots) const override;
 };
 
-class MBindNameCache
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    CompilerPropertyName name_;
-    CompilerScript script_;
-    jsbytecode* pc_;
-
-    MBindNameCache(MDefinition* envChain, PropertyName* name, JSScript* script, jsbytecode* pc)
-      : MUnaryInstruction(classOpcode, envChain), name_(name), script_(script), pc_(pc)
-    {
-        setResultType(MIRType::Object);
-    }
-
-  public:
-    INSTRUCTION_HEADER(BindNameCache)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, environmentChain))
-
-    PropertyName* name() const {
-        return name_;
-    }
-    JSScript* script() const {
-        return script_;
-    }
-    jsbytecode* pc() const {
-        return pc_;
-    }
-    bool appendRoots(MRootList& roots) const override {
-        // Don't append the script, all scripts are added anyway.
-        return roots.append(name_);
-    }
-};
-
-class MCallBindVar
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    explicit MCallBindVar(MDefinition* envChain)
-      : MUnaryInstruction(classOpcode, envChain)
-    {
-        setResultType(MIRType::Object);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(CallBindVar)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, environmentChain))
-
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isCallBindVar())
-            return false;
-        return congruentIfOperandsEqual(ins);
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
-// Guard on an object's shape.
-class MGuardShape
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    CompilerShape shape_;
-    BailoutKind bailoutKind_;
-
-    MGuardShape(MDefinition* obj, Shape* shape, BailoutKind bailoutKind)
-      : MUnaryInstruction(classOpcode, obj),
-        shape_(shape),
-        bailoutKind_(bailoutKind)
-    {
-        setGuard();
-        setMovable();
-        setResultType(MIRType::Object);
-        setResultTypeSet(obj->resultTypeSet());
-
-        // Disallow guarding on unboxed object shapes. The group is better to
-        // guard on, and guarding on the shape can interact badly with
-        // MConvertUnboxedObjectToNative.
-        MOZ_ASSERT(shape->getObjectClass() != &UnboxedPlainObject::class_);
-    }
-
-  public:
-    INSTRUCTION_HEADER(GuardShape)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object))
-
-    const Shape* shape() const {
-        return shape_;
-    }
-    BailoutKind bailoutKind() const {
-        return bailoutKind_;
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isGuardShape())
-            return false;
-        if (shape() != ins->toGuardShape()->shape())
-            return false;
-        if (bailoutKind() != ins->toGuardShape()->bailoutKind())
-            return false;
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::ObjectFields);
-    }
-    bool appendRoots(MRootList& roots) const override {
-        return roots.append(shape_);
-    }
-};
-
-// Bail if the object's shape or unboxed group is not in the input list.
-class MGuardReceiverPolymorphic
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    Vector<ReceiverGuard, 4, JitAllocPolicy> receivers_;
-
-    MGuardReceiverPolymorphic(TempAllocator& alloc, MDefinition* obj)
-      : MUnaryInstruction(classOpcode, obj),
-        receivers_(alloc)
-    {
-        setGuard();
-        setMovable();
-        setResultType(MIRType::Object);
-        setResultTypeSet(obj->resultTypeSet());
-    }
-
-  public:
-    INSTRUCTION_HEADER(GuardReceiverPolymorphic)
-    NAMED_OPERANDS((0, object))
-
-    static MGuardReceiverPolymorphic* New(TempAllocator& alloc, MDefinition* obj) {
-        return new(alloc) MGuardReceiverPolymorphic(alloc, obj);
-    }
-
-    MOZ_MUST_USE bool addReceiver(const ReceiverGuard& receiver) {
-        return receivers_.append(receiver);
-    }
-    size_t numReceivers() const {
-        return receivers_.length();
-    }
-    const ReceiverGuard& receiver(size_t i) const {
-        return receivers_[i];
-    }
-
-    bool congruentTo(const MDefinition* ins) const override;
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::ObjectFields);
-    }
-
-    bool appendRoots(MRootList& roots) const override;
-
-};
-
-// Guard on an object's group, inclusively or exclusively.
-class MGuardObjectGroup
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    CompilerObjectGroup group_;
-    bool bailOnEquality_;
-    BailoutKind bailoutKind_;
-
-    MGuardObjectGroup(MDefinition* obj, ObjectGroup* group, bool bailOnEquality,
-                      BailoutKind bailoutKind)
-      : MUnaryInstruction(classOpcode, obj),
-        group_(group),
-        bailOnEquality_(bailOnEquality),
-        bailoutKind_(bailoutKind)
-    {
-        setGuard();
-        setMovable();
-        setResultType(MIRType::Object);
-
-        // Unboxed groups which might be converted to natives can't be guarded
-        // on, due to MConvertUnboxedObjectToNative.
-        MOZ_ASSERT_IF(group->maybeUnboxedLayoutDontCheckGeneration(),
-                      !group->unboxedLayoutDontCheckGeneration().nativeGroup());
-    }
-
-  public:
-    INSTRUCTION_HEADER(GuardObjectGroup)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object))
-
-    const ObjectGroup* group() const {
-        return group_;
-    }
-    bool bailOnEquality() const {
-        return bailOnEquality_;
-    }
-    BailoutKind bailoutKind() const {
-        return bailoutKind_;
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isGuardObjectGroup())
-            return false;
-        if (group() != ins->toGuardObjectGroup()->group())
-            return false;
-        if (bailOnEquality() != ins->toGuardObjectGroup()->bailOnEquality())
-            return false;
-        if (bailoutKind() != ins->toGuardObjectGroup()->bailoutKind())
-            return false;
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::ObjectFields);
-    }
-    bool appendRoots(MRootList& roots) const override {
-        return roots.append(group_);
-    }
-};
-
-// Guard on an object's identity, inclusively or exclusively.
-class MGuardObjectIdentity
-  : public MBinaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    bool bailOnEquality_;
-
-    MGuardObjectIdentity(MDefinition* obj, MDefinition* expected, bool bailOnEquality)
-      : MBinaryInstruction(classOpcode, obj, expected),
-        bailOnEquality_(bailOnEquality)
-    {
-        setGuard();
-        setMovable();
-        setResultType(MIRType::Object);
-    }
-
-  public:
-    INSTRUCTION_HEADER(GuardObjectIdentity)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object), (1, expected))
-
-    bool bailOnEquality() const {
-        return bailOnEquality_;
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isGuardObjectIdentity())
-            return false;
-        if (bailOnEquality() != ins->toGuardObjectIdentity()->bailOnEquality())
-            return false;
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::ObjectFields);
-    }
-};
-
-// Guard on an object's class.
-class MGuardClass
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    const Class* class_;
-
-    MGuardClass(MDefinition* obj, const Class* clasp)
-      : MUnaryInstruction(classOpcode, obj),
-        class_(clasp)
-    {
-        setGuard();
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(GuardClass)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object))
-
-    const Class* getClass() const {
-        return class_;
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isGuardClass())
-            return false;
-        if (getClass() != ins->toGuardClass()->getClass())
-            return false;
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::ObjectFields);
-    }
-
-    ALLOW_CLONE(MGuardClass)
-};
-
-// Guard on the presence or absence of an unboxed object's expando.
-class MGuardUnboxedExpando
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    bool requireExpando_;
-    BailoutKind bailoutKind_;
-
-    MGuardUnboxedExpando(MDefinition* obj, bool requireExpando, BailoutKind bailoutKind)
-      : MUnaryInstruction(classOpcode, obj),
-        requireExpando_(requireExpando),
-        bailoutKind_(bailoutKind)
-    {
-        setGuard();
-        setMovable();
-        setResultType(MIRType::Object);
-    }
-
-  public:
-    INSTRUCTION_HEADER(GuardUnboxedExpando)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object))
-
-    bool requireExpando() const {
-        return requireExpando_;
-    }
-    BailoutKind bailoutKind() const {
-        return bailoutKind_;
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!congruentIfOperandsEqual(ins))
-            return false;
-        if (requireExpando() != ins->toGuardUnboxedExpando()->requireExpando())
-            return false;
-        return true;
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::ObjectFields);
-    }
-};
-
-// Load an unboxed plain object's expando.
-class MLoadUnboxedExpando
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-  private:
-    explicit MLoadUnboxedExpando(MDefinition* object)
-      : MUnaryInstruction(classOpcode, object)
-    {
-        setResultType(MIRType::Object);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(LoadUnboxedExpando)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object))
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::ObjectFields);
-    }
-};
-
-// Load from vp[slot] (slots that are not inline in an object).
-class MLoadSlot
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    uint32_t slot_;
-
-    MLoadSlot(MDefinition* slots, uint32_t slot)
-      : MUnaryInstruction(classOpcode, slots),
-        slot_(slot)
-    {
-        setResultType(MIRType::Value);
-        setMovable();
-        MOZ_ASSERT(slots->type() == MIRType::Slots);
-    }
-
-  public:
-    INSTRUCTION_HEADER(LoadSlot)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, slots))
-
-    uint32_t slot() const {
-        return slot_;
-    }
-
-    HashNumber valueHash() const override;
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isLoadSlot())
-            return false;
-        if (slot() != ins->toLoadSlot()->slot())
-            return false;
-        return congruentIfOperandsEqual(ins);
-    }
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-
-    AliasSet getAliasSet() const override {
-        MOZ_ASSERT(slots()->type() == MIRType::Slots);
-        return AliasSet::Load(AliasSet::DynamicSlot);
-    }
-    AliasType mightAlias(const MDefinition* store) const override;
-
-    void printOpcode(GenericPrinter& out) const override;
-
-    ALLOW_CLONE(MLoadSlot)
-};
-
-// Inline call to access a function's environment (scope chain).
-class MFunctionEnvironment
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    explicit MFunctionEnvironment(MDefinition* function)
-        : MUnaryInstruction(classOpcode, function)
-    {
-        setResultType(MIRType::Object);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(FunctionEnvironment)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, function))
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-
-    // A function's environment is fixed.
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
-// Allocate a new LexicalEnvironmentObject.
-class MNewLexicalEnvironmentObject
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    CompilerGCPointer<LexicalScope*> scope_;
-
-    MNewLexicalEnvironmentObject(MDefinition* enclosing, LexicalScope* scope)
-      : MUnaryInstruction(classOpcode, enclosing),
-        scope_(scope)
-    {
-        setResultType(MIRType::Object);
-    }
-
-  public:
-    INSTRUCTION_HEADER(NewLexicalEnvironmentObject)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, enclosing))
-
-    LexicalScope* scope() const {
-        return scope_;
-    }
-    bool possiblyCalls() const override {
-        return true;
-    }
-    bool appendRoots(MRootList& roots) const override {
-        return roots.append(scope_);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
-// Allocate a new LexicalEnvironmentObject from existing one
-class MCopyLexicalEnvironmentObject
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    bool copySlots_;
-
-    MCopyLexicalEnvironmentObject(MDefinition* env, bool copySlots)
-      : MUnaryInstruction(classOpcode, env),
-        copySlots_(copySlots)
-    {
-        setResultType(MIRType::Object);
-    }
-
-  public:
-    INSTRUCTION_HEADER(CopyLexicalEnvironmentObject)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, env))
-
-    bool copySlots() const {
-        return copySlots_;
-    }
-    bool possiblyCalls() const override {
-        return true;
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::ObjectFields |
-                              AliasSet::FixedSlot |
-                              AliasSet::DynamicSlot);
-    }
-};
-
-class MHomeObject
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    explicit MHomeObject(MDefinition* function)
-        : MUnaryInstruction(classOpcode, function)
-    {
-        setResultType(MIRType::Object);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(HomeObject)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, function))
-
-    // A function's [[HomeObject]] is fixed.
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
-// Store to vp[slot] (slots that are not inline in an object).
-class MStoreSlot
-  : public MBinaryInstruction,
-    public MixPolicy<ObjectPolicy<0>, NoFloatPolicy<1> >::Data
-{
-    uint32_t slot_;
-    MIRType slotType_;
-    bool needsBarrier_;
-
-    MStoreSlot(MDefinition* slots, uint32_t slot, MDefinition* value, bool barrier)
-        : MBinaryInstruction(classOpcode, slots, value),
-          slot_(slot),
-          slotType_(MIRType::Value),
-          needsBarrier_(barrier)
-    {
-        MOZ_ASSERT(slots->type() == MIRType::Slots);
-    }
-
-  public:
-    INSTRUCTION_HEADER(StoreSlot)
-    NAMED_OPERANDS((0, slots), (1, value))
-
-    static MStoreSlot* New(TempAllocator& alloc, MDefinition* slots, uint32_t slot,
-                           MDefinition* value)
-    {
-        return new(alloc) MStoreSlot(slots, slot, value, false);
-    }
-    static MStoreSlot* NewBarriered(TempAllocator& alloc, MDefinition* slots, uint32_t slot,
-                                    MDefinition* value)
-    {
-        return new(alloc) MStoreSlot(slots, slot, value, true);
-    }
-
-    uint32_t slot() const {
-        return slot_;
-    }
-    MIRType slotType() const {
-        return slotType_;
-    }
-    void setSlotType(MIRType slotType) {
-        MOZ_ASSERT(slotType != MIRType::None);
-        slotType_ = slotType;
-    }
-    bool needsBarrier() const {
-        return needsBarrier_;
-    }
-    void setNeedsBarrier() {
-        needsBarrier_ = true;
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Store(AliasSet::DynamicSlot);
-    }
-    void printOpcode(GenericPrinter& out) const override;
-
-    ALLOW_CLONE(MStoreSlot)
-};
-
-class MGetNameCache
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-  private:
-    explicit MGetNameCache(MDefinition* obj)
-      : MUnaryInstruction(classOpcode, obj)
-    {
-        setResultType(MIRType::Value);
-    }
-
-  public:
-    INSTRUCTION_HEADER(GetNameCache)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, envObj))
-};
-
-class MCallGetIntrinsicValue : public MNullaryInstruction
-{
-    CompilerPropertyName name_;
-
-    explicit MCallGetIntrinsicValue(PropertyName* name)
-      : MNullaryInstruction(classOpcode),
-        name_(name)
-    {
-        setResultType(MIRType::Value);
-    }
-
-  public:
-    INSTRUCTION_HEADER(CallGetIntrinsicValue)
-    TRIVIAL_NEW_WRAPPERS
-
-    PropertyName* name() const {
-        return name_;
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    bool possiblyCalls() const override {
-        return true;
-    }
-    bool appendRoots(MRootList& roots) const override {
-        return roots.append(name_);
-    }
-};
-
-class MSetPropertyInstruction : public MBinaryInstruction
-{
-    CompilerPropertyName name_;
-    bool strict_;
-
-  protected:
-    MSetPropertyInstruction(Opcode op, MDefinition* obj, MDefinition* value, PropertyName* name,
-                            bool strict)
-      : MBinaryInstruction(op, obj, value),
-        name_(name), strict_(strict)
-    {}
-
-  public:
-    NAMED_OPERANDS((0, object), (1, value))
-    PropertyName* name() const {
-        return name_;
-    }
-    bool strict() const {
-        return strict_;
-    }
-    bool appendRoots(MRootList& roots) const override {
-        return roots.append(name_);
-    }
-};
-
-class MSetElementInstruction
-  : public MTernaryInstruction
-{
-    bool strict_;
-  protected:
-    MSetElementInstruction(Opcode op, MDefinition* object, MDefinition* index, MDefinition* value,
-                           bool strict)
-      : MTernaryInstruction(op, object, index, value),
-        strict_(strict)
-    {
-    }
-
-  public:
-    NAMED_OPERANDS((0, object), (1, index), (2, value))
-    bool strict() const {
-        return strict_;
-    }
-};
-
-class MDeleteProperty
-  : public MUnaryInstruction,
-    public BoxInputsPolicy::Data
-{
-    CompilerPropertyName name_;
-    bool strict_;
-
-  protected:
-    MDeleteProperty(MDefinition* val, PropertyName* name, bool strict)
-      : MUnaryInstruction(classOpcode, val),
-        name_(name),
-        strict_(strict)
-    {
-        setResultType(MIRType::Boolean);
-    }
-
-  public:
-    INSTRUCTION_HEADER(DeleteProperty)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, value))
-
-    PropertyName* name() const {
-        return name_;
-    }
-    bool strict() const {
-        return strict_;
-    }
-    bool appendRoots(MRootList& roots) const override {
-        return roots.append(name_);
-    }
-};
-
-class MDeleteElement
-  : public MBinaryInstruction,
-    public BoxInputsPolicy::Data
-{
-    bool strict_;
-
-    MDeleteElement(MDefinition* value, MDefinition* index, bool strict)
-      : MBinaryInstruction(classOpcode, value, index),
-        strict_(strict)
-    {
-        setResultType(MIRType::Boolean);
-    }
-
-  public:
-    INSTRUCTION_HEADER(DeleteElement)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, value), (1, index))
-
-    bool strict() const {
-        return strict_;
-    }
-};
-
 // Note: This uses CallSetElementPolicy to always box its second input,
 // ensuring we don't need two LIR instructions to lower this.
 class MCallSetProperty
   : public MSetPropertyInstruction,
     public CallSetElementPolicy::Data
 {
     MCallSetProperty(MDefinition* obj, MDefinition* value, PropertyName* name, bool strict)
       : MSetPropertyInstruction(classOpcode, obj, value, name, strict)
@@ -12091,119 +4502,16 @@ class MCallSetProperty
     INSTRUCTION_HEADER(CallSetProperty)
     TRIVIAL_NEW_WRAPPERS
 
     bool possiblyCalls() const override {
         return true;
     }
 };
 
-class MSetPropertyCache
-  : public MTernaryInstruction,
-    public MixPolicy<SingleObjectPolicy, CacheIdPolicy<1>, NoFloatPolicy<2>>::Data
-{
-    bool strict_ : 1;
-    bool needsPostBarrier_ : 1;
-    bool needsTypeBarrier_ : 1;
-    bool guardHoles_ : 1;
-
-    MSetPropertyCache(MDefinition* obj, MDefinition* id, MDefinition* value, bool strict,
-                      bool needsPostBarrier, bool typeBarrier, bool guardHoles)
-      : MTernaryInstruction(classOpcode, obj, id, value),
-        strict_(strict),
-        needsPostBarrier_(needsPostBarrier),
-        needsTypeBarrier_(typeBarrier),
-        guardHoles_(guardHoles)
-    {
-    }
-
-  public:
-    INSTRUCTION_HEADER(SetPropertyCache)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object), (1, idval), (2, value))
-
-    bool needsPostBarrier() const {
-        return needsPostBarrier_;
-    }
-    bool needsTypeBarrier() const {
-        return needsTypeBarrier_;
-    }
-
-    bool guardHoles() const {
-        return guardHoles_;
-    }
-
-    bool strict() const {
-        return strict_;
-    }
-};
-
-class MCallGetProperty
-  : public MUnaryInstruction,
-    public BoxInputsPolicy::Data
-{
-    CompilerPropertyName name_;
-    bool idempotent_;
-
-    MCallGetProperty(MDefinition* value, PropertyName* name)
-      : MUnaryInstruction(classOpcode, value), name_(name),
-        idempotent_(false)
-    {
-        setResultType(MIRType::Value);
-    }
-
-  public:
-    INSTRUCTION_HEADER(CallGetProperty)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, value))
-
-    PropertyName* name() const {
-        return name_;
-    }
-
-    // Constructors need to perform a GetProp on the function prototype.
-    // Since getters cannot be set on the prototype, fetching is non-effectful.
-    // The operation may be safely repeated in case of bailout.
-    void setIdempotent() {
-        idempotent_ = true;
-    }
-    AliasSet getAliasSet() const override {
-        if (!idempotent_)
-            return AliasSet::Store(AliasSet::Any);
-        return AliasSet::None();
-    }
-    bool possiblyCalls() const override {
-        return true;
-    }
-    bool appendRoots(MRootList& roots) const override {
-        return roots.append(name_);
-    }
-};
-
-// Inline call to handle lhs[rhs]. The first input is a Value so that this
-// instruction can handle both objects and strings.
-class MCallGetElement
-  : public MBinaryInstruction,
-    public BoxInputsPolicy::Data
-{
-    MCallGetElement(MDefinition* lhs, MDefinition* rhs)
-      : MBinaryInstruction(classOpcode, lhs, rhs)
-    {
-        setResultType(MIRType::Value);
-    }
-
-  public:
-    INSTRUCTION_HEADER(CallGetElement)
-    TRIVIAL_NEW_WRAPPERS
-
-    bool possiblyCalls() const override {
-        return true;
-    }
-};
-
 class MCallSetElement
   : public MSetElementInstruction,
     public CallSetElementPolicy::Data
 {
     MCallSetElement(MDefinition* object, MDefinition* index, MDefinition* value, bool strict)
       : MSetElementInstruction(classOpcode, object, index, value, strict)
     {
     }
@@ -12212,61 +4520,16 @@ class MCallSetElement
     INSTRUCTION_HEADER(CallSetElement)
     TRIVIAL_NEW_WRAPPERS
 
     bool possiblyCalls() const override {
         return true;
     }
 };
 
-class MCallInitElementArray
-  : public MTernaryInstruction,
-    public MixPolicy<ObjectPolicy<0>, IntPolicy<1>, BoxPolicy<2> >::Data
-{
-    MCallInitElementArray(MDefinition* obj, MDefinition* index, MDefinition* val)
-      : MTernaryInstruction(classOpcode, obj, index, val)
-    {
-        MOZ_ASSERT(index->type() == MIRType::Int32);
-    }
-
-  public:
-    INSTRUCTION_HEADER(CallInitElementArray)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object), (1, index), (2, value))
-
-    bool possiblyCalls() const override {
-        return true;
-    }
-};
-
-class MSetDOMProperty
-  : public MBinaryInstruction,
-    public MixPolicy<ObjectPolicy<0>, BoxPolicy<1> >::Data
-{
-    const JSJitSetterOp func_;
-
-    MSetDOMProperty(const JSJitSetterOp func, MDefinition* obj, MDefinition* val)
-      : MBinaryInstruction(classOpcode, obj, val),
-        func_(func)
-    { }
-
-  public:
-    INSTRUCTION_HEADER(SetDOMProperty)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object), (1, value))
-
-    JSJitSetterOp fun() const {
-        return func_;
-    }
-
-    bool possiblyCalls() const override {
-        return true;
-    }
-};
-
 class MGetDOMProperty
   : public MVariadicInstruction,
     public ObjectPolicy<0>::Data
 {
     const JSJitInfo* info_;
 
   protected:
     MGetDOMProperty(Opcode op, const JSJitInfo* jitinfo)
@@ -12414,549 +4677,16 @@ class MGetDOMMember : public MGetDOMProp
     bool congruentTo(const MDefinition* ins) const override {
         if (!ins->isGetDOMMember())
             return false;
 
         return MGetDOMProperty::congruentTo(ins->toGetDOMMember());
     }
 };
 
-class MStringLength
-  : public MUnaryInstruction,
-    public StringPolicy<0>::Data
-{
-    explicit MStringLength(MDefinition* string)
-      : MUnaryInstruction(classOpcode, string)
-    {
-        setResultType(MIRType::Int32);
-        setMovable();
-    }
-  public:
-    INSTRUCTION_HEADER(StringLength)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, string))
-
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        // The string |length| property is immutable, so there is no
-        // implicit dependency.
-        return AliasSet::None();
-    }
-
-    void computeRange(TempAllocator& alloc) override;
-
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        return true;
-    }
-
-    ALLOW_CLONE(MStringLength)
-};
-
-// Inlined assembly for Math.floor(double | float32) -> int32.
-class MFloor
-  : public MUnaryInstruction,
-    public FloatingPointPolicy<0>::Data
-{
-    explicit MFloor(MDefinition* num)
-      : MUnaryInstruction(classOpcode, num)
-    {
-        setResultType(MIRType::Int32);
-        specialization_ = MIRType::Double;
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(Floor)
-    TRIVIAL_NEW_WRAPPERS
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    bool isFloat32Commutative() const override {
-        return true;
-    }
-    void trySpecializeFloat32(TempAllocator& alloc) override;
-#ifdef DEBUG
-    bool isConsistentFloat32Use(MUse* use) const override {
-        return true;
-    }
-#endif
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    void computeRange(TempAllocator& alloc) override;
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        return true;
-    }
-
-    ALLOW_CLONE(MFloor)
-};
-
-// Inlined assembly version for Math.ceil(double | float32) -> int32.
-class MCeil
-  : public MUnaryInstruction,
-    public FloatingPointPolicy<0>::Data
-{
-    explicit MCeil(MDefinition* num)
-      : MUnaryInstruction(classOpcode, num)
-    {
-        setResultType(MIRType::Int32);
-        specialization_ = MIRType::Double;
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(Ceil)
-    TRIVIAL_NEW_WRAPPERS
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    bool isFloat32Commutative() const override {
-        return true;
-    }
-    void trySpecializeFloat32(TempAllocator& alloc) override;
-#ifdef DEBUG
-    bool isConsistentFloat32Use(MUse* use) const override {
-        return true;
-    }
-#endif
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    void computeRange(TempAllocator& alloc) override;
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        return true;
-    }
-
-    ALLOW_CLONE(MCeil)
-};
-
-// Inlined version of Math.round(double | float32) -> int32.
-class MRound
-  : public MUnaryInstruction,
-    public FloatingPointPolicy<0>::Data
-{
-    explicit MRound(MDefinition* num)
-      : MUnaryInstruction(classOpcode, num)
-    {
-        setResultType(MIRType::Int32);
-        specialization_ = MIRType::Double;
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(Round)
-    TRIVIAL_NEW_WRAPPERS
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    bool isFloat32Commutative() const override {
-        return true;
-    }
-    void trySpecializeFloat32(TempAllocator& alloc) override;
-#ifdef DEBUG
-    bool isConsistentFloat32Use(MUse* use) const override {
-        return true;
-    }
-#endif
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-    bool canRecoverOnBailout() const override {
-        return true;
-    }
-
-    ALLOW_CLONE(MRound)
-};
-
-// NearbyInt rounds the floating-point input to the nearest integer, according
-// to the RoundingMode.
-class MNearbyInt
-  : public MUnaryInstruction,
-    public FloatingPointPolicy<0>::Data
-{
-    RoundingMode roundingMode_;
-
-    explicit MNearbyInt(MDefinition* num, MIRType resultType, RoundingMode roundingMode)
-      : MUnaryInstruction(classOpcode, num),
-        roundingMode_(roundingMode)
-    {
-        MOZ_ASSERT(HasAssemblerSupport(roundingMode));
-
-        MOZ_ASSERT(IsFloatingPointType(resultType));
-        setResultType(resultType);
-        specialization_ = resultType;
-
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(NearbyInt)
-    TRIVIAL_NEW_WRAPPERS
-
-    static bool HasAssemblerSupport(RoundingMode mode) {
-        return Assembler::HasRoundInstruction(mode);
-    }
-
-    RoundingMode roundingMode() const { return roundingMode_; }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    bool isFloat32Commutative() const override {
-        return true;
-    }
-    void trySpecializeFloat32(TempAllocator& alloc) override;
-#ifdef DEBUG
-    bool isConsistentFloat32Use(MUse* use) const override {
-        return true;
-    }
-#endif
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins) &&
-               ins->toNearbyInt()->roundingMode() == roundingMode_;
-    }
-
-    void printOpcode(GenericPrinter& out) const override;
-
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-
-    bool canRecoverOnBailout() const override {
-        switch (roundingMode_) {
-          case RoundingMode::Up:
-          case RoundingMode::Down:
-            return true;
-          default:
-            return false;
-        }
-    }
-
-    ALLOW_CLONE(MNearbyInt)
-};
-
-class MGetIteratorCache
-  : public MUnaryInstruction,
-    public BoxExceptPolicy<0, MIRType::Object>::Data
-{
-    explicit MGetIteratorCache(MDefinition* val)
-      : MUnaryInstruction(classOpcode, val)
-    {
-        setResultType(MIRType::Object);
-    }
-
-  public:
-    INSTRUCTION_HEADER(GetIteratorCache)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, value))
-};
-
-class MIteratorMore
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    explicit MIteratorMore(MDefinition* iter)
-      : MUnaryInstruction(classOpcode, iter)
-    {
-        setResultType(MIRType::Value);
-    }
-
-  public:
-    INSTRUCTION_HEADER(IteratorMore)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, iterator))
-
-};
-
-class MIsNoIter
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-    explicit MIsNoIter(MDefinition* def)
-      : MUnaryInstruction(classOpcode, def)
-    {
-        setResultType(MIRType::Boolean);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(IsNoIter)
-    TRIVIAL_NEW_WRAPPERS
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
-class MIteratorEnd
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    explicit MIteratorEnd(MDefinition* iter)
-      : MUnaryInstruction(classOpcode, iter)
-    { }
-
-  public:
-    INSTRUCTION_HEADER(IteratorEnd)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, iterator))
-
-};
-
-// Implementation for 'in' operator using instruction cache
-class MInCache
-  : public MBinaryInstruction,
-    public MixPolicy<CacheIdPolicy<0>, ObjectPolicy<1> >::Data
-{
-    MInCache(MDefinition* key, MDefinition* obj)
-      : MBinaryInstruction(classOpcode, key, obj)
-    {
-        setResultType(MIRType::Boolean);
-    }
-
-  public:
-    INSTRUCTION_HEADER(InCache)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, key), (1, object))
-};
-
-
-// Test whether the index is in the array bounds or a hole.
-class MInArray
-  : public MQuaternaryInstruction,
-    public ObjectPolicy<3>::Data
-{
-    bool needsHoleCheck_;
-    bool needsNegativeIntCheck_;
-
-    MInArray(MDefinition* elements, MDefinition* index,
-             MDefinition* initLength, MDefinition* object,
-             bool needsHoleCheck)
-      : MQuaternaryInstruction(classOpcode, elements, index, initLength, object),
-        needsHoleCheck_(needsHoleCheck),
-        needsNegativeIntCheck_(true)
-    {
-        setResultType(MIRType::Boolean);
-        setMovable();
-        MOZ_ASSERT(elements->type() == MIRType::Elements);
-        MOZ_ASSERT(index->type() == MIRType::Int32);
-        MOZ_ASSERT(initLength->type() == MIRType::Int32);
-    }
-
-  public:
-    INSTRUCTION_HEADER(InArray)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, elements), (1, index), (2, initLength), (3, object))
-
-    bool needsHoleCheck() const {
-        return needsHoleCheck_;
-    }
-    bool needsNegativeIntCheck() const {
-        return needsNegativeIntCheck_;
-    }
-    void collectRangeInfoPreTrunc() override;
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::Element);
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isInArray())
-            return false;
-        const MInArray* other = ins->toInArray();
-        if (needsHoleCheck() != other->needsHoleCheck())
-            return false;
-        if (needsNegativeIntCheck() != other->needsNegativeIntCheck())
-            return false;
-        return congruentIfOperandsEqual(other);
-    }
-};
-
-class MHasOwnCache
-  : public MBinaryInstruction,
-    public MixPolicy<BoxExceptPolicy<0, MIRType::Object>, CacheIdPolicy<1>>::Data
-{
-    MHasOwnCache(MDefinition* obj, MDefinition* id)
-      : MBinaryInstruction(classOpcode, obj, id)
-    {
-        setResultType(MIRType::Boolean);
-    }
-
-  public:
-    INSTRUCTION_HEADER(HasOwnCache)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, value), (1, idval))
-};
-
-// Implementation for instanceof operator with specific rhs.
-class MInstanceOf
-  : public MUnaryInstruction,
-    public InstanceOfPolicy::Data
-{
-    CompilerObject protoObj_;
-
-    MInstanceOf(MDefinition* obj, JSObject* proto)
-      : MUnaryInstruction(classOpcode, obj),
-        protoObj_(proto)
-    {
-        setResultType(MIRType::Boolean);
-    }
-
-  public:
-    INSTRUCTION_HEADER(InstanceOf)
-    TRIVIAL_NEW_WRAPPERS
-
-    JSObject* prototypeObject() {
-        return protoObj_;
-    }
-
-    bool appendRoots(MRootList& roots) const override {
-        return roots.append(protoObj_);
-    }
-};
-
-// Implementation for instanceof operator with unknown rhs.
-class MCallInstanceOf
-  : public MBinaryInstruction,
-    public MixPolicy<BoxPolicy<0>, ObjectPolicy<1> >::Data
-{
-    MCallInstanceOf(MDefinition* obj, MDefinition* proto)
-      : MBinaryInstruction(classOpcode, obj, proto)
-    {
-        setResultType(MIRType::Boolean);
-    }
-
-  public:
-    INSTRUCTION_HEADER(CallInstanceOf)
-    TRIVIAL_NEW_WRAPPERS
-};
-
-class MArgumentsLength : public MNullaryInstruction
-{
-    MArgumentsLength()
-      : MNullaryInstruction(classOpcode)
-    {
-        setResultType(MIRType::Int32);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(ArgumentsLength)
-    TRIVIAL_NEW_WRAPPERS
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        // Arguments |length| cannot be mutated by Ion Code.
-        return AliasSet::None();
-    }
-
-    void computeRange(TempAllocator& alloc) override;
-
-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
-
-    bool canRecoverOnBailout() const override {
-        return true;
-    }
-};
-
-// This MIR instruction is used to get an argument from the actual arguments.
-class MGetFrameArgument
-  : public MUnaryInstruction,
-    public IntPolicy<0>::Data
-{
-    bool scriptHasSetArg_;
-
-    MGetFrameArgument(MDefinition* idx, bool scriptHasSetArg)
-      : MUnaryInstruction(classOpcode, idx),
-        scriptHasSetArg_(scriptHasSetArg)
-    {
-        setResultType(MIRType::Value);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(GetFrameArgument)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, index))
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        // If the script doesn't have any JSOP_SETARG ops, then this instruction is never
-        // aliased.
-        if (scriptHasSetArg_)
-            return AliasSet::Load(AliasSet::FrameArgument);
-        return AliasSet::None();
-    }
-};
-
-class MNewTarget : public MNullaryInstruction
-{
-    MNewTarget() : MNullaryInstruction(classOpcode) {
-        setResultType(MIRType::Value);
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(NewTarget)
-    TRIVIAL_NEW_WRAPPERS
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
-// This MIR instruction is used to set an argument value in the frame.
-class MSetFrameArgument
-  : public MUnaryInstruction,
-    public NoFloatPolicy<0>::Data
-{
-    uint32_t argno_;
-
-    MSetFrameArgument(uint32_t argno, MDefinition* value)
-      : MUnaryInstruction(classOpcode, value),
-        argno_(argno)
-    {
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(SetFrameArgument)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, value))
-
-    uint32_t argno() const {
-        return argno_;
-    }
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return false;
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Store(AliasSet::FrameArgument);
-    }
-};
-
 class MRestCommon
 {
     unsigned numFormals_;
     CompilerGCPointer<ArrayObject*> templateObject_;
 
   protected:
     MRestCommon(unsigned numFormals, ArrayObject* templateObject)
       : numFormals_(numFormals),
@@ -12967,287 +4697,16 @@ class MRestCommon
     unsigned numFormals() const {
         return numFormals_;
     }
     ArrayObject* templateObject() const {
         return templateObject_;
     }
 };
 
-class MRest
-  : public MUnaryInstruction,
-    public MRestCommon,
-    public IntPolicy<0>::Data
-{
-    MRest(TempAllocator& alloc, CompilerConstraintList* constraints, MDefinition* numActuals,
-          unsigned numFormals, ArrayObject* templateObject)
-      : MUnaryInstruction(classOpcode, numActuals),
-        MRestCommon(numFormals, templateObject)
-    {
-        setResultType(MIRType::Object);
-        setResultTypeSet(MakeSingletonTypeSet(alloc, constraints, templateObject));
-    }
-
-  public:
-    INSTRUCTION_HEADER(Rest)
-    TRIVIAL_NEW_WRAPPERS_WITH_ALLOC
-    NAMED_OPERANDS((0, numActuals))
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    bool possiblyCalls() const override {
-        return true;
-    }
-    bool appendRoots(MRootList& roots) const override {
-        return roots.append(templateObject());
-    }
-};
-
-class MFilterTypeSet
-  : public MUnaryInstruction,
-    public FilterTypeSetPolicy::Data
-{
-    MFilterTypeSet(MDefinition* def, TemporaryTypeSet* types)
-      : MUnaryInstruction(classOpcode, def)
-    {
-        MOZ_ASSERT(!types->unknown());
-        setResultType(types->getKnownMIRType());
-        setResultTypeSet(types);
-    }
-
-  public:
-    INSTRUCTION_HEADER(FilterTypeSet)
-    TRIVIAL_NEW_WRAPPERS
-
-    bool congruentTo(const MDefinition* def) const override {
-        return false;
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    virtual bool neverHoist() const override {
-        return resultTypeSet()->empty();
-    }
-    void computeRange(TempAllocator& alloc) override;
-
-    bool isFloat32Commutative() const override {
-        return IsFloatingPointType(type());
-    }
-
-    bool canProduceFloat32() const override;
-    bool canConsumeFloat32(MUse* operand) const override;
-    void trySpecializeFloat32(TempAllocator& alloc) override;
-};
-
-// Given a value, guard that the value is in a particular TypeSet, then returns
-// that value.
-class MTypeBarrier
-  : public MUnaryInstruction,
-    public TypeBarrierPolicy::Data
-{
-    BarrierKind barrierKind_;
-
-    MTypeBarrier(MDefinition* def, TemporaryTypeSet* types,
-                 BarrierKind kind = BarrierKind::TypeSet)
-      : MUnaryInstruction(classOpcode, def),
-        barrierKind_(kind)
-    {
-        MOZ_ASSERT(kind == BarrierKind::TypeTagOnly || kind == BarrierKind::TypeSet);
-
-        MOZ_ASSERT(!types->unknown());
-        setResultType(types->getKnownMIRType());
-        setResultTypeSet(types);
-
-        setGuard();
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(TypeBarrier)
-    TRIVIAL_NEW_WRAPPERS
-
-    void printOpcode(GenericPrinter& out) const override;
-    bool congruentTo(const MDefinition* def) const override;
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    virtual bool neverHoist() const override {
-        return resultTypeSet()->empty();
-    }
-    BarrierKind barrierKind() const {
-        return barrierKind_;
-    }
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-
-    bool alwaysBails() const {
-        // If mirtype of input doesn't agree with mirtype of barrier,
-        // we will definitely bail.
-        MIRType type = resultTypeSet()->getKnownMIRType();
-        if (type == MIRType::Value)
-            return false;
-        if (input()->type() == MIRType::Value)
-            return false;
-        if (input()->type() == MIRType::ObjectOrNull) {
-            // The ObjectOrNull optimization is only performed when the
-            // barrier's type is MIRType::Null.
-            MOZ_ASSERT(type == MIRType::Null);
-            return false;
-        }
-        return input()->type() != type;
-    }
-
-    ALLOW_CLONE(MTypeBarrier)
-};
-
-// Like MTypeBarrier, guard that the value is in the given type set. This is
-// used before property writes to ensure the value being written is represented
-// in the property types for the object.
-class MMonitorTypes
-  : public MUnaryInstruction,
-    public BoxInputsPolicy::Data
-{
-    const TemporaryTypeSet* typeSet_;
-    BarrierKind barrierKind_;
-
-    MMonitorTypes(MDefinition* def, const TemporaryTypeSet* types, BarrierKind kind)
-      : MUnaryInstruction(classOpcode, def),
-        typeSet_(types),
-        barrierKind_(kind)
-    {
-        MOZ_ASSERT(kind == BarrierKind::TypeTagOnly || kind == BarrierKind::TypeSet);
-
-        setGuard();
-        MOZ_ASSERT(!types->unknown());
-    }
-
-  public:
-    INSTRUCTION_HEADER(MonitorTypes)
-    TRIVIAL_NEW_WRAPPERS
-
-    const TemporaryTypeSet* typeSet() const {
-        return typeSet_;
-    }
-    BarrierKind barrierKind() const {
-        return barrierKind_;
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
-// Given a value being written to another object, update the generational store
-// buffer if the value is in the nursery and object is in the tenured heap.
-class MPostWriteBarrier : public MBinaryInstruction, public ObjectPolicy<0>::Data
-{
-    MPostWriteBarrier(MDefinition* obj, MDefinition* value)
-      : MBinaryInstruction(classOpcode, obj, value)
-    {
-        setGuard();
-    }
-
-  public:
-    INSTRUCTION_HEADER(PostWriteBarrier)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object), (1, value))
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-#ifdef DEBUG
-    bool isConsistentFloat32Use(MUse* use) const override {
-        // During lowering, values that neither have object nor value MIR type
-        // are ignored, thus Float32 can show up at this point without any issue.
-        return use == getUseFor(1);
-    }
-#endif
-
-    ALLOW_CLONE(MPostWriteBarrier)
-};
-
-// Given a value being written to another object's elements at the specified
-// index, update the generational store buffer if the value is in the nursery
-// and object is in the tenured heap.
-class MPostWriteElementBarrier : public MTernaryInstruction
-                               , public MixPolicy<ObjectPolicy<0>, IntPolicy<2>>::Data
-{
-    MPostWriteElementBarrier(MDefinition* obj, MDefinition* value, MDefinition* index)
-      : MTernaryInstruction(classOpcode, obj, value, index)
-    {
-        setGuard();
-    }
-
-  public:
-    INSTRUCTION_HEADER(PostWriteElementBarrier)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object), (1, value), (2, index))
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-#ifdef DEBUG
-    bool isConsistentFloat32Use(MUse* use) const override {
-        // During lowering, values that neither have object nor value MIR type
-        // are ignored, thus Float32 can show up at this point without any issue.
-        return use == getUseFor(1);
-    }
-#endif
-
-    ALLOW_CLONE(MPostWriteElementBarrier)
-};
-
-class MNewNamedLambdaObject : public MNullaryInstruction
-{
-    CompilerGCPointer<LexicalEnvironmentObject*> templateObj_;
-
-    explicit MNewNamedLambdaObject(LexicalEnvironmentObject* templateObj)
-      : MNullaryInstruction(classOpcode),
-        templateObj_(templateObj)
-    {
-        setResultType(MIRType::Object);
-    }
-
-  public:
-    INSTRUCTION_HEADER(NewNamedLambdaObject)
-    TRIVIAL_NEW_WRAPPERS
-
-    LexicalEnvironmentObject* templateObj() {
-        return templateObj_;
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    bool appendRoots(MRootList& roots) const override {
-        return roots.append(templateObj_);
-    }
-};
-
-class MNewCallObjectBase : public MUnaryInstruction
-                         , public SingleObjectPolicy::Data
-{
-  protected:
-    MNewCallObjectBase(Opcode op, MConstant* templateObj)
-      : MUnaryInstruction(op, templateObj)
-    {
-        setResultType(MIRType::Object);
-    }
-
-  public:
-    CallObject* templateObject() const {
-        return &getOperand(0)->toConstant()->toObject().as<CallObject>();
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-};
-
 class MNewCallObject : public MNewCallObjectBase
 {
   public:
     INSTRUCTION_HEADER(NewCallObject)
     TRIVIAL_NEW_WRAPPERS
 
     explicit MNewCallObject(MConstant* templateObj)
       : MNewCallObjectBase(classOpcode, templateObj)
@@ -14623,91 +6082,16 @@ class MWasmAtomicBinopHeap
     MDefinition* tls() const { return getOperand(2); }
     MDefinition* memoryBase() const { return getOperand(3); }
 
     AliasSet getAliasSet() const override {
         return AliasSet::Store(AliasSet::WasmHeap);
     }
 };
 
-class MWasmLoadGlobalVar
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-    MWasmLoadGlobalVar(MIRType type, unsigned globalDataOffset, bool isConstant, MDefinition* tlsPtr)
-      : MUnaryInstruction(classOpcode, tlsPtr),
-        globalDataOffset_(globalDataOffset), isConstant_(isConstant)
-    {
-        MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));
-        setResultType(type);
-        setMovable();
-    }
-
-    unsigned globalDataOffset_;
-    bool isConstant_;
-
-  public:
-    INSTRUCTION_HEADER(WasmLoadGlobalVar)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, tlsPtr))
-
-    unsigned globalDataOffset() const { return globalDataOffset_; }
-
-    HashNumber valueHash() const override;
-    bool congruentTo(const MDefinition* ins) const override;
-    MDefinition* foldsTo(TempAllocator& alloc) override;
-
-    AliasSet getAliasSet() const override {
-        return isConstant_ ? AliasSet::None() : AliasSet::Load(AliasSet::WasmGlobalVar);
-    }
-
-    AliasType mightAlias(const MDefinition* def) const override;
-};
-
-class MWasmStoreGlobalVar
-  : public MBinaryInstruction,
-    public NoTypePolicy::Data
-{
-    MWasmStoreGlobalVar(unsigned globalDataOffset, MDefinition* value, MDefinition* tlsPtr)
-      : MBinaryInstruction(classOpcode, value, tlsPtr),
-        globalDataOffset_(globalDataOffset)
-    { }
-
-    unsigned globalDataOffset_;
-
-  public:
-    INSTRUCTION_HEADER(WasmStoreGlobalVar)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, value), (1, tlsPtr))
-
-    unsigned globalDataOffset() const { return globalDataOffset_; }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::Store(AliasSet::WasmGlobalVar);
-    }
-};
-
-class MWasmParameter : public MNullaryInstruction
-{
-    ABIArg abi_;
-
-    MWasmParameter(ABIArg abi, MIRType mirType)
-      : MNullaryInstruction(classOpcode),
-        abi_(abi)
-    {
-        setResultType(mirType);
-    }
-
-  public:
-    INSTRUCTION_HEADER(WasmParameter)
-    TRIVIAL_NEW_WRAPPERS
-
-    ABIArg abi() const { return abi_; }
-};
-
 class MWasmReturn
   : public MAryControlInstruction<1, 0>,
     public NoTypePolicy::Data
 {
     explicit MWasmReturn(MDefinition* ins)
       : MAryControlInstruction(classOpcode)
     {
         initOperand(0, ins);
@@ -14726,40 +6110,16 @@ class MWasmReturnVoid
       : MAryControlInstruction(classOpcode)
     { }
 
   public:
     INSTRUCTION_HEADER(WasmReturnVoid)
     TRIVIAL_NEW_WRAPPERS
 };
 
-class MWasmStackArg
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-    MWasmStackArg(uint32_t spOffset, MDefinition* ins)
-      : MUnaryInstruction(classOpcode, ins),
-        spOffset_(spOffset)
-    {}
-
-    uint32_t spOffset_;
-
-  public:
-    INSTRUCTION_HEADER(WasmStackArg)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, arg))
-
-    uint32_t spOffset() const {
-        return spOffset_;
-    }
-    void incrementOffset(uint32_t inc) {
-        spOffset_ += inc;
-    }
-};
-
 class MWasmCall final
   : public MVariadicInstruction,
     public NoTypePolicy::Data
 {
     wasm::CallSiteDesc desc_;
     wasm::CalleeDesc callee_;
     FixedList<AnyRegister> argRegs_;
     uint32_t spIncrement_;
@@ -14819,123 +6179,16 @@ class MWasmCall final
         return true;
     }
 
     const ABIArg& instanceArg() const {
         return instanceArg_;
     }
 };
 
-class MWasmSelect
-  : public MTernaryInstruction,
-    public NoTypePolicy::Data
-{
-    MWasmSelect(MDefinition* trueExpr, MDefinition* falseExpr, MDefinition *condExpr)
-      : MTernaryInstruction(classOpcode, trueExpr, falseExpr, condExpr)
-    {
-        MOZ_ASSERT(condExpr->type() == MIRType::Int32);
-        MOZ_ASSERT(trueExpr->type() == falseExpr->type());
-        setResultType(trueExpr->type());
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(WasmSelect)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, trueExpr), (1, falseExpr), (2, condExpr))
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-
-    ALLOW_CLONE(MWasmSelect)
-};
-
-class MWasmReinterpret
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-    MWasmReinterpret(MDefinition* val, MIRType toType)
-      : MUnaryInstruction(classOpcode, val)
-    {
-        switch (val->type()) {
-          case MIRType::Int32:   MOZ_ASSERT(toType == MIRType::Float32); break;
-          case MIRType::Float32: MOZ_ASSERT(toType == MIRType::Int32);   break;
-          case MIRType::Double:  MOZ_ASSERT(toType == MIRType::Int64);   break;
-          case MIRType::Int64:   MOZ_ASSERT(toType == MIRType::Double);  break;
-          default:              MOZ_CRASH("unexpected reinterpret conversion");
-        }
-        setMovable();
-        setResultType(toType);
-    }
-
-  public:
-    INSTRUCTION_HEADER(WasmReinterpret)
-    TRIVIAL_NEW_WRAPPERS
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins);
-    }
-
-    ALLOW_CLONE(MWasmReinterpret)
-};
-
-class MRotate
-  : public MBinaryInstruction,
-    public NoTypePolicy::Data
-{
-    bool isLeftRotate_;
-
-    MRotate(MDefinition* input, MDefinition* count, MIRType type, bool isLeftRotate)
-      : MBinaryInstruction(classOpcode, input, count), isLeftRotate_(isLeftRotate)
-    {
-        setMovable();
-        setResultType(type);
-    }
-
-  public:
-    INSTRUCTION_HEADER(Rotate)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, input), (1, count))
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        return congruentIfOperandsEqual(ins) && ins->toRotate()->isLeftRotate() == isLeftRotate_;
-    }
-
-    bool isLeftRotate() const {
-        return isLeftRotate_;
-    }
-
-    ALLOW_CLONE(MRotate)
-};
-
-class MUnknownValue : public MNullaryInstruction
-{
-  protected:
-    MUnknownValue()
-      : MNullaryInstruction(classOpcode)
-    {
-        setResultType(MIRType::Value);
-    }
-
-  public:
-    INSTRUCTION_HEADER(UnknownValue)
-    TRIVIAL_NEW_WRAPPERS
-};
-
 #undef INSTRUCTION_HEADER
 
 void MUse::init(MDefinition* producer, MNode* consumer)
 {
     MOZ_ASSERT(!consumer_, "Initializing MUse that already has a consumer");
     MOZ_ASSERT(!producer_, "Initializing MUse that already has a producer");
     initUnchecked(producer, consumer);
 }
--- a/js/src/jit/MIRGraph.cpp
+++ b/js/src/jit/MIRGraph.cpp
@@ -1,15 +1,16 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/MIRGraph.h"
+#include "jit/MIRInstruction.h"
 
 #include "jit/BytecodeAnalysis.h"
 #include "jit/Ion.h"
 #include "jit/JitSpewer.h"
 #include "jit/MIR.h"
 #include "jit/MIRGenerator.h"
 #include "wasm/WasmTypes.h"
 
--- a/js/src/jit/MIRGraph.h
+++ b/js/src/jit/MIRGraph.h
@@ -8,16 +8,17 @@
 #define jit_MIRGraph_h
 
 // This file declares the data structures used to build a control-flow graph
 // containing MIR.
 
 #include "jit/FixedList.h"
 #include "jit/JitAllocPolicy.h"
 #include "jit/MIR.h"
+#include "jit/MIRInstruction.h"
 
 namespace js {
 namespace jit {
 
 class BytecodeAnalysis;
 class MBasicBlock;
 class MIRGraph;
 class MStart;
new file mode 100644
--- /dev/null
+++ b/js/src/jit/MIRInstruction.h
@@ -0,0 +1,10052 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_MIRInstruction_h
+#define jit_MIRInstruction_h
+
+// Generates an LSnapshot without further effect.
+class MStart : public MNullaryInstruction
+{
+    MStart()
+      : MNullaryInstruction(classOpcode)
+    { }
+
+  public:
+    INSTRUCTION_HEADER(Start)
+    TRIVIAL_NEW_WRAPPERS
+};
+
+// Instruction marking on entrypoint for on-stack replacement.
+// OSR may occur at loop headers (at JSOP_TRACE).
+// There is at most one MOsrEntry per MIRGraph.
+class MOsrEntry : public MNullaryInstruction
+{
+  protected:
+    MOsrEntry()
+      : MNullaryInstruction(classOpcode)
+    {
+        setResultType(MIRType::Pointer);
+    }
+
+  public:
+    INSTRUCTION_HEADER(OsrEntry)
+    TRIVIAL_NEW_WRAPPERS
+};
+
+// No-op instruction. This cannot be moved or eliminated, and is intended for
+// anchoring resume points at arbitrary points in a block.
+class MNop : public MNullaryInstruction
+{
+  protected:
+    MNop()
+      : MNullaryInstruction(classOpcode)
+    { }
+
+  public:
+    INSTRUCTION_HEADER(Nop)
+    TRIVIAL_NEW_WRAPPERS
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    ALLOW_CLONE(MNop)
+};
+
+// A constant js::Value.
+class MConstant : public MNullaryInstruction
+{
+    struct Payload {
+        union {
+            bool b;
+            int32_t i32;
+            int64_t i64;
+            float f;
+            double d;
+            JSString* str;
+            JS::Symbol* sym;
+            JSObject* obj;
+            uint64_t asBits;
+        };
+        Payload() : asBits(0) {}
+    };
+
+    Payload payload_;
+
+    static_assert(sizeof(Payload) == sizeof(uint64_t),
+                  "asBits must be big enough for all payload bits");
+
+#ifdef DEBUG
+    void assertInitializedPayload() const;
+#else
+    void assertInitializedPayload() const {}
+#endif
+
+  protected:
+    MConstant(TempAllocator& alloc, const Value& v, CompilerConstraintList* constraints);
+    explicit MConstant(JSObject* obj);
+    explicit MConstant(float f);
+    explicit MConstant(int64_t i);
+
+  public:
+    INSTRUCTION_HEADER(Constant)
+    static MConstant* New(TempAllocator& alloc, const Value& v,
+                          CompilerConstraintList* constraints = nullptr);
+    static MConstant* New(TempAllocator::Fallible alloc, const Value& v,
+                          CompilerConstraintList* constraints = nullptr);
+    static MConstant* New(TempAllocator& alloc, const Value& v, MIRType type);
+    static MConstant* NewFloat32(TempAllocator& alloc, double d);
+    static MConstant* NewInt64(TempAllocator& alloc, int64_t i);
+    static MConstant* NewConstraintlessObject(TempAllocator& alloc, JSObject* v);
+    static MConstant* Copy(TempAllocator& alloc, MConstant* src) {
+        return new(alloc) MConstant(*src);
+    }
+
+    // Try to convert this constant to boolean, similar to js::ToBoolean.
+    // Returns false if the type is MIRType::Magic*.
+    bool MOZ_MUST_USE valueToBoolean(bool* res) const;
+
+    // Like valueToBoolean, but returns the result directly instead of using
+    // an outparam. Should not be used if this constant might be a magic value.
+    bool valueToBooleanInfallible() const {
+        bool res;
+        MOZ_ALWAYS_TRUE(valueToBoolean(&res));
+        return res;
+    }
+
+    void printOpcode(GenericPrinter& out) const override;
+
+    HashNumber valueHash() const override;
+    bool congruentTo(const MDefinition* ins) const override;
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    MOZ_MUST_USE bool updateForReplacement(MDefinition* def) override {
+        MConstant* c = def->toConstant();
+        // During constant folding, we don't want to replace a float32
+        // value by a double value.
+        if (type() == MIRType::Float32)
+            return c->type() == MIRType::Float32;
+        if (type() == MIRType::Double)
+            return c->type() != MIRType::Float32;
+        return true;
+    }
+
+    void computeRange(TempAllocator& alloc) override;
+    bool needTruncation(TruncateKind kind) override;
+    void truncate() override;
+
+    bool canProduceFloat32() const override;
+
+    ALLOW_CLONE(MConstant)
+
+    bool equals(const MConstant* other) const {
+        assertInitializedPayload();
+        return type() == other->type() && payload_.asBits == other->payload_.asBits;
+    }
+
+    bool toBoolean() const {
+        MOZ_ASSERT(type() == MIRType::Boolean);
+        return payload_.b;
+    }
+    int32_t toInt32() const {
+        MOZ_ASSERT(type() == MIRType::Int32);
+        return payload_.i32;
+    }
+    int64_t toInt64() const {
+        MOZ_ASSERT(type() == MIRType::Int64);
+        return payload_.i64;
+    }
+    bool isInt32(int32_t i) const {
+        return type() == MIRType::Int32 && payload_.i32 == i;
+    }
+    const double& toDouble() const {
+        MOZ_ASSERT(type() == MIRType::Double);
+        return payload_.d;
+    }
+    const float& toFloat32() const {
+        MOZ_ASSERT(type() == MIRType::Float32);
+        return payload_.f;
+    }
+    JSString* toString() const {
+        MOZ_ASSERT(type() == MIRType::String);
+        return payload_.str;
+    }
+    JS::Symbol* toSymbol() const {
+        MOZ_ASSERT(type() == MIRType::Symbol);
+        return payload_.sym;
+    }
+    JSObject& toObject() const {
+        MOZ_ASSERT(type() == MIRType::Object);
+        return *payload_.obj;
+    }
+    JSObject* toObjectOrNull() const {
+        if (type() == MIRType::Object)
+            return payload_.obj;
+        MOZ_ASSERT(type() == MIRType::Null);
+        return nullptr;
+    }
+
+    bool isTypeRepresentableAsDouble() const {
+        return IsTypeRepresentableAsDouble(type());
+    }
+    double numberToDouble() const {
+        MOZ_ASSERT(isTypeRepresentableAsDouble());
+        if (type() == MIRType::Int32)
+            return toInt32();
+        if (type() == MIRType::Double)
+            return toDouble();
+        return toFloat32();
+    }
+
+    // Convert this constant to a js::Value. Float32 constants will be stored
+    // as DoubleValue and NaNs are canonicalized. Callers must be careful: not
+    // all constants can be represented by js::Value (wasm supports int64).
+    Value toJSValue() const;
+
+    bool appendRoots(MRootList& roots) const override;
+};
+
+// Floating-point value as created by wasm. Just a constant value, used to
+// effectively inhibite all the MIR optimizations. This uses the same LIR nodes
+// as a MConstant of the same type would.
+class MWasmFloatConstant : public MNullaryInstruction
+{
+    union {
+        float f32_;
+        double f64_;
+        uint64_t bits_;
+    } u;
+
+    explicit MWasmFloatConstant(MIRType type)
+      : MNullaryInstruction(classOpcode)
+    {
+        u.bits_ = 0;
+        setResultType(type);
+    }
+
+  public:
+    INSTRUCTION_HEADER(WasmFloatConstant)
+
+    static MWasmFloatConstant* NewDouble(TempAllocator& alloc, double d) {
+        auto* ret = new(alloc) MWasmFloatConstant(MIRType::Double);
+        ret->u.f64_ = d;
+        return ret;
+    }
+
+    static MWasmFloatConstant* NewFloat32(TempAllocator& alloc, float f) {
+        auto* ret = new(alloc) MWasmFloatConstant(MIRType::Float32);
+        ret->u.f32_ = f;
+        return ret;
+    }
+
+    HashNumber valueHash() const override;
+    bool congruentTo(const MDefinition* ins) const override;
+    AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+    const double& toDouble() const {
+        MOZ_ASSERT(type() == MIRType::Double);
+        return u.f64_;
+    }
+    const float& toFloat32() const {
+        MOZ_ASSERT(type() == MIRType::Float32);
+        return u.f32_;
+    }
+};
+
+// A constant SIMD value.
+class MSimdConstant
+  : public MNullaryInstruction
+{
+    SimdConstant value_;
+
+  protected:
+    MSimdConstant(const SimdConstant& v, MIRType type)
+      : MNullaryInstruction(classOpcode),
+        value_(v)
+    {
+        MOZ_ASSERT(IsSimdType(type));
+        setMovable();
+        setResultType(type);
+    }
+
+  public:
+    INSTRUCTION_HEADER(SimdConstant)
+    TRIVIAL_NEW_WRAPPERS
+
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isSimdConstant())
+            return false;
+        // Bool32x4 and Int32x4 share the same underlying SimdConstant representation.
+        if (type() != ins->type())
+            return false;
+        return value() == ins->toSimdConstant()->value();
+    }
+
+    const SimdConstant& value() const {
+        return value_;
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    ALLOW_CLONE(MSimdConstant)
+};
+
+class MParameter : public MNullaryInstruction
+{
+    int32_t index_;
+
+    MParameter(int32_t index, TemporaryTypeSet* types)
+      : MNullaryInstruction(classOpcode),
+        index_(index)
+    {
+        setResultType(MIRType::Value);
+        setResultTypeSet(types);
+    }
+
+  public:
+    INSTRUCTION_HEADER(Parameter)
+    TRIVIAL_NEW_WRAPPERS
+
+    static const int32_t THIS_SLOT = -1;
+    int32_t index() const {
+        return index_;
+    }
+    void printOpcode(GenericPrinter& out) const override;
+
+    HashNumber valueHash() const override;
+    bool congruentTo(const MDefinition* ins) const override;
+};
+
+class MCallee : public MNullaryInstruction
+{
+  public:
+    MCallee()
+      : MNullaryInstruction(classOpcode)
+    {
+        setResultType(MIRType::Object);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(Callee)
+    TRIVIAL_NEW_WRAPPERS
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+class MIsConstructing : public MNullaryInstruction
+{
+  public:
+    MIsConstructing()
+      : MNullaryInstruction(classOpcode)
+    {
+        setResultType(MIRType::Boolean);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(IsConstructing)
+    TRIVIAL_NEW_WRAPPERS
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+class MNewArrayCopyOnWrite : public MNullaryInstruction
+{
+    CompilerGCPointer<ArrayObject*> templateObject_;
+    gc::InitialHeap initialHeap_;
+
+    MNewArrayCopyOnWrite(TempAllocator& alloc, CompilerConstraintList* constraints,
+                         ArrayObject* templateObject, gc::InitialHeap initialHeap)
+      : MNullaryInstruction(classOpcode),
+        templateObject_(templateObject),
+        initialHeap_(initialHeap)
+    {
+        MOZ_ASSERT(!templateObject->isSingleton());
+        setResultType(MIRType::Object);
+        setResultTypeSet(MakeSingletonTypeSet(alloc, constraints, templateObject));
+    }
+
+  public:
+    INSTRUCTION_HEADER(NewArrayCopyOnWrite)
+    TRIVIAL_NEW_WRAPPERS_WITH_ALLOC
+
+    ArrayObject* templateObject() const {
+        return templateObject_;
+    }
+
+    gc::InitialHeap initialHeap() const {
+        return initialHeap_;
+    }
+
+    virtual AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    bool appendRoots(MRootList& roots) const override {
+        return roots.append(templateObject_);
+    }
+};
+
+class MNewTypedObject : public MNullaryInstruction
+{
+    CompilerGCPointer<InlineTypedObject*> templateObject_;
+    gc::InitialHeap initialHeap_;
+
+    MNewTypedObject(TempAllocator& alloc, CompilerConstraintList* constraints,
+                    InlineTypedObject* templateObject,
+                    gc::InitialHeap initialHeap)
+      : MNullaryInstruction(classOpcode),
+        templateObject_(templateObject),
+        initialHeap_(initialHeap)
+    {
+        setResultType(MIRType::Object);
+        setResultTypeSet(MakeSingletonTypeSet(alloc, constraints, templateObject));
+    }
+
+  public:
+    INSTRUCTION_HEADER(NewTypedObject)
+    TRIVIAL_NEW_WRAPPERS_WITH_ALLOC
+
+    InlineTypedObject* templateObject() const {
+        return templateObject_;
+    }
+
+    gc::InitialHeap initialHeap() const {
+        return initialHeap_;
+    }
+
+    virtual AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    bool appendRoots(MRootList& roots) const override {
+        return roots.append(templateObject_);
+    }
+};
+
+class MBail : public MNullaryInstruction
+{
+  protected:
+    explicit MBail(BailoutKind kind)
+      : MNullaryInstruction(classOpcode)
+    {
+        bailoutKind_ = kind;
+        setGuard();
+    }
+
+  private:
+    BailoutKind bailoutKind_;
+
+  public:
+    INSTRUCTION_HEADER(Bail)
+
+    static MBail*
+    New(TempAllocator& alloc, BailoutKind kind) {
+        return new(alloc) MBail(kind);
+    }
+    static MBail*
+    New(TempAllocator& alloc) {
+        return new(alloc) MBail(Bailout_Inevitable);
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    BailoutKind bailoutKind() const {
+        return bailoutKind_;
+    }
+};
+
+// This class serve as a way to force the encoding of a snapshot, even if there
+// is no resume point using it.  This is useful to run MAssertRecoveredOnBailout
+// assertions.
+class MEncodeSnapshot : public MNullaryInstruction
+{
+  protected:
+    MEncodeSnapshot()
+      : MNullaryInstruction(classOpcode)
+    {
+        setGuard();
+    }
+
+  public:
+    INSTRUCTION_HEADER(EncodeSnapshot)
+
+    static MEncodeSnapshot*
+    New(TempAllocator& alloc) {
+        return new(alloc) MEncodeSnapshot();
+    }
+};
+
+class MRunOncePrologue
+  : public MNullaryInstruction
+{
+  protected:
+    MRunOncePrologue()
+      : MNullaryInstruction(classOpcode)
+    {
+        setGuard();
+    }
+
+  public:
+    INSTRUCTION_HEADER(RunOncePrologue)
+    TRIVIAL_NEW_WRAPPERS
+
+    bool possiblyCalls() const override {
+        return true;
+    }
+};
+
+// Inline implementation of Math.random().
+class MRandom : public MNullaryInstruction
+{
+    MRandom()
+      : MNullaryInstruction(classOpcode)
+    {
+        setResultType(MIRType::Double);
+    }
+
+  public:
+    INSTRUCTION_HEADER(Random)
+    TRIVIAL_NEW_WRAPPERS
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    bool possiblyCalls() const override {
+        return true;
+    }
+
+    void computeRange(TempAllocator& alloc) override;
+
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+
+    bool canRecoverOnBailout() const override {
+#ifdef JS_MORE_DETERMINISTIC
+        return false;
+#else
+        return true;
+#endif
+    }
+
+    ALLOW_CLONE(MRandom)
+};
+
+class MNullarySharedStub
+  : public MNullaryInstruction
+{
+    explicit MNullarySharedStub()
+      : MNullaryInstruction(classOpcode)
+    {
+        setResultType(MIRType::Value);
+    }
+
+  public:
+    INSTRUCTION_HEADER(NullarySharedStub)
+    TRIVIAL_NEW_WRAPPERS
+};
+
+// Check the current frame for over-recursion past the global stack limit.
+class MCheckOverRecursed
+  : public MNullaryInstruction
+{
+    MCheckOverRecursed()
+      : MNullaryInstruction(classOpcode)
+    { }
+
+  public:
+    INSTRUCTION_HEADER(CheckOverRecursed)
+    TRIVIAL_NEW_WRAPPERS
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// Check whether we need to fire the interrupt handler.
+class MInterruptCheck : public MNullaryInstruction
+{
+    MInterruptCheck()
+      : MNullaryInstruction(classOpcode)
+    {
+        setGuard();
+    }
+
+  public:
+    INSTRUCTION_HEADER(InterruptCheck)
+    TRIVIAL_NEW_WRAPPERS
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// Unconditionally throw an uninitialized let error.
+class MThrowRuntimeLexicalError : public MNullaryInstruction
+{
+    unsigned errorNumber_;
+
+    explicit MThrowRuntimeLexicalError(unsigned errorNumber)
+      : MNullaryInstruction(classOpcode),
+        errorNumber_(errorNumber)
+    {
+        setGuard();
+        setResultType(MIRType::None);
+    }
+
+  public:
+    INSTRUCTION_HEADER(ThrowRuntimeLexicalError)
+    TRIVIAL_NEW_WRAPPERS
+
+    unsigned errorNumber() const {
+        return errorNumber_;
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// In the prologues of global and eval scripts, check for redeclarations.
+class MGlobalNameConflictsCheck : public MNullaryInstruction
+{
+    MGlobalNameConflictsCheck()
+      : MNullaryInstruction(classOpcode)
+    {
+        setGuard();
+    }
+
+  public:
+    INSTRUCTION_HEADER(GlobalNameConflictsCheck)
+    TRIVIAL_NEW_WRAPPERS
+};
+
+class MDefLexical
+  : public MNullaryInstruction
+{
+    CompilerPropertyName name_; // Target name to be defined.
+    unsigned attrs_; // Attributes to be set.
+
+  private:
+    MDefLexical(PropertyName* name, unsigned attrs)
+      : MNullaryInstruction(classOpcode),
+        name_(name),
+        attrs_(attrs)
+    { }
+
+  public:
+    INSTRUCTION_HEADER(DefLexical)
+    TRIVIAL_NEW_WRAPPERS
+
+    PropertyName* name() const {
+        return name_;
+    }
+    unsigned attrs() const {
+        return attrs_;
+    }
+    bool appendRoots(MRootList& roots) const override {
+        return roots.append(name_);
+    }
+};
+
+class MRegExp : public MNullaryInstruction
+{
+    CompilerGCPointer<RegExpObject*> source_;
+    bool mustClone_;
+    bool hasShared_;
+
+    MRegExp(TempAllocator& alloc, CompilerConstraintList* constraints, RegExpObject* source,
+            bool hasShared)
+      : MNullaryInstruction(classOpcode),
+        source_(source),
+        mustClone_(true),
+        hasShared_(hasShared)
+    {
+        setResultType(MIRType::Object);
+        setResultTypeSet(MakeSingletonTypeSet(alloc, constraints, source));
+    }
+
+  public:
+    INSTRUCTION_HEADER(RegExp)
+    TRIVIAL_NEW_WRAPPERS_WITH_ALLOC
+
+    void setDoNotClone() {
+        mustClone_ = false;
+    }
+    bool mustClone() const {
+        return mustClone_;
+    }
+    bool hasShared() const {
+        return hasShared_;
+    }
+    RegExpObject* source() const {
+        return source_;
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    bool possiblyCalls() const override {
+        return true;
+    }
+    bool appendRoots(MRootList& roots) const override {
+        return roots.append(source_);
+    }
+};
+
+class MClassConstructor : public MNullaryInstruction
+{
+    jsbytecode* pc_;
+
+    explicit MClassConstructor(jsbytecode* pc)
+      : MNullaryInstruction(classOpcode),
+        pc_(pc)
+    {
+        setResultType(MIRType::Object);
+    }
+
+  public:
+    INSTRUCTION_HEADER(ClassConstructor)
+    TRIVIAL_NEW_WRAPPERS
+
+    jsbytecode* pc() const {
+      return pc_;
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// A constant value for some object's typed array elements.
+class MConstantElements : public MNullaryInstruction
+{
+    SharedMem<void*> value_;
+
+  protected:
+    explicit MConstantElements(SharedMem<void*> v)
+      : MNullaryInstruction(classOpcode),
+        value_(v)
+    {
+        setResultType(MIRType::Elements);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(ConstantElements)
+    TRIVIAL_NEW_WRAPPERS
+
+    SharedMem<void*> value() const {
+        return value_;
+    }
+
+    void printOpcode(GenericPrinter& out) const override;
+
+    HashNumber valueHash() const override {
+        return (HashNumber)(size_t) value_.asValue();
+    }
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return ins->isConstantElements() && ins->toConstantElements()->value() == value();
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    ALLOW_CLONE(MConstantElements)
+};
+
+class MCallGetIntrinsicValue : public MNullaryInstruction
+{
+    CompilerPropertyName name_;
+
+    explicit MCallGetIntrinsicValue(PropertyName* name)
+      : MNullaryInstruction(classOpcode),
+        name_(name)
+    {
+        setResultType(MIRType::Value);
+    }
+
+  public:
+    INSTRUCTION_HEADER(CallGetIntrinsicValue)
+    TRIVIAL_NEW_WRAPPERS
+
+    PropertyName* name() const {
+        return name_;
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    bool possiblyCalls() const override {
+        return true;
+    }
+    bool appendRoots(MRootList& roots) const override {
+        return roots.append(name_);
+    }
+};
+
+class MArgumentsLength : public MNullaryInstruction
+{
+    MArgumentsLength()
+      : MNullaryInstruction(classOpcode)
+    {
+        setResultType(MIRType::Int32);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(ArgumentsLength)
+    TRIVIAL_NEW_WRAPPERS
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        // Arguments |length| cannot be mutated by Ion Code.
+        return AliasSet::None();
+    }
+
+    void computeRange(TempAllocator& alloc) override;
+
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+
+    bool canRecoverOnBailout() const override {
+        return true;
+    }
+};
+
+class MNewTarget : public MNullaryInstruction
+{
+    MNewTarget() : MNullaryInstruction(classOpcode) {
+        setResultType(MIRType::Value);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(NewTarget)
+    TRIVIAL_NEW_WRAPPERS
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+class MNewNamedLambdaObject : public MNullaryInstruction
+{
+    CompilerGCPointer<LexicalEnvironmentObject*> templateObj_;
+
+    explicit MNewNamedLambdaObject(LexicalEnvironmentObject* templateObj)
+      : MNullaryInstruction(classOpcode),
+        templateObj_(templateObj)
+    {
+        setResultType(MIRType::Object);
+    }
+
+  public:
+    INSTRUCTION_HEADER(NewNamedLambdaObject)
+    TRIVIAL_NEW_WRAPPERS
+
+    LexicalEnvironmentObject* templateObj() {
+        return templateObj_;
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    bool appendRoots(MRootList& roots) const override {
+        return roots.append(templateObj_);
+    }
+};
+
+// Increase the warm-up counter of the provided script upon execution and test if
+// the warm-up counter surpasses the threshold. Upon hit it will recompile the
+// outermost script (i.e. not the inlined script).
+class MRecompileCheck : public MNullaryInstruction
+{
+  public:
+    enum RecompileCheckType {
+        RecompileCheck_OptimizationLevel,
+        RecompileCheck_Inlining
+    };
+
+  private:
+    JSScript* script_;
+    uint32_t recompileThreshold_;
+    bool forceRecompilation_;
+    bool increaseWarmUpCounter_;
+
+    MRecompileCheck(JSScript* script, uint32_t recompileThreshold, RecompileCheckType type)
+      : MNullaryInstruction(classOpcode),
+        script_(script),
+        recompileThreshold_(recompileThreshold)
+    {
+        switch (type) {
+          case RecompileCheck_OptimizationLevel:
+            forceRecompilation_ = false;
+            increaseWarmUpCounter_ = true;
+            break;
+          case RecompileCheck_Inlining:
+            forceRecompilation_ = true;
+            increaseWarmUpCounter_ = false;
+            break;
+          default:
+            MOZ_CRASH("Unexpected recompile check type");
+        }
+
+        setGuard();
+    }
+
+  public:
+    INSTRUCTION_HEADER(RecompileCheck)
+    TRIVIAL_NEW_WRAPPERS
+
+    JSScript* script() const {
+        return script_;
+    }
+
+    uint32_t recompileThreshold() const {
+        return recompileThreshold_;
+    }
+
+    bool forceRecompilation() const {
+        return forceRecompilation_;
+    }
+
+    bool increaseWarmUpCounter() const {
+        return increaseWarmUpCounter_;
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+class MDebugger : public MNullaryInstruction
+{
+    MDebugger()
+      : MNullaryInstruction(classOpcode)
+    { }
+
+  public:
+    INSTRUCTION_HEADER(Debugger)
+    TRIVIAL_NEW_WRAPPERS
+};
+
+class MWasmParameter : public MNullaryInstruction
+{
+    ABIArg abi_;
+
+    MWasmParameter(ABIArg abi, MIRType mirType)
+      : MNullaryInstruction(classOpcode),
+        abi_(abi)
+    {
+        setResultType(mirType);
+    }
+
+  public:
+    INSTRUCTION_HEADER(WasmParameter)
+    TRIVIAL_NEW_WRAPPERS
+
+    ABIArg abi() const { return abi_; }
+};
+
+class MUnknownValue : public MNullaryInstruction
+{
+  protected:
+    MUnknownValue()
+      : MNullaryInstruction(classOpcode)
+    {
+        setResultType(MIRType::Value);
+    }
+
+  public:
+    INSTRUCTION_HEADER(UnknownValue)
+    TRIVIAL_NEW_WRAPPERS
+};
+
+// Truncation barrier. This is intended for protecting its input against
+// follow-up truncation optimizations.
+class MLimitedTruncate
+  : public MUnaryInstruction,
+    public ConvertToInt32Policy<0>::Data
+{
+  public:
+    TruncateKind truncate_;
+    TruncateKind truncateLimit_;
+
+  protected:
+    MLimitedTruncate(MDefinition* input, TruncateKind limit)
+      : MUnaryInstruction(classOpcode, input),
+        truncate_(NoTruncate),
+        truncateLimit_(limit)
+    {
+        setResultType(MIRType::Int32);
+        setResultTypeSet(input->resultTypeSet());
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(LimitedTruncate)
+    TRIVIAL_NEW_WRAPPERS
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    void computeRange(TempAllocator& alloc) override;
+    bool needTruncation(TruncateKind kind) override;
+    TruncateKind operandTruncateKind(size_t index) const override;
+    TruncateKind truncateKind() const {
+        return truncate_;
+    }
+    void setTruncateKind(TruncateKind kind) {
+        truncate_ = kind;
+    }
+};
+
+// Generic constructor of SIMD values with identical lanes.
+class MSimdSplat
+  : public MUnaryInstruction,
+    public SimdScalarPolicy<0>::Data
+{
+  protected:
+    MSimdSplat(MDefinition* v, MIRType type)
+      : MUnaryInstruction(classOpcode, v)
+    {
+        MOZ_ASSERT(IsSimdType(type));
+        setMovable();
+        setResultType(type);
+    }
+
+  public:
+    INSTRUCTION_HEADER(SimdSplat)
+    TRIVIAL_NEW_WRAPPERS
+
+    bool canConsumeFloat32(MUse* use) const override {
+        return SimdTypeToLaneType(type()) == MIRType::Float32;
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+
+    ALLOW_CLONE(MSimdSplat)
+};
+
+// Converts all lanes of a given vector into the type of another vector
+class MSimdConvert
+  : public MUnaryInstruction,
+    public SimdPolicy<0>::Data
+{
+    // When either fromType or toType is an integer vector, should it be treated
+    // as signed or unsigned. Note that we don't support int-int conversions -
+    // use MSimdReinterpretCast for that.
+    SimdSign sign_;
+    wasm::BytecodeOffset bytecodeOffset_;
+
+    MSimdConvert(MDefinition* obj, MIRType toType, SimdSign sign,
+                 wasm::BytecodeOffset bytecodeOffset)
+      : MUnaryInstruction(classOpcode, obj), sign_(sign), bytecodeOffset_(bytecodeOffset)
+    {
+        MIRType fromType = obj->type();
+        MOZ_ASSERT(IsSimdType(fromType));
+        MOZ_ASSERT(IsSimdType(toType));
+        // All conversions are int <-> float, so signedness is required.
+        MOZ_ASSERT(sign != SimdSign::NotApplicable);
+
+        setResultType(toType);
+        specialization_ = fromType; // expects fromType as input
+
+        setMovable();
+        if (IsFloatingPointSimdType(fromType) && IsIntegerSimdType(toType)) {
+            // Does the extra range check => do not remove
+            setGuard();
+        }
+    }
+
+    static MSimdConvert* New(TempAllocator& alloc, MDefinition* obj, MIRType toType, SimdSign sign,
+                             wasm::BytecodeOffset bytecodeOffset)
+    {
+        return new (alloc) MSimdConvert(obj, toType, sign, bytecodeOffset);
+    }
+
+  public:
+    INSTRUCTION_HEADER(SimdConvert)
+
+    // Create a MSimdConvert instruction and add it to the basic block.
+    // Possibly create and add an equivalent sequence of instructions instead if
+    // the current target doesn't support the requested conversion directly.
+    // Return the inserted MInstruction that computes the converted value.
+    static MInstruction* AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* obj,
+                                      MIRType toType, SimdSign sign,
+                                      wasm::BytecodeOffset bytecodeOffset = wasm::BytecodeOffset());
+
+    SimdSign signedness() const {
+        return sign_;
+    }
+    wasm::BytecodeOffset bytecodeOffset() const {
+        return bytecodeOffset_;
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!congruentIfOperandsEqual(ins))
+            return false;
+        const MSimdConvert* other = ins->toSimdConvert();
+        return sign_ == other->sign_;
+    }
+    ALLOW_CLONE(MSimdConvert)
+};
+
// Casts bits of a vector input to another SIMD type (doesn't generate code).
class MSimdReinterpretCast
  : public MUnaryInstruction,
    public SimdPolicy<0>::Data
{
    MSimdReinterpretCast(MDefinition* obj, MIRType toType)
      : MUnaryInstruction(classOpcode, obj)
    {
        // Both the input type and the requested output type must be SIMD
        // types; the instruction only relabels the bits.
        MIRType fromType = obj->type();
        MOZ_ASSERT(IsSimdType(fromType));
        MOZ_ASSERT(IsSimdType(toType));
        setMovable();
        setResultType(toType);
        specialization_ = fromType; // expects fromType as input
    }

  public:
    INSTRUCTION_HEADER(SimdReinterpretCast)
    TRIVIAL_NEW_WRAPPERS

    // Pure value operation: no loads or stores.
    AliasSet getAliasSet() const override {
        return AliasSet::None();
    }
    bool congruentTo(const MDefinition* ins) const override {
        return congruentIfOperandsEqual(ins);
    }
    ALLOW_CLONE(MSimdReinterpretCast)
};
+
// Extracts a lane element from a given vector type, given by its lane symbol.
//
// For integer SIMD types, a SimdSign must be provided so the lane value can be
// converted to a scalar correctly.
class MSimdExtractElement
  : public MUnaryInstruction,
    public SimdPolicy<0>::Data
{
  protected:
    // Index of the lane to extract (must be < the vector's lane count).
    unsigned lane_;
    // Signedness used when converting an integer lane to a scalar;
    // SimdSign::NotApplicable for non-integer vectors.
    SimdSign sign_;

    MSimdExtractElement(MDefinition* obj, MIRType laneType, unsigned lane, SimdSign sign)
      : MUnaryInstruction(classOpcode, obj), lane_(lane), sign_(sign)
    {
        MIRType vecType = obj->type();
        MOZ_ASSERT(IsSimdType(vecType));
        MOZ_ASSERT(lane < SimdTypeToLength(vecType));
        MOZ_ASSERT(!IsSimdType(laneType));
        MOZ_ASSERT((sign != SimdSign::NotApplicable) == IsIntegerSimdType(vecType),
                   "Signedness must be specified for integer SIMD extractLanes");
        // The resulting type should match the lane type.
        // Allow extracting boolean lanes directly into an Int32 (for wasm).
        // Allow extracting Uint32 lanes into a double.
        //
        // We also allow extracting Uint32 lanes into a MIRType::Int32. This is
        // equivalent to extracting the Uint32 lane to a double and then
        // applying MTruncateToInt32, but it bypasses the conversion to/from
        // double.
        MOZ_ASSERT(SimdTypeToLaneType(vecType) == laneType ||
                   (IsBooleanSimdType(vecType) && laneType == MIRType::Int32) ||
                   (vecType == MIRType::Int32x4 && laneType == MIRType::Double &&
                    sign == SimdSign::Unsigned));

        setMovable();
        specialization_ = vecType;
        setResultType(laneType);
    }

  public:
    INSTRUCTION_HEADER(SimdExtractElement)
    TRIVIAL_NEW_WRAPPERS

    unsigned lane() const {
        return lane_;
    }

    SimdSign signedness() const {
        return sign_;
    }

    AliasSet getAliasSet() const override {
        return AliasSet::None();
    }
    bool congruentTo(const MDefinition* ins) const override {
        // Two extracts are congruent only when they pull the same lane with
        // the same signedness from congruent operands.
        if (!ins->isSimdExtractElement())
            return false;
        const MSimdExtractElement* other = ins->toSimdExtractElement();
        if (other->lane_ != lane_ || other->sign_ != sign_)
            return false;
        return congruentIfOperandsEqual(other);
    }
    ALLOW_CLONE(MSimdExtractElement)
};
+
// Returns true if all lanes are true.
class MSimdAllTrue
  : public MUnaryInstruction,
    public SimdPolicy<0>::Data
{
  protected:
    explicit MSimdAllTrue(MDefinition* obj, MIRType result)
      : MUnaryInstruction(classOpcode, obj)
    {
        // Input must be a boolean SIMD vector; the scalar result is either a
        // JS Boolean or an Int32 (the latter is used by wasm).
        MIRType simdType = obj->type();
        MOZ_ASSERT(IsBooleanSimdType(simdType));
        MOZ_ASSERT(result == MIRType::Boolean || result == MIRType::Int32);
        setResultType(result);
        specialization_ = simdType;
        setMovable();
    }

  public:
    INSTRUCTION_HEADER(SimdAllTrue)
    TRIVIAL_NEW_WRAPPERS

    AliasSet getAliasSet() const override {
        return AliasSet::None();
    }
    bool congruentTo(const MDefinition* ins) const override {
        return congruentIfOperandsEqual(ins);
    }
    ALLOW_CLONE(MSimdAllTrue)
};
+
// Returns true if any lane is true.
class MSimdAnyTrue
  : public MUnaryInstruction,
    public SimdPolicy<0>::Data
{
  protected:
    explicit MSimdAnyTrue(MDefinition* obj, MIRType result)
      : MUnaryInstruction(classOpcode, obj)
    {
        // Input must be a boolean SIMD vector; the scalar result is either a
        // JS Boolean or an Int32 (the latter is used by wasm).
        MIRType simdType = obj->type();
        MOZ_ASSERT(IsBooleanSimdType(simdType));
        MOZ_ASSERT(result == MIRType::Boolean || result == MIRType::Int32);
        setResultType(result);
        specialization_ = simdType;
        setMovable();
    }

  public:
    INSTRUCTION_HEADER(SimdAnyTrue)
    TRIVIAL_NEW_WRAPPERS

    AliasSet getAliasSet() const override {
        return AliasSet::None();
    }
    bool congruentTo(const MDefinition* ins) const override {
        return congruentIfOperandsEqual(ins);
    }

    ALLOW_CLONE(MSimdAnyTrue)
};
+
// Applies a swizzle operation to the input, putting the input lanes as
// indicated in the output register's lanes. This implements the SIMD.js
// "swizzle" function, that takes one vector and an array of lane indexes.
class MSimdSwizzle
  : public MUnaryInstruction,
    public MSimdShuffleBase,
    public NoTypePolicy::Data
{
  protected:
    MSimdSwizzle(MDefinition* obj, const uint8_t lanes[])
      : MUnaryInstruction(classOpcode, obj), MSimdShuffleBase(lanes, obj->type())
    {
        // Every selected lane index must reference a lane of the input vector.
        for (unsigned i = 0; i < arity_; i++)
            MOZ_ASSERT(lane(i) < arity_);
        setResultType(obj->type());
        setMovable();
    }

  public:
    INSTRUCTION_HEADER(SimdSwizzle)
    TRIVIAL_NEW_WRAPPERS

    bool congruentTo(const MDefinition* ins) const override {
        // Congruent only when the lane selection matches as well.
        if (!ins->isSimdSwizzle())
            return false;
        const MSimdSwizzle* other = ins->toSimdSwizzle();
        return sameLanes(other) && congruentIfOperandsEqual(other);
    }

    AliasSet getAliasSet() const override {
        return AliasSet::None();
    }

    // Defined out of line; may simplify the swizzle (see implementation).
    MDefinition* foldsTo(TempAllocator& alloc) override;

    ALLOW_CLONE(MSimdSwizzle)
};
+
+class MSimdUnaryArith
+  : public MUnaryInstruction,
+    public SimdSameAsReturnedTypePolicy<0>::Data
+{
+  public:
+    enum Operation {
+#define OP_LIST_(OP) OP,
+        FOREACH_FLOAT_SIMD_UNOP(OP_LIST_)
+        neg,
+        not_
+#undef OP_LIST_
+    };
+
+    static const char* OperationName(Operation op) {
+        switch (op) {
+          case abs:                         return "abs";
+          case neg:                         return "neg";
+          case not_:                        return "not";
+          case reciprocalApproximation:     return "reciprocalApproximation";
+          case reciprocalSqrtApproximation: return "reciprocalSqrtApproximation";
+          case sqrt:                        return "sqrt";
+        }
+        MOZ_CRASH("unexpected operation");
+    }
+
+  private:
+    Operation operation_;
+
+    MSimdUnaryArith(MDefinition* def, Operation op)
+      : MUnaryInstruction(classOpcode, def), operation_(op)
+    {
+        MIRType type = def->type();
+        MOZ_ASSERT(IsSimdType(type));
+        MOZ_ASSERT_IF(IsIntegerSimdType(type), op == neg || op == not_);
+        setResultType(type);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(SimdUnaryArith)
+    TRIVIAL_NEW_WRAPPERS
+
+    Operation operation() const { return operation_; }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins) && ins->toSimdUnaryArith()->operation() == operation();
+    }
+
+    void printOpcode(GenericPrinter& out) const override;
+
+    ALLOW_CLONE(MSimdUnaryArith);
+};
+
// Deep clone a constant JSObject.
class MCloneLiteral
  : public MUnaryInstruction,
    public ObjectPolicy<0>::Data
{
  protected:
    explicit MCloneLiteral(MDefinition* obj)
      : MUnaryInstruction(classOpcode, obj)
    {
        setResultType(MIRType::Object);
    }

  public:
    INSTRUCTION_HEADER(CloneLiteral)
    TRIVIAL_NEW_WRAPPERS
    // NOTE(review): no getAliasSet override, so the MDefinition default
    // applies — presumably intended since cloning allocates; confirm.
};
+
class MNewArray
  : public MUnaryInstruction,
    public NoTypePolicy::Data
{
  private:
    // Number of elements to allocate for the array.
    uint32_t length_;

    // Heap where the array should be allocated.
    gc::InitialHeap initialHeap_;

    // Whether values written to this array should be converted to double first.
    bool convertDoubleElements_;

    // Bytecode location of the allocation site.
    jsbytecode* pc_;

    // When true the allocation goes through a VM call (see NewVM below)
    // instead of inline JIT code.
    bool vmCall_;

    MNewArray(TempAllocator& alloc, CompilerConstraintList* constraints, uint32_t length,
              MConstant* templateConst, gc::InitialHeap initialHeap, jsbytecode* pc,
              bool vmCall = false);

  public:
    INSTRUCTION_HEADER(NewArray)
    TRIVIAL_NEW_WRAPPERS_WITH_ALLOC

    // Convenience constructor for the VM-call flavor of the allocation.
    static MNewArray* NewVM(TempAllocator& alloc, CompilerConstraintList* constraints,
                            uint32_t length, MConstant* templateConst,
                            gc::InitialHeap initialHeap, jsbytecode* pc)
    {
        return new(alloc) MNewArray(alloc, constraints, length, templateConst, initialHeap, pc,
                                    true);
    }

    uint32_t length() const {
        return length_;
    }

    // Template object held by the MConstant in operand 0; may be null.
    JSObject* templateObject() const {
        return getOperand(0)->toConstant()->toObjectOrNull();
    }

    gc::InitialHeap initialHeap() const {
        return initialHeap_;
    }

    jsbytecode* pc() const {
        return pc_;
    }

    bool isVMCall() const {
        return vmCall_;
    }

    bool convertDoubleElements() const {
        return convertDoubleElements_;
    }

    // NewArray is marked as non-effectful because all our allocations are
    // either lazy when we are using "new Array(length)" or bounded by the
    // script or the stack size when we are using "new Array(...)" or "[...]"
    // notations.  So we might have to allocate the array twice if we bail
    // during the computation of the first element of the square bracket
    // notation.
    virtual AliasSet getAliasSet() const override {
        return AliasSet::None();
    }

    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
    bool canRecoverOnBailout() const override {
        // The template object can safely be used in the recover instruction
        // because it can never be mutated by any other function execution.
        return templateObject() != nullptr;
    }
};
+
// Allocate an array whose length is only known at runtime (operand 0).
class MNewArrayDynamicLength
  : public MUnaryInstruction,
    public IntPolicy<0>::Data
{
    CompilerObject templateObject_;
    gc::InitialHeap initialHeap_;

    MNewArrayDynamicLength(TempAllocator& alloc, CompilerConstraintList* constraints,
                           JSObject* templateObject, gc::InitialHeap initialHeap,
                           MDefinition* length)
      : MUnaryInstruction(classOpcode, length),
        templateObject_(templateObject),
        initialHeap_(initialHeap)
    {
        setGuard(); // Need to throw if length is negative.
        setResultType(MIRType::Object);
        if (!templateObject->isSingleton())
            setResultTypeSet(MakeSingletonTypeSet(alloc, constraints, templateObject));
    }

  public:
    INSTRUCTION_HEADER(NewArrayDynamicLength)
    TRIVIAL_NEW_WRAPPERS_WITH_ALLOC
    NAMED_OPERANDS((0, length))

    JSObject* templateObject() const {
        return templateObject_;
    }
    gc::InitialHeap initialHeap() const {
        return initialHeap_;
    }

    virtual AliasSet getAliasSet() const override {
        return AliasSet::None();
    }

    // Keep the template object alive across GC while this MIR node exists.
    bool appendRoots(MRootList& roots) const override {
        return roots.append(templateObject_);
    }
};
+
// Allocate a typed array from a template object (held in operand 0).
class MNewTypedArray
  : public MUnaryInstruction,
    public NoTypePolicy::Data
{
    gc::InitialHeap initialHeap_;

    MNewTypedArray(TempAllocator& alloc, CompilerConstraintList* constraints,
                   MConstant* templateConst, gc::InitialHeap initialHeap)
      : MUnaryInstruction(classOpcode, templateConst),
        initialHeap_(initialHeap)
    {
        MOZ_ASSERT(!templateObject()->isSingleton());
        setResultType(MIRType::Object);
        setResultTypeSet(MakeSingletonTypeSet(alloc, constraints, templateObject()));
    }

  public:
    INSTRUCTION_HEADER(NewTypedArray)
    TRIVIAL_NEW_WRAPPERS_WITH_ALLOC

    TypedArrayObject* templateObject() const {
        return &getOperand(0)->toConstant()->toObject().as<TypedArrayObject>();
    }

    gc::InitialHeap initialHeap() const {
        return initialHeap_;
    }

    virtual AliasSet getAliasSet() const override {
        return AliasSet::None();
    }

    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
    bool canRecoverOnBailout() const override {
        return true;
    }
};
+
+class MNewTypedArrayDynamicLength
+  : public MUnaryInstruction,
+    public IntPolicy<0>::Data
+{
+    CompilerObject templateObject_;
+    gc::InitialHeap initialHeap_;
+
+    MNewTypedArrayDynamicLength(TempAllocator& alloc, CompilerConstraintList* constraints,
+                                JSObject* templateObject, gc::InitialHeap initialHeap,
+                                MDefinition* length)
+      : MUnaryInstruction(classOpcode, length),
+        templateObject_(templateObject),
+        initialHeap_(initialHeap)
+    {
+        setGuard(); // Need to throw if length is negative.
+        setResultType(MIRType::Object);
+        if (!templateObject->isSingleton())
+            setResultTypeSet(MakeSingletonTypeSet(alloc, constraints, templateObject));
+    }
+
+  public:
+    INSTRUCTION_HEADER(NewTypedArrayDynamicLength)
+    TRIVIAL_NEW_WRAPPERS_WITH_ALLOC
+
+    MDefinition* length() const {
+        return getOperand(0);
+    }
+    JSObject* templateObject() const {
+        return templateObject_;
+    }
+    gc::InitialHeap initialHeap() const {
+        return initialHeap_;
+    }
+
+    virtual AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    bool appendRoots(MRootList& roots) const override {
+        return roots.append(templateObject_);
+    }
+};
+
// Allocate a plain object, either from an object literal site or for
// Object.create; the template object (possibly null) lives in operand 0.
class MNewObject
  : public MUnaryInstruction,
    public NoTypePolicy::Data
{
  public:
    enum Mode { ObjectLiteral, ObjectCreate };

  private:
    gc::InitialHeap initialHeap_;
    Mode mode_;
    // When true the allocation goes through a VM call (see NewVM below).
    bool vmCall_;

    MNewObject(TempAllocator& alloc, CompilerConstraintList* constraints, MConstant* templateConst,
               gc::InitialHeap initialHeap, Mode mode, bool vmCall = false)
      : MUnaryInstruction(classOpcode, templateConst),
        initialHeap_(initialHeap),
        mode_(mode),
        vmCall_(vmCall)
    {
        // Only ObjectLiteral sites may lack a template object.
        MOZ_ASSERT_IF(mode != ObjectLiteral, templateObject());
        setResultType(MIRType::Object);

        if (JSObject* obj = templateObject())
            setResultTypeSet(MakeSingletonTypeSet(alloc, constraints, obj));

        // The constant is kept separated in a MConstant, this way we can safely
        // mark it during GC if we recover the object allocation.  Otherwise, by
        // making it emittedAtUses, we do not produce register allocations for
        // it and inline its content inside the code produced by the
        // CodeGenerator.
        if (templateConst->toConstant()->type() == MIRType::Object)
            templateConst->setEmittedAtUses();
    }

  public:
    INSTRUCTION_HEADER(NewObject)
    TRIVIAL_NEW_WRAPPERS_WITH_ALLOC

    // Convenience constructor for the VM-call flavor of the allocation.
    static MNewObject* NewVM(TempAllocator& alloc, CompilerConstraintList* constraints,
                             MConstant* templateConst, gc::InitialHeap initialHeap,
                             Mode mode)
    {
        return new(alloc) MNewObject(alloc, constraints, templateConst, initialHeap, mode, true);
    }

    Mode mode() const {
        return mode_;
    }

    JSObject* templateObject() const {
        return getOperand(0)->toConstant()->toObjectOrNull();
    }

    gc::InitialHeap initialHeap() const {
        return initialHeap_;
    }

    bool isVMCall() const {
        return vmCall_;
    }

    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
    bool canRecoverOnBailout() const override {
        // The template object can safely be used in the recover instruction
        // because it can never be mutated by any other function execution.
        return templateObject() != nullptr;
    }
};
+
+
+class MNewIterator
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+  public:
+    enum Type {
+        ArrayIterator,
+        StringIterator,
+    };
+
+private:
+    Type type_;
+
+    MNewIterator(TempAllocator& alloc, CompilerConstraintList* constraints,
+                 MConstant* templateConst, Type type)
+      : MUnaryInstruction(classOpcode, templateConst),
+        type_(type)
+    {
+        setResultType(MIRType::Object);
+        setResultTypeSet(MakeSingletonTypeSet(alloc, constraints, templateObject()));
+        templateConst->setEmittedAtUses();
+    }
+
+  public:
+    INSTRUCTION_HEADER(NewIterator)
+    TRIVIAL_NEW_WRAPPERS_WITH_ALLOC
+
+    Type type() const {
+        return type_;
+    }
+
+    JSObject* templateObject() {
+        return getOperand(0)->toConstant()->toObjectOrNull();
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    bool canRecoverOnBailout() const override {
+        return true;
+    }
+};
+
// Load the type descriptor of a typed object (operand 0).
class MTypedObjectDescr
  : public MUnaryInstruction,
    public SingleObjectPolicy::Data
{
  private:
    explicit MTypedObjectDescr(MDefinition* object)
      : MUnaryInstruction(classOpcode, object)
    {
        setResultType(MIRType::Object);
        setMovable();
    }

  public:
    INSTRUCTION_HEADER(TypedObjectDescr)
    TRIVIAL_NEW_WRAPPERS
    NAMED_OPERANDS((0, object))

    bool congruentTo(const MDefinition* ins) const override {
        return congruentIfOperandsEqual(ins);
    }
    // Reads object fields (the descriptor slot), but never writes.
    AliasSet getAliasSet() const override {
        return AliasSet::Load(AliasSet::ObjectFields);
    }
};
+
// Generic way for constructing a SIMD object in IonMonkey, this instruction
// takes as argument a SIMD instruction and returns a new SIMD object which
// corresponds to the MIRType of its operand.
class MSimdBox
  : public MUnaryInstruction,
    public NoTypePolicy::Data
{
  protected:
    CompilerGCPointer<InlineTypedObject*> templateObject_;
    SimdType simdType_;
    gc::InitialHeap initialHeap_;

    MSimdBox(TempAllocator& alloc,
             CompilerConstraintList* constraints,
             MDefinition* op,
             InlineTypedObject* templateObject,
             SimdType simdType,
             gc::InitialHeap initialHeap)
      : MUnaryInstruction(classOpcode, op),
        templateObject_(templateObject),
        simdType_(simdType),
        initialHeap_(initialHeap)
    {
        MOZ_ASSERT(IsSimdType(op->type()));
        setMovable();
        setResultType(MIRType::Object);
        // Constraints may be null (e.g. when not compiling a script).
        if (constraints)
            setResultTypeSet(MakeSingletonTypeSet(alloc, constraints, templateObject));
    }

  public:
    INSTRUCTION_HEADER(SimdBox)
    TRIVIAL_NEW_WRAPPERS_WITH_ALLOC

    InlineTypedObject* templateObject() const {
        return templateObject_;
    }

    SimdType simdType() const {
        return simdType_;
    }

    gc::InitialHeap initialHeap() const {
        return initialHeap_;
    }

    bool congruentTo(const MDefinition* ins) const override {
        // Beyond congruent operands, the boxed SIMD type and the allocation
        // heap must match as well.
        if (!congruentIfOperandsEqual(ins))
            return false;
        const MSimdBox* box = ins->toSimdBox();
        if (box->simdType() != simdType())
            return false;
        MOZ_ASSERT(box->templateObject() == templateObject());
        if (box->initialHeap() != initialHeap())
            return false;
        return true;
    }

    AliasSet getAliasSet() const override {
        return AliasSet::None();
    }

    void printOpcode(GenericPrinter& out) const override;
    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
    bool canRecoverOnBailout() const override {
        return true;
    }

    // Keep the template object alive across GC while this MIR node exists.
    bool appendRoots(MRootList& roots) const override {
        return roots.append(templateObject_);
    }
};
+
// Unbox a SIMD object back into a raw SIMD value of the given SimdType.
class MSimdUnbox
  : public MUnaryInstruction,
    public SingleObjectPolicy::Data
{
  protected:
    SimdType simdType_;

    MSimdUnbox(MDefinition* op, SimdType simdType)
      : MUnaryInstruction(classOpcode, op),
        simdType_(simdType)
    {
        MIRType type = SimdTypeToMIRType(simdType);
        MOZ_ASSERT(IsSimdType(type));
        // Guard: deoptimize if the input is not a SIMD object of this type.
        setGuard();
        setMovable();
        setResultType(type);
    }

  public:
    INSTRUCTION_HEADER(SimdUnbox)
    TRIVIAL_NEW_WRAPPERS
    ALLOW_CLONE(MSimdUnbox)

    SimdType simdType() const { return simdType_; }

    // Defined out of line; may eliminate box/unbox pairs (see implementation).
    MDefinition* foldsTo(TempAllocator& alloc) override;
    bool congruentTo(const MDefinition* ins) const override {
        if (!congruentIfOperandsEqual(ins))
            return false;
        return ins->toSimdUnbox()->simdType() == simdType();
    }

    AliasSet getAliasSet() const override {
        return AliasSet::None();
    }

    void printOpcode(GenericPrinter& out) const override;
};
+
// Debug-only assertion node: checks that its input is (or is not) recovered
// on bailout, depending on mustBeRecovered_.
class MAssertRecoveredOnBailout
  : public MUnaryInstruction,
    public NoTypePolicy::Data
{
  protected:
    bool mustBeRecovered_;

    MAssertRecoveredOnBailout(MDefinition* ins, bool mustBeRecovered)
      : MUnaryInstruction(classOpcode, ins), mustBeRecovered_(mustBeRecovered)
    {
        setResultType(MIRType::Value);
        setRecoveredOnBailout();
        setGuard();
    }

  public:
    INSTRUCTION_HEADER(AssertRecoveredOnBailout)
    TRIVIAL_NEW_WRAPPERS

    // Needed to assert that float32 instructions are correctly recovered.
    bool canConsumeFloat32(MUse* use) const override { return true; }

    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
    bool canRecoverOnBailout() const override {
        return true;
    }
};
+
// Debug-only assertion node: records whether its input is expected to be
// specialized to Float32 by the float32 analysis.
class MAssertFloat32
  : public MUnaryInstruction,
    public NoTypePolicy::Data
{
  protected:
    bool mustBeFloat32_;

    MAssertFloat32(MDefinition* value, bool mustBeFloat32)
      : MUnaryInstruction(classOpcode, value), mustBeFloat32_(mustBeFloat32)
    {
    }

  public:
    INSTRUCTION_HEADER(AssertFloat32)
    TRIVIAL_NEW_WRAPPERS

    bool canConsumeFloat32(MUse* use) const override { return true; }

    bool mustBeFloat32() const { return mustBeFloat32_; }
};
+
// Takes a typed value and returns an untyped value.
class MBox
  : public MUnaryInstruction,
    public NoTypePolicy::Data
{
    MBox(TempAllocator& alloc, MDefinition* ins)
      : MUnaryInstruction(classOpcode, ins)
    {
        setResultType(MIRType::Value);
        // Propagate the most precise type information we have: reuse the
        // input's type set if present, otherwise synthesize one from its
        // MIRType (AnyObject for objects, the primitive type otherwise).
        if (ins->resultTypeSet()) {
            setResultTypeSet(ins->resultTypeSet());
        } else if (ins->type() != MIRType::Value) {
            TypeSet::Type ntype = ins->type() == MIRType::Object
                                  ? TypeSet::AnyObjectType()
                                  : TypeSet::PrimitiveType(ValueTypeFromMIRType(ins->type()));
            setResultTypeSet(alloc.lifoAlloc()->new_<TemporaryTypeSet>(alloc.lifoAlloc(), ntype));
        }
        setMovable();
    }

  public:
    INSTRUCTION_HEADER(Box)
    static MBox* New(TempAllocator& alloc, MDefinition* ins)
    {
        // Cannot box a box.
        MOZ_ASSERT(ins->type() != MIRType::Value);

        return new(alloc) MBox(alloc, ins);
    }

    bool congruentTo(const MDefinition* ins) const override {
        return congruentIfOperandsEqual(ins);
    }
    AliasSet getAliasSet() const override {
        return AliasSet::None();
    }

    ALLOW_CLONE(MBox)
};
+
// Takes a typed value and checks if it is a certain type. If so, the payload
// is unpacked and returned as that type. Otherwise, it is considered a
// deoptimization.
class MUnbox final : public MUnaryInstruction, public BoxInputsPolicy::Data
{
  public:
    enum Mode {
        Fallible,       // Check the type, and deoptimize if unexpected.
        Infallible,     // Type guard is not necessary.
        TypeBarrier     // Guard on the type, and act like a TypeBarrier on failure.
    };

  private:
    Mode mode_;
    // Kind of bailout taken when a Fallible/TypeBarrier check fails.
    BailoutKind bailoutKind_;

    MUnbox(MDefinition* ins, MIRType type, Mode mode, BailoutKind kind, TempAllocator& alloc)
      : MUnaryInstruction(classOpcode, ins),
        mode_(mode)
    {
        // Only allow unboxing a non MIRType::Value when input and output types
        // don't match. This is often used to force a bailout. Boxing happens
        // during type analysis.
        MOZ_ASSERT_IF(ins->type() != MIRType::Value, type != ins->type());

        MOZ_ASSERT(type == MIRType::Boolean ||
                   type == MIRType::Int32   ||
                   type == MIRType::Double  ||
                   type == MIRType::String  ||
                   type == MIRType::Symbol  ||
                   type == MIRType::Object);

        // When unboxing to an object, narrow the inherited type set to the
        // object types only.
        TemporaryTypeSet* resultSet = ins->resultTypeSet();
        if (resultSet && type == MIRType::Object)
            resultSet = resultSet->cloneObjectsOnly(alloc.lifoAlloc());

        setResultType(type);
        setResultTypeSet(resultSet);
        setMovable();

        // Only fallible modes guard; Infallible unboxes never bail.
        if (mode_ == TypeBarrier || mode_ == Fallible)
            setGuard();

        bailoutKind_ = kind;
    }
  public:
    INSTRUCTION_HEADER(Unbox)
    static MUnbox* New(TempAllocator& alloc, MDefinition* ins, MIRType type, Mode mode)
    {
        // Unless we were given a specific BailoutKind, pick a default based on
        // the type we expect.
        BailoutKind kind;
        switch (type) {
          case MIRType::Boolean:
            kind = Bailout_NonBooleanInput;
            break;
          case MIRType::Int32:
            kind = Bailout_NonInt32Input;
            break;
          case MIRType::Double:
            kind = Bailout_NonNumericInput; // Int32s are fine too
            break;
          case MIRType::String:
            kind = Bailout_NonStringInput;
            break;
          case MIRType::Symbol:
            kind = Bailout_NonSymbolInput;
            break;
          case MIRType::Object:
            kind = Bailout_NonObjectInput;
            break;
          default:
            MOZ_CRASH("Given MIRType cannot be unboxed.");
        }

        return new(alloc) MUnbox(ins, type, mode, kind, alloc);
    }

    static MUnbox* New(TempAllocator& alloc, MDefinition* ins, MIRType type, Mode mode,
                       BailoutKind kind)
    {
        return new(alloc) MUnbox(ins, type, mode, kind, alloc);
    }

    Mode mode() const {
        return mode_;
    }
    BailoutKind bailoutKind() const {
        // If infallible, no bailout should be generated.
        MOZ_ASSERT(fallible());
        return bailoutKind_;
    }
    bool fallible() const {
        return mode() != Infallible;
    }
    bool congruentTo(const MDefinition* ins) const override {
        if (!ins->isUnbox() || ins->toUnbox()->mode() != mode())
            return false;
        return congruentIfOperandsEqual(ins);
    }

    // Defined out of line; may eliminate box/unbox pairs (see implementation).
    MDefinition* foldsTo(TempAllocator& alloc) override;

    AliasSet getAliasSet() const override {
        return AliasSet::None();
    }
    void printOpcode(GenericPrinter& out) const override;
    void makeInfallible() {
        // Should only be called if we're already Infallible or TypeBarrier
        MOZ_ASSERT(mode() != Fallible);
        mode_ = Infallible;
    }

    ALLOW_CLONE(MUnbox)
};
+
// Guard that asserts its input is an object; produces the unchanged object,
// propagating the input's type set.
class MGuardObject
  : public MUnaryInstruction,
    public SingleObjectPolicy::Data
{
    explicit MGuardObject(MDefinition* ins)
      : MUnaryInstruction(classOpcode, ins)
    {
        setGuard();
        setMovable();
        setResultType(MIRType::Object);
        setResultTypeSet(ins->resultTypeSet());
    }

  public:
    INSTRUCTION_HEADER(GuardObject)
    TRIVIAL_NEW_WRAPPERS

    AliasSet getAliasSet() const override {
        return AliasSet::None();
    }
};
+
+class MGuardString
+  : public MUnaryInstruction,
+    public StringPolicy<0>::Data
+{
+    explicit MGuardString(MDefinition* ins)
+      : MUnaryInstruction(classOpcode, ins)
+    {
+        setGuard();
+        setMovable();
+        setResultType(MIRType::String);
+    }
+
+  public:
+    INSTRUCTION_HEADER(GuardString)
+    TRIVIAL_NEW_WRAPPERS
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+class MPolyInlineGuard
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    explicit MPolyInlineGuard(MDefinition* ins)
+      : MUnaryInstruction(classOpcode, ins)
+    {
+        setGuard();
+        setResultType(MIRType::Object);
+        setResultTypeSet(ins->resultTypeSet());
+    }
+
+  public:
+    INSTRUCTION_HEADER(PolyInlineGuard)
+    TRIVIAL_NEW_WRAPPERS
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
// Debug-only node asserting that its input stays within a given range at
// runtime.
class MAssertRange
  : public MUnaryInstruction,
    public NoTypePolicy::Data
{
    // This is the range checked by the assertion. Don't confuse this with the
    // range_ member or the range() accessor. Since MAssertRange doesn't return
    // a value, it doesn't use those.
    const Range* assertedRange_;

    MAssertRange(MDefinition* ins, const Range* assertedRange)
      : MUnaryInstruction(classOpcode, ins), assertedRange_(assertedRange)
    {
        setGuard();
        // Produces no value.
        setResultType(MIRType::None);
    }

  public:
    INSTRUCTION_HEADER(AssertRange)
    TRIVIAL_NEW_WRAPPERS

    const Range* assertedRange() const {
        return assertedRange_;
    }

    AliasSet getAliasSet() const override {
        return AliasSet::None();
    }

    void printOpcode(GenericPrinter& out) const override;
};
+
// Caller-side allocation of |this| for |new|:
// Given a template object, construct |this| for JSOP_NEW
class MCreateThisWithTemplate
  : public MUnaryInstruction,
    public NoTypePolicy::Data
{
    gc::InitialHeap initialHeap_;

    MCreateThisWithTemplate(TempAllocator& alloc, CompilerConstraintList* constraints,
                            MConstant* templateConst, gc::InitialHeap initialHeap)
      : MUnaryInstruction(classOpcode, templateConst),
        initialHeap_(initialHeap)
    {
        setResultType(MIRType::Object);
        setResultTypeSet(MakeSingletonTypeSet(alloc, constraints, templateObject()));
    }

  public:
    INSTRUCTION_HEADER(CreateThisWithTemplate)
    TRIVIAL_NEW_WRAPPERS_WITH_ALLOC

    // Template for |this|, provided by TI.
    JSObject* templateObject() const {
        return &getOperand(0)->toConstant()->toObject();
    }

    gc::InitialHeap initialHeap() const {
        return initialHeap_;
    }

    // Although creation of |this| modifies global state, it is safely repeatable.
    AliasSet getAliasSet() const override {
        return AliasSet::None();
    }

    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
    bool canRecoverOnBailout() const override;
};
+
// Eager initialization of arguments object.
class MCreateArgumentsObject
  : public MUnaryInstruction,
    public ObjectPolicy<0>::Data
{
    CompilerGCPointer<ArgumentsObject*> templateObj_;

    MCreateArgumentsObject(MDefinition* callObj, ArgumentsObject* templateObj)
      : MUnaryInstruction(classOpcode, callObj),
        templateObj_(templateObj)
    {
        setResultType(MIRType::Object);
        setGuard();
    }

  public:
    INSTRUCTION_HEADER(CreateArgumentsObject)
    TRIVIAL_NEW_WRAPPERS
    NAMED_OPERANDS((0, getCallObject))

    ArgumentsObject* templateObject() const {
        return templateObj_;
    }

    AliasSet getAliasSet() const override {
        return AliasSet::None();
    }

    // May call into the VM to allocate the arguments object.
    bool possiblyCalls() const override {
        return true;
    }

    // Keep the template object alive across GC while this MIR node exists.
    bool appendRoots(MRootList& roots) const override {
        return roots.append(templateObj_);
    }
};
+
// Load formal argument |argno| out of an arguments object (operand 0).
class MGetArgumentsObjectArg
  : public MUnaryInstruction,
    public ObjectPolicy<0>::Data
{
    size_t argno_;

    MGetArgumentsObjectArg(MDefinition* argsObject, size_t argno)
      : MUnaryInstruction(classOpcode, argsObject),
        argno_(argno)
    {
        setResultType(MIRType::Value);
    }

  public:
    INSTRUCTION_HEADER(GetArgumentsObjectArg)
    TRIVIAL_NEW_WRAPPERS
    NAMED_OPERANDS((0, getArgsObject))

    size_t argno() const {
        return argno_;
    }

    // Conservative: the loaded slot may alias anything.
    AliasSet getAliasSet() const override {
        return AliasSet::Load(AliasSet::Any);
    }
};
+
+// Converts a uint32 to a double (coming from wasm).
+class MWasmUnsignedToDouble
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+    explicit MWasmUnsignedToDouble(MDefinition* def)
+      : MUnaryInstruction(classOpcode, def)
+    {
+        setResultType(MIRType::Double);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(WasmUnsignedToDouble)
+    TRIVIAL_NEW_WRAPPERS
+
+    // Constant inputs can be folded; see implementation in MIR.cpp.
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// Converts a uint32 to a float32 (coming from wasm).
+class MWasmUnsignedToFloat32
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+    explicit MWasmUnsignedToFloat32(MDefinition* def)
+      : MUnaryInstruction(classOpcode, def)
+    {
+        setResultType(MIRType::Float32);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(WasmUnsignedToFloat32)
+    TRIVIAL_NEW_WRAPPERS
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    // The result is a genuine float32, so float32 specialization may flow through.
+    bool canProduceFloat32() const override { return true; }
+};
+
+// Narrows an int64 value to an int32.
+class MWrapInt64ToInt32
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+    // When true, keep the low 32 bits; otherwise presumably the high 32 bits
+    // (NOTE(review): confirm top-half semantics against the lowering code).
+    bool bottomHalf_;
+
+    explicit MWrapInt64ToInt32(MDefinition* def, bool bottomHalf = true)
+      : MUnaryInstruction(classOpcode, def),
+        bottomHalf_(bottomHalf)
+    {
+        setResultType(MIRType::Int32);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(WrapInt64ToInt32)
+    TRIVIAL_NEW_WRAPPERS
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+    // Congruent only when both instructions select the same half.
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isWrapInt64ToInt32())
+            return false;
+        if (ins->toWrapInt64ToInt32()->bottomHalf() != bottomHalf())
+            return false;
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    bool bottomHalf() const {
+        return bottomHalf_;
+    }
+};
+
+// Widens an int32 value to an int64, zero-extending when isUnsigned_ is set
+// and sign-extending otherwise.
+class MExtendInt32ToInt64
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+    bool isUnsigned_;
+
+    MExtendInt32ToInt64(MDefinition* def, bool isUnsigned)
+      : MUnaryInstruction(classOpcode, def),
+        isUnsigned_(isUnsigned)
+    {
+        setResultType(MIRType::Int64);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(ExtendInt32ToInt64)
+    TRIVIAL_NEW_WRAPPERS
+
+    bool isUnsigned() const { return isUnsigned_; }
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+    // Signedness must match for two extensions to be congruent.
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isExtendInt32ToInt64())
+            return false;
+        if (ins->toExtendInt32ToInt64()->isUnsigned_ != isUnsigned_)
+            return false;
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// Truncate a value to an int64, with wasm semantics. Like MWasmTruncateToInt32
+// below, this can have side-effects (it is guarded); bytecodeOffset_ records
+// the originating wasm bytecode for error reporting.
+class MWasmTruncateToInt64
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+    bool isUnsigned_;
+    wasm::BytecodeOffset bytecodeOffset_;
+
+    MWasmTruncateToInt64(MDefinition* def, bool isUnsigned, wasm::BytecodeOffset bytecodeOffset)
+      : MUnaryInstruction(classOpcode, def),
+        isUnsigned_(isUnsigned),
+        bytecodeOffset_(bytecodeOffset)
+    {
+        setResultType(MIRType::Int64);
+        setGuard(); // neither removable nor movable because of possible side-effects.
+    }
+
+  public:
+    INSTRUCTION_HEADER(WasmTruncateToInt64)
+    TRIVIAL_NEW_WRAPPERS
+
+    bool isUnsigned() const { return isUnsigned_; }
+    wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
+
+    // Signedness is part of the operation's identity.
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins) &&
+               ins->toWasmTruncateToInt64()->isUnsigned() == isUnsigned_;
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// Truncate a value to an int32, with wasm semantics: this will trap when the
+// value is out of range.
+class MWasmTruncateToInt32
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+    bool isUnsigned_;
+    // Originating wasm bytecode location, used when reporting the trap.
+    wasm::BytecodeOffset bytecodeOffset_;
+
+    explicit MWasmTruncateToInt32(MDefinition* def, bool isUnsigned,
+                                  wasm::BytecodeOffset bytecodeOffset)
+      : MUnaryInstruction(classOpcode, def),
+        isUnsigned_(isUnsigned), bytecodeOffset_(bytecodeOffset)
+    {
+        setResultType(MIRType::Int32);
+        setGuard(); // neither removable nor movable because of possible side-effects.
+    }
+
+  public:
+    INSTRUCTION_HEADER(WasmTruncateToInt32)
+    TRIVIAL_NEW_WRAPPERS
+
+    bool isUnsigned() const {
+        return isUnsigned_;
+    }
+    wasm::BytecodeOffset bytecodeOffset() const {
+        return bytecodeOffset_;
+    }
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+
+    // Signedness is part of the operation's identity.
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins) &&
+               ins->toWasmTruncateToInt32()->isUnsigned() == isUnsigned_;
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// Converts an int64 (signed or unsigned, per isUnsigned_) to the given
+// floating point type (Double or Float32).
+class MInt64ToFloatingPoint
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+    bool isUnsigned_;
+    wasm::BytecodeOffset bytecodeOffset_;
+
+    MInt64ToFloatingPoint(MDefinition* def, MIRType type, wasm::BytecodeOffset bytecodeOffset,
+                          bool isUnsigned)
+      : MUnaryInstruction(classOpcode, def),
+        isUnsigned_(isUnsigned),
+        bytecodeOffset_(bytecodeOffset)
+    {
+        MOZ_ASSERT(IsFloatingPointType(type));
+        setResultType(type);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(Int64ToFloatingPoint)
+    TRIVIAL_NEW_WRAPPERS
+
+    bool isUnsigned() const { return isUnsigned_; }
+    wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
+
+    // Signedness must match for congruence.
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isInt64ToFloatingPoint())
+            return false;
+        if (ins->toInt64ToFloatingPoint()->isUnsigned_ != isUnsigned_)
+            return false;
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// Converts a primitive (either typed or untyped) to an int32. If the input is
+// not primitive at runtime, a bailout occurs. If the input cannot be converted
+// to an int32 without loss (i.e. "5.5" or undefined) then a bailout occurs.
+class MToInt32
+  : public MUnaryInstruction,
+    public ToInt32Policy::Data
+{
+    // Whether the input may still be negative zero; refined by backward analysis.
+    bool canBeNegativeZero_;
+    MacroAssembler::IntConversionInputKind conversion_;
+
+    explicit MToInt32(MDefinition* def, MacroAssembler::IntConversionInputKind conversion =
+                                            MacroAssembler::IntConversion_Any)
+      : MUnaryInstruction(classOpcode, def),
+        canBeNegativeZero_(true),
+        conversion_(conversion)
+    {
+        setResultType(MIRType::Int32);
+        setMovable();
+
+        // An object might have "valueOf", which means it is effectful.
+        // ToNumber(symbol) throws.
+        if (def->mightBeType(MIRType::Object) || def->mightBeType(MIRType::Symbol))
+            setGuard();
+    }
+
+  public:
+    INSTRUCTION_HEADER(ToInt32)
+    TRIVIAL_NEW_WRAPPERS
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+
+    // this only has backwards information flow.
+    void analyzeEdgeCasesBackward() override;
+
+    bool canBeNegativeZero() const {
+        return canBeNegativeZero_;
+    }
+    void setCanBeNegativeZero(bool negativeZero) {
+        canBeNegativeZero_ = negativeZero;
+    }
+
+    MacroAssembler::IntConversionInputKind conversion() const {
+        return conversion_;
+    }
+
+    // Conversions with different input kinds are not interchangeable.
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isToInt32() || ins->toToInt32()->conversion() != conversion())
+            return false;
+        return congruentIfOperandsEqual(ins);
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    void computeRange(TempAllocator& alloc) override;
+    void collectRangeInfoPreTrunc() override;
+
+#ifdef DEBUG
+    bool isConsistentFloat32Use(MUse* use) const override { return true; }
+#endif
+
+    ALLOW_CLONE(MToInt32)
+};
+
+// Converts a value or typed input to a truncated int32, for use with bitwise
+// operations. This is an infallible ValueToECMAInt32.
+class MTruncateToInt32
+  : public MUnaryInstruction,
+    public ToInt32Policy::Data
+{
+    // Wasm bytecode location, defaulted for non-wasm uses.
+    wasm::BytecodeOffset bytecodeOffset_;
+
+    explicit MTruncateToInt32(MDefinition* def,
+                              wasm::BytecodeOffset bytecodeOffset = wasm::BytecodeOffset())
+      : MUnaryInstruction(classOpcode, def),
+        bytecodeOffset_(bytecodeOffset)
+    {
+        setResultType(MIRType::Int32);
+        setMovable();
+
+        // An object might have "valueOf", which means it is effectful.
+        // ToInt32(symbol) throws.
+        if (def->mightBeType(MIRType::Object) || def->mightBeType(MIRType::Symbol))
+            setGuard();
+    }
+
+  public:
+    INSTRUCTION_HEADER(TruncateToInt32)
+    TRIVIAL_NEW_WRAPPERS
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    void computeRange(TempAllocator& alloc) override;
+    TruncateKind operandTruncateKind(size_t index) const override;
+# ifdef DEBUG
+    bool isConsistentFloat32Use(MUse* use) const override {
+        return true;
+    }
+#endif
+
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    // Recoverable only for inputs that can't run arbitrary side-effects
+    // (i.e. anything ordered before Symbol in the MIRType enumeration).
+    bool canRecoverOnBailout() const override {
+        return input()->type() < MIRType::Symbol;
+    }
+
+    wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
+
+    ALLOW_CLONE(MTruncateToInt32)
+};
+
+// Converts any type to a string
+class MToString :
+  public MUnaryInstruction,
+  public ToStringPolicy::Data
+{
+    explicit MToString(MDefinition* def)
+      : MUnaryInstruction(classOpcode, def)
+    {
+        setResultType(MIRType::String);
+        setMovable();
+
+        // Objects might override toString and Symbols throw. We bailout in
+        // those cases and run side-effects in baseline instead.
+        if (def->mightBeType(MIRType::Object) || def->mightBeType(MIRType::Symbol))
+            setGuard();
+    }
+
+  public:
+    INSTRUCTION_HEADER(ToString)
+    TRIVIAL_NEW_WRAPPERS
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    // Fallible exactly in the cases that set the guard above.
+    bool fallible() const {
+        return input()->mightBeType(MIRType::Object) ||
+               input()->mightBeType(MIRType::Symbol);
+    }
+
+    ALLOW_CLONE(MToString)
+};
+
+// Converts any type to an object, throwing on null or undefined.
+class MToObject :
+  public MUnaryInstruction,
+  public BoxInputsPolicy::Data
+{
+    explicit MToObject(MDefinition* def)
+      : MUnaryInstruction(classOpcode, def)
+    {
+        setResultType(MIRType::Object);
+        setGuard(); // Throws on null or undefined.
+    }
+
+  public:
+    INSTRUCTION_HEADER(ToObject)
+    TRIVIAL_NEW_WRAPPERS
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    ALLOW_CLONE(MToObject)
+};
+
+// Converts any type to an object or null value, throwing on undefined.
+class MToObjectOrNull :
+  public MUnaryInstruction,
+  public BoxInputsPolicy::Data
+{
+    explicit MToObjectOrNull(MDefinition* def)
+      : MUnaryInstruction(classOpcode, def)
+    {
+        setResultType(MIRType::ObjectOrNull);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(ToObjectOrNull)
+    TRIVIAL_NEW_WRAPPERS
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    ALLOW_CLONE(MToObjectOrNull)
+};
+
+// Bitwise NOT (~x). Starts unspecialized (MIRType::None) and may later be
+// specialized to a concrete numeric type via setSpecialization().
+class MBitNot
+  : public MUnaryInstruction,
+    public BitwisePolicy::Data
+{
+  protected:
+    explicit MBitNot(MDefinition* input)
+      : MUnaryInstruction(classOpcode, input)
+    {
+        specialization_ = MIRType::None;
+        setResultType(MIRType::Int32);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(BitNot)
+    TRIVIAL_NEW_WRAPPERS
+
+    static MBitNot* NewInt32(TempAllocator& alloc, MDefinition* input);
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+    void setSpecialization(MIRType type) {
+        specialization_ = type;
+        setResultType(type);
+    }
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    // Unspecialized BitNot is treated as effectful (Store(Any)); once
+    // specialized to a numeric type it has no aliasing.
+    AliasSet getAliasSet() const override {
+        if (specialization_ == MIRType::None)
+            return AliasSet::Store(AliasSet::Any);
+        return AliasSet::None();
+    }
+    void computeRange(TempAllocator& alloc) override;
+
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    bool canRecoverOnBailout() const override {
+        return specialization_ != MIRType::None;
+    }
+
+    ALLOW_CLONE(MBitNot)
+};
+
+// Implements the |typeof| operator: produces the type name string for the
+// boxed input value.
+class MTypeOf
+  : public MUnaryInstruction,
+    public BoxInputsPolicy::Data
+{
+    MIRType inputType_;
+    // Conservatively true until refined by TI constraints (see
+    // cacheInputMaybeCallableOrEmulatesUndefined).
+    bool inputMaybeCallableOrEmulatesUndefined_;
+
+    MTypeOf(MDefinition* def, MIRType inputType)
+      : MUnaryInstruction(classOpcode, def), inputType_(inputType),
+        inputMaybeCallableOrEmulatesUndefined_(true)
+    {
+        setResultType(MIRType::String);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(TypeOf)
+    TRIVIAL_NEW_WRAPPERS
+
+    MIRType inputType() const {
+        return inputType_;
+    }
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+    void cacheInputMaybeCallableOrEmulatesUndefined(CompilerConstraintList* constraints);
+
+    bool inputMaybeCallableOrEmulatesUndefined() const {
+        return inputMaybeCallableOrEmulatesUndefined_;
+    }
+    void markInputNotCallableOrEmulatesUndefined() {
+        inputMaybeCallableOrEmulatesUndefined_ = false;
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    // Both the static input type and the callable/emulates-undefined flag are
+    // part of this instruction's identity.
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isTypeOf())
+            return false;
+        if (inputType() != ins->toTypeOf()->inputType())
+            return false;
+        if (inputMaybeCallableOrEmulatesUndefined() !=
+            ins->toTypeOf()->inputMaybeCallableOrEmulatesUndefined())
+        {
+            return false;
+        }
+        return congruentIfOperandsEqual(ins);
+    }
+
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    bool canRecoverOnBailout() const override {
+        return true;
+    }
+};
+
+// Converts an unwrapped function object to its async-function counterpart
+// (NOTE(review): exact wrapping semantics live in the VM; confirm against the
+// interpreter's ToAsync handling).
+class MToAsync
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    explicit MToAsync(MDefinition* unwrapped)
+      : MUnaryInstruction(classOpcode, unwrapped)
+    {
+        setResultType(MIRType::Object);
+    }
+
+  public:
+    INSTRUCTION_HEADER(ToAsync)
+    TRIVIAL_NEW_WRAPPERS
+};
+
+// Converts an unwrapped function object to its async-generator counterpart
+// (NOTE(review): exact wrapping semantics live in the VM).
+class MToAsyncGen
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    explicit MToAsyncGen(MDefinition* unwrapped)
+      : MUnaryInstruction(classOpcode, unwrapped)
+    {
+        setResultType(MIRType::Object);
+    }
+
+  public:
+    INSTRUCTION_HEADER(ToAsyncGen)
+    TRIVIAL_NEW_WRAPPERS
+};
+
+// Converts an unwrapped iterator object to an async iterator
+// (NOTE(review): exact wrapping semantics live in the VM).
+class MToAsyncIter
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    explicit MToAsyncIter(MDefinition* unwrapped)
+      : MUnaryInstruction(classOpcode, unwrapped)
+    {
+        setResultType(MIRType::Object);
+    }
+
+  public:
+    INSTRUCTION_HEADER(ToAsyncIter)
+    TRIVIAL_NEW_WRAPPERS
+};
+
+// Converts an index value to a property-id value (result is a boxed Value;
+// NOTE(review): corresponds to the bytecode's ToId semantics — confirm).
+class MToId
+  : public MUnaryInstruction,
+    public BoxInputsPolicy::Data
+{
+    explicit MToId(MDefinition* index)
+      : MUnaryInstruction(classOpcode, index)
+    {
+        setResultType(MIRType::Value);
+    }
+
+  public:
+    INSTRUCTION_HEADER(ToId)
+    TRIVIAL_NEW_WRAPPERS
+};
+
+// Sign-extends the low 8 (Byte) or 16 (Half) bits of an int32 operand to a
+// full int32.
+class MSignExtendInt32
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+  public:
+    enum Mode {
+        Byte,
+        Half
+    };
+
+  private:
+    Mode mode_;
+
+    MSignExtendInt32(MDefinition* op, Mode mode)
+      : MUnaryInstruction(classOpcode, op), mode_(mode)
+    {
+        setResultType(MIRType::Int32);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(SignExtendInt32)
+    TRIVIAL_NEW_WRAPPERS
+
+    Mode mode() const { return mode_; }
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+    // Congruent only when both instructions extend from the same width.
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!congruentIfOperandsEqual(ins))
+            return false;
+        return ins->isSignExtendInt32() && ins->toSignExtendInt32()->mode_ == mode_;
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    bool canRecoverOnBailout() const override {
+        return true;
+    }
+
+    ALLOW_CLONE(MSignExtendInt32)
+};
+
+// Sign-extends the low 8 (Byte), 16 (Half) or 32 (Word) bits of an int64
+// operand to a full int64.
+class MSignExtendInt64
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+  public:
+    enum Mode {
+        Byte,
+        Half,
+        Word
+    };
+
+  private:
+    Mode mode_;
+
+    MSignExtendInt64(MDefinition* op, Mode mode)
+      : MUnaryInstruction(classOpcode, op), mode_(mode)
+    {
+        setResultType(MIRType::Int64);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(SignExtendInt64)
+    TRIVIAL_NEW_WRAPPERS
+
+    Mode mode() const { return mode_; }
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+    // Congruent only when both instructions extend from the same width.
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!congruentIfOperandsEqual(ins))
+            return false;
+        return ins->isSignExtendInt64() && ins->toSignExtendInt64()->mode_ == mode_;
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    ALLOW_CLONE(MSignExtendInt64)
+};
+
+// Absolute value of a numeric operand (inline Math.abs()).
+class MAbs
+  : public MUnaryInstruction,
+    public ArithPolicy::Data
+{
+    // When true, the int32 result is implicitly truncated (wasm semantics; see
+    // NewWasm below).
+    bool implicitTruncate_;
+
+    MAbs(MDefinition* num, MIRType type)
+      : MUnaryInstruction(classOpcode, num),
+        implicitTruncate_(false)
+    {
+        MOZ_ASSERT(IsNumberType(type));
+        setResultType(type);
+        setMovable();
+        specialization_ = type;
+    }
+
+  public:
+    INSTRUCTION_HEADER(Abs)
+    TRIVIAL_NEW_WRAPPERS
+
+    // Wasm constructor: int32 abs is implicitly truncating there.
+    static MAbs* NewWasm(TempAllocator& alloc, MDefinition* num, MIRType type) {
+        auto* ins = new(alloc) MAbs(num, type);
+        if (type == MIRType::Int32)
+            ins->implicitTruncate_ = true;
+        return ins;
+    }
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    bool fallible() const;
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    void computeRange(TempAllocator& alloc) override;
+    bool isFloat32Commutative() const override { return true; }
+    void trySpecializeFloat32(TempAllocator& alloc) override;
+
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    bool canRecoverOnBailout() const override {
+        return true;
+    }
+
+    ALLOW_CLONE(MAbs)
+};
+
+// Count leading zeroes of an integer operand.
+class MClz
+  : public MUnaryInstruction
+  , public BitwisePolicy::Data
+{
+    // Set by range analysis when the operand is proven non-zero, which lets
+    // codegen skip the zero-input special case.
+    bool operandIsNeverZero_;
+
+    explicit MClz(MDefinition* num, MIRType type)
+      : MUnaryInstruction(classOpcode, num),
+        operandIsNeverZero_(false)
+    {
+        MOZ_ASSERT(IsIntType(type));
+        MOZ_ASSERT(IsNumberType(num->type()));
+        specialization_ = type;
+        setResultType(type);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(Clz)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, num))
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    bool operandIsNeverZero() const {
+        return operandIsNeverZero_;
+    }
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+    void computeRange(TempAllocator& alloc) override;
+    void collectRangeInfoPreTrunc() override;
+};
+
+// Count trailing zeroes of an integer operand.
+class MCtz
+  : public MUnaryInstruction
+  , public BitwisePolicy::Data
+{
+    // Set by range analysis when the operand is proven non-zero (see MClz).
+    bool operandIsNeverZero_;
+
+    explicit MCtz(MDefinition* num, MIRType type)
+      : MUnaryInstruction(classOpcode, num),
+        operandIsNeverZero_(false)
+    {
+        MOZ_ASSERT(IsIntType(type));
+        MOZ_ASSERT(IsNumberType(num->type()));
+        specialization_ = type;
+        setResultType(type);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(Ctz)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, num))
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    bool operandIsNeverZero() const {
+        return operandIsNeverZero_;
+    }
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+    void computeRange(TempAllocator& alloc) override;
+    void collectRangeInfoPreTrunc() override;
+};
+
+// Population count (number of set bits) of an integer operand.
+class MPopcnt
+  : public MUnaryInstruction
+  , public BitwisePolicy::Data
+{
+    explicit MPopcnt(MDefinition* num, MIRType type)
+      : MUnaryInstruction(classOpcode, num)
+    {
+        MOZ_ASSERT(IsNumberType(num->type()));
+        MOZ_ASSERT(IsIntType(type));
+        specialization_ = type;
+        setResultType(type);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(Popcnt)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, num))
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+    void computeRange(TempAllocator& alloc) override;
+};
+
+// Inline implementation of Math.sqrt().
+class MSqrt
+  : public MUnaryInstruction,
+    public FloatingPointPolicy<0>::Data
+{
+    MSqrt(MDefinition* num, MIRType type)
+      : MUnaryInstruction(classOpcode, num)
+    {
+        setResultType(type);
+        specialization_ = type;
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(Sqrt)
+    TRIVIAL_NEW_WRAPPERS
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    void computeRange(TempAllocator& alloc) override;
+
+    // sqrt commutes with float32 specialization.
+    bool isFloat32Commutative() const override { return true; }
+    void trySpecializeFloat32(TempAllocator& alloc) override;
+
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    bool canRecoverOnBailout() const override {
+        return true;
+    }
+
+    ALLOW_CLONE(MSqrt)
+};
+
+// Inline implementation of Math.pow(x, 0.5), which subtly differs from Math.sqrt(x).
+class MPowHalf
+  : public MUnaryInstruction,
+    public DoublePolicy<0>::Data
+{
+    // Refined by collectRangeInfoPreTrunc(); codegen can elide the special
+    // cases these flags rule out.
+    bool operandIsNeverNegativeInfinity_;
+    bool operandIsNeverNegativeZero_;
+    bool operandIsNeverNaN_;
+
+    explicit MPowHalf(MDefinition* input)
+      : MUnaryInstruction(classOpcode, input),
+        operandIsNeverNegativeInfinity_(false),
+        operandIsNeverNegativeZero_(false),
+        operandIsNeverNaN_(false)
+    {
+        setResultType(MIRType::Double);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(PowHalf)
+    TRIVIAL_NEW_WRAPPERS
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    bool operandIsNeverNegativeInfinity() const {
+        return operandIsNeverNegativeInfinity_;
+    }
+    bool operandIsNeverNegativeZero() const {
+        return operandIsNeverNegativeZero_;
+    }
+    bool operandIsNeverNaN() const {
+        return operandIsNeverNaN_;
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    void collectRangeInfoPreTrunc() override;
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    bool canRecoverOnBailout() const override {
+        return true;
+    }
+
+    ALLOW_CLONE(MPowHalf)
+};
+
+// Inline implementation of the one-argument Math.* functions enumerated in
+// Function below, dispatched by function_.
+class MMathFunction
+  : public MUnaryInstruction,
+    public FloatingPointPolicy<0>::Data
+{
+  public:
+    enum Function {
+        Log,
+        Sin,
+        Cos,
+        Exp,
+        Tan,
+        ACos,
+        ASin,
+        ATan,
+        Log10,
+        Log2,
+        Log1P,
+        ExpM1,
+        CosH,
+        SinH,
+        TanH,
+        ACosH,
+        ASinH,
+        ATanH,
+        Sign,
+        Trunc,
+        Cbrt,
+        Floor,
+        Ceil,
+        Round
+    };
+
+  private:
+    Function function_;
+    const MathCache* cache_;
+
+    // A nullptr cache means this function will neither access nor update the cache.
+    MMathFunction(MDefinition* input, Function function, const MathCache* cache)
+      : MUnaryInstruction(classOpcode, input), function_(function), cache_(cache)
+    {
+        setResultType(MIRType::Double);
+        specialization_ = MIRType::Double;
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(MathFunction)
+    TRIVIAL_NEW_WRAPPERS
+
+    Function function() const {
+        return function_;
+    }
+    const MathCache* cache() const {
+        return cache_;
+    }
+    // Two math calls are congruent only when they compute the same function.
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isMathFunction())
+            return false;
+        if (ins->toMathFunction()->function() != function())
+            return false;
+        return congruentIfOperandsEqual(ins);
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    bool possiblyCalls() const override {
+        return true;
+    }
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+
+    void printOpcode(GenericPrinter& out) const override;
+
+    static const char* FunctionName(Function function);
+
+    // Only the rounding functions have exact float32 equivalents.
+    bool isFloat32Commutative() const override {
+        return function_ == Floor || function_ == Ceil || function_ == Round;
+    }
+    void trySpecializeFloat32(TempAllocator& alloc) override;
+    void computeRange(TempAllocator& alloc) override;
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    // Only a subset of functions has recover instructions; SinCosDouble
+    // inputs are never recoverable.
+    bool canRecoverOnBailout() const override {
+        if (input()->type() == MIRType::SinCosDouble)
+            return false;
+        switch(function_) {
+          case Sin:
+          case Log:
+          case Ceil:
+          case Floor:
+          case Round:
+            return true;
+          default:
+            return false;
+        }
+    }
+
+    ALLOW_CLONE(MMathFunction)
+};
+
+// Creates a one-character string from a char code (String.fromCharCode with a
+// single int32 argument).
+class MFromCharCode
+  : public MUnaryInstruction,
+    public IntPolicy<0>::Data
+{
+    explicit MFromCharCode(MDefinition* code)
+      : MUnaryInstruction(classOpcode, code)
+    {
+        setMovable();
+        setResultType(MIRType::String);
+    }
+
+  public:
+    INSTRUCTION_HEADER(FromCharCode)
+    TRIVIAL_NEW_WRAPPERS
+
+    virtual AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    bool canRecoverOnBailout() const override {
+        return true;
+    }
+
+    ALLOW_CLONE(MFromCharCode)
+};
+
+// Creates a string from a code point (String.fromCodePoint with a single
+// int32 argument); throws when the code point is out of range.
+class MFromCodePoint
+  : public MUnaryInstruction,
+    public IntPolicy<0>::Data
+{
+    explicit MFromCodePoint(MDefinition* codePoint)
+      : MUnaryInstruction(classOpcode, codePoint)
+    {
+        setGuard(); // throws on invalid code point
+        setMovable();
+        setResultType(MIRType::String);
+    }
+
+  public:
+    INSTRUCTION_HEADER(FromCodePoint)
+    TRIVIAL_NEW_WRAPPERS
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+
+    ALLOW_CLONE(MFromCodePoint)
+};
+
+// Case conversion of a string (String.prototype.toLowerCase / toUpperCase,
+// selected by mode_).
+class MStringConvertCase
+  : public MUnaryInstruction,
+    public StringPolicy<0>::Data
+{
+  public:
+    enum Mode { LowerCase, UpperCase };
+
+  private:
+    Mode mode_;
+
+    MStringConvertCase(MDefinition* string, Mode mode)
+      : MUnaryInstruction(classOpcode, string), mode_(mode)
+    {
+        setResultType(MIRType::String);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(StringConvertCase)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, string))
+
+    // Conversions in different directions are never congruent.
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins) && ins->toStringConvertCase()->mode() == mode();
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    bool possiblyCalls() const override {
+        return true;
+    }
+    Mode mode() const {
+        return mode_;
+    }
+};
+
+// Computes sin and cos of the input in one operation; the result is a
+// combined SinCosDouble value.
+class MSinCos
+  : public MUnaryInstruction,
+    public FloatingPointPolicy<0>::Data
+{
+    // May be nullptr, in which case the math cache is neither read nor updated.
+    const MathCache* cache_;
+
+    MSinCos(MDefinition *input, const MathCache *cache)
+      : MUnaryInstruction(classOpcode, input),
+        cache_(cache)
+    {
+        setResultType(MIRType::SinCosDouble);
+        specialization_ = MIRType::Double;
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(SinCos)
+
+    // Hand-written factory (no TRIVIAL_NEW_WRAPPERS on this class).
+    static MSinCos *New(TempAllocator &alloc, MDefinition *input, const MathCache *cache)
+    {
+        return new (alloc) MSinCos(input, cache);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    bool congruentTo(const MDefinition *ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    bool possiblyCalls() const override {
+        return true;
+    }
+    const MathCache* cache() const {
+        return cache_;
+    }
+};
+
+// Returns the value to use as |this| value. See also ComputeThis and
+// BoxNonStrictThis in Interpreter.h.
+class MComputeThis
+  : public MUnaryInstruction,
+    public BoxPolicy<0>::Data
+{
+    explicit MComputeThis(MDefinition* def)
+      : MUnaryInstruction(classOpcode, def)
+    {
+        setResultType(MIRType::Value);
+    }
+
+  public:
+    INSTRUCTION_HEADER(ComputeThis)
+    TRIVIAL_NEW_WRAPPERS
+
+    bool possiblyCalls() const override {
+        return true;
+    }
+
+    // Note: don't override getAliasSet: the thisValue hook can be effectful.
+};
+
+// Load an arrow function's |new.target| value.
+class MArrowNewTarget
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    // Operand 0 is the arrow function object whose |new.target| is loaded.
+    explicit MArrowNewTarget(MDefinition* callee)
+      : MUnaryInstruction(classOpcode, callee)
+    {
+        setResultType(MIRType::Value);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(ArrowNewTarget)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, callee))
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        // An arrow function's lexical |this| value is immutable.
+        return AliasSet::None();
+    }
+};
+
+// The goal of a Beta node is to split a def at a conditionally taken
+// branch, so that uses dominated by it have a different name.
+class MBeta
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+  private:
+    // This is the range induced by a comparison and branch in a preceding
+    // block. Note that this does not reflect any range constraints from
+    // the input value itself, so this value may differ from the range()
+    // range after it is computed.
+    const Range* comparison_;
+
+    MBeta(MDefinition* val, const Range* comp)
+        : MUnaryInstruction(classOpcode, val),
+          comparison_(comp)
+    {
+        // A Beta node is a transparent alias of |val|: it forwards both the
+        // MIR type and the type set of its operand.
+        setResultType(val->type());
+        setResultTypeSet(val->resultTypeSet());
+    }
+
+  public:
+    INSTRUCTION_HEADER(Beta)
+    TRIVIAL_NEW_WRAPPERS
+
+    void printOpcode(GenericPrinter& out) const override;
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    void computeRange(TempAllocator& alloc) override;
+};
+
+// If input evaluates to false (i.e. it's NaN, 0 or -0), 0 is returned, else the input is returned
+class MNaNToZero
+  : public MUnaryInstruction,
+    public DoublePolicy<0>::Data
+{
+    // Both flags start pessimistic (false) and are presumably refined by
+    // collectRangeInfoPreTrunc() below — see the definition in MIR.cpp.
+    bool operandIsNeverNaN_;
+    bool operandIsNeverNegativeZero_;
+
+    explicit MNaNToZero(MDefinition* input)
+      : MUnaryInstruction(classOpcode, input),
+        operandIsNeverNaN_(false),
+        operandIsNeverNegativeZero_(false)
+    {
+        setResultType(MIRType::Double);
+        setMovable();
+    }
+  public:
+    INSTRUCTION_HEADER(NaNToZero)
+    TRIVIAL_NEW_WRAPPERS
+
+    bool operandIsNeverNaN() const {
+        return operandIsNeverNaN_;
+    }
+
+    bool operandIsNeverNegativeZero() const {
+        return operandIsNeverNegativeZero_;
+    }
+
+    void collectRangeInfoPreTrunc() override;
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    void computeRange(TempAllocator& alloc) override;
+
+    bool writeRecoverData(CompactBufferWriter& writer) const override;
+    bool canRecoverOnBailout() const override {
+        return true;
+    }
+
+    ALLOW_CLONE(MNaNToZero)
+};
+
+// MIR representation of a Value on the OSR BaselineFrame.
+// The Value is indexed off of OsrFrameReg.
+class MOsrValue
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+  private:
+    // Offset of the Value relative to OsrFrameReg.
+    ptrdiff_t frameOffset_;
+
+    MOsrValue(MOsrEntry* entry, ptrdiff_t frameOffset)
+      : MUnaryInstruction(classOpcode, entry),
+        frameOffset_(frameOffset)
+    {
+        setResultType(MIRType::Value);
+    }
+
+  public:
+    INSTRUCTION_HEADER(OsrValue)
+    TRIVIAL_NEW_WRAPPERS
+
+    ptrdiff_t frameOffset() const {
+        return frameOffset_;
+    }
+
+    // Operand 0 is the MOsrEntry for the frame being entered.
+    MOsrEntry* entry() {
+        return getOperand(0)->toOsrEntry();
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// MIR representation of a JSObject scope chain pointer on the OSR BaselineFrame.
+// The pointer is indexed off of OsrFrameReg.
+class MOsrEnvironmentChain
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+  private:
+    explicit MOsrEnvironmentChain(MOsrEntry* entry)
+      : MUnaryInstruction(classOpcode, entry)
+    {
+        setResultType(MIRType::Object);
+    }
+
+  public:
+    INSTRUCTION_HEADER(OsrEnvironmentChain)
+    TRIVIAL_NEW_WRAPPERS
+
+    // Operand 0 is the MOsrEntry for the frame being entered.
+    MOsrEntry* entry() {
+        return getOperand(0)->toOsrEntry();
+    }
+};
+
+// MIR representation of a JSObject ArgumentsObject pointer on the OSR BaselineFrame.
+// The pointer is indexed off of OsrFrameReg.
+class MOsrArgumentsObject
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+  private:
+    explicit MOsrArgumentsObject(MOsrEntry* entry)
+      : MUnaryInstruction(classOpcode, entry)
+    {
+        setResultType(MIRType::Object);
+    }
+
+  public:
+    INSTRUCTION_HEADER(OsrArgumentsObject)
+    TRIVIAL_NEW_WRAPPERS
+
+    // Operand 0 is the MOsrEntry for the frame being entered.
+    MOsrEntry* entry() {
+        return getOperand(0)->toOsrEntry();
+    }
+};
+
+// MIR representation of the return value on the OSR BaselineFrame.
+// The Value is indexed off of OsrFrameReg.
+class MOsrReturnValue
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+  private:
+    explicit MOsrReturnValue(MOsrEntry* entry)
+      : MUnaryInstruction(classOpcode, entry)
+    {
+        setResultType(MIRType::Value);
+    }
+
+  public:
+    INSTRUCTION_HEADER(OsrReturnValue)
+    TRIVIAL_NEW_WRAPPERS
+
+    // Operand 0 is the MOsrEntry for the frame being entered.
+    MOsrEntry* entry() {
+        return getOperand(0)->toOsrEntry();
+    }
+};
+
+// Unary operation implemented by calling a shared (IC) stub; takes one boxed
+// operand and produces a Value. The specific operation is determined at
+// lowering — presumably from the bytecode op; confirm against Lowering.cpp.
+class MUnarySharedStub
+  : public MUnaryInstruction,
+    public BoxPolicy<0>::Data
+{
+    explicit MUnarySharedStub(MDefinition* input)
+      : MUnaryInstruction(classOpcode, input)
+    {
+        setResultType(MIRType::Value);
+    }
+
+  public:
+    INSTRUCTION_HEADER(UnarySharedStub)
+    TRIVIAL_NEW_WRAPPERS
+};
+
+// Checks if a value is JS_UNINITIALIZED_LEXICAL, bailout out if so, leaving
+// it to baseline to throw at the correct pc.
+class MLexicalCheck
+  : public MUnaryInstruction,
+    public BoxPolicy<0>::Data
+{
+    // Bailout reason reported when the check fails.
+    BailoutKind kind_;
+    explicit MLexicalCheck(MDefinition* input, BailoutKind kind = Bailout_UninitializedLexical)
+      : MUnaryInstruction(classOpcode, input),
+        kind_(kind)
+    {
+        // The check passes its input through unchanged, so it forwards the
+        // operand's type set; it is a guard so it cannot be eliminated.
+        setResultType(MIRType::Value);
+        setResultTypeSet(input->resultTypeSet());
+        setMovable();
+        setGuard();
+    }
+
+  public:
+    INSTRUCTION_HEADER(LexicalCheck)
+    TRIVIAL_NEW_WRAPPERS
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    BailoutKind bailoutKind() const {
+        return kind_;
+    }
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+};
+
+// If not defined, set a global variable to |undefined|.
+class MDefVar
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+    CompilerPropertyName name_; // Target name to be defined.
+    unsigned attrs_; // Attributes to be set.
+
+  private:
+    MDefVar(PropertyName* name, unsigned attrs, MDefinition* envChain)
+      : MUnaryInstruction(classOpcode, envChain),
+        name_(name),
+        attrs_(attrs)
+    {
+    }
+
+  public:
+    INSTRUCTION_HEADER(DefVar)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, environmentChain))
+
+    PropertyName* name() const {
+        return name_;
+    }
+    unsigned attrs() const {
+        return attrs_;
+    }
+
+    // Defining the variable may call into the VM.
+    bool possiblyCalls() const override {
+        return true;
+    }
+    bool appendRoots(MRootList& roots) const override {
+        // The defined name must be traced as a GC root.
+        return roots.append(name_);
+    }
+};
+
+// Produces a Boolean for |object| — per its name, whether RegExp.prototype is
+// still in an optimizable (unmodified) state; confirm against the
+// RegExpPrototypeOptimizable intrinsic it mirrors.
+class MRegExpPrototypeOptimizable
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    explicit MRegExpPrototypeOptimizable(MDefinition* object)
+      : MUnaryInstruction(classOpcode, object)
+    {
+        setResultType(MIRType::Boolean);
+    }
+
+  public:
+    INSTRUCTION_HEADER(RegExpPrototypeOptimizable)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object))
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// Computes an Int32 from a string operand — per its name, the index of the
+// first '$' character in |str|. Can be constant-folded (see foldsTo).
+class MGetFirstDollarIndex
+  : public MUnaryInstruction,
+    public StringPolicy<0>::Data
+{
+    explicit MGetFirstDollarIndex(MDefinition* str)
+      : MUnaryInstruction(classOpcode, str)
+    {
+        setResultType(MIRType::Int32);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(GetFirstDollarIndex)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, str))
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+};
+
+// Returns obj->slots.
+class MSlots
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    explicit MSlots(MDefinition* object)
+      : MUnaryInstruction(classOpcode, object)
+    {
+        setResultType(MIRType::Slots);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(Slots)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object))
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        // The slots pointer is stored in the object's fields.
+        return AliasSet::Load(AliasSet::ObjectFields);
+    }
+
+    ALLOW_CLONE(MSlots)
+};
+
+// Returns obj->elements.
+class MElements
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    // Whether the object is known to be unboxed; part of the congruence key
+    // since boxed and unboxed element loads are not interchangeable.
+    bool unboxed_;
+
+    explicit MElements(MDefinition* object, bool unboxed = false)
+      : MUnaryInstruction(classOpcode, object), unboxed_(unboxed)
+    {
+        setResultType(MIRType::Elements);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(Elements)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object))
+
+    bool unboxed() const {
+        return unboxed_;
+    }
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins) &&
+               ins->toElements()->unboxed() == unboxed();
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::Load(AliasSet::ObjectFields);
+    }
+
+    ALLOW_CLONE(MElements)
+};
+
+// Passes through an object's elements, after ensuring it is entirely doubles.
+class MConvertElementsToDoubles
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+    explicit MConvertElementsToDoubles(MDefinition* elements)
+      : MUnaryInstruction(classOpcode, elements)
+    {
+        // Guard: the conversion must not be eliminated even though the
+        // result just forwards the elements pointer.
+        setGuard();
+        setMovable();
+        setResultType(MIRType::Elements);
+    }
+
+  public:
+    INSTRUCTION_HEADER(ConvertElementsToDoubles)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, elements))
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        // This instruction can read and write to the elements' contents.
+        // However, it is alright to hoist this from loops which explicitly
+        // read or write to the elements: such reads and writes will use double
+        // values and can be reordered freely wrt this conversion, except that
+        // definite double loads must follow the conversion. The latter
+        // property is ensured by chaining this instruction with the elements
+        // themselves, in the same manner as MBoundsCheck.
+        return AliasSet::None();
+    }
+};
+
+// Passes through an object, after ensuring its elements are not copy on write.
+class MMaybeCopyElementsForWrite
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    // Whether codegen must first check that the object is native; part of
+    // the congruence key below.
+    bool checkNative_;
+
+    explicit MMaybeCopyElementsForWrite(MDefinition* object, bool checkNative)
+      : MUnaryInstruction(classOpcode, object), checkNative_(checkNative)
+    {
+        setGuard();
+        setMovable();
+        setResultType(MIRType::Object);
+        setResultTypeSet(object->resultTypeSet());
+    }
+
+  public:
+    INSTRUCTION_HEADER(MaybeCopyElementsForWrite)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object))
+
+    bool checkNative() const {
+        return checkNative_;
+    }
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins) &&
+               checkNative() == ins->toMaybeCopyElementsForWrite()->checkNative();
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::Store(AliasSet::ObjectFields);
+    }
+#ifdef DEBUG
+    bool needsResumePoint() const override {
+        // This instruction is idempotent and does not change observable
+        // behavior, so does not need its own resume point.
+        return false;
+    }
+#endif
+
+};
+
+// Load the initialized length from an elements header.
+class MInitializedLength
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+    explicit MInitializedLength(MDefinition* elements)
+      : MUnaryInstruction(classOpcode, elements)
+    {
+        setResultType(MIRType::Int32);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(InitializedLength)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, elements))
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::Load(AliasSet::ObjectFields);
+    }
+
+    void computeRange(TempAllocator& alloc) override;
+
+    ALLOW_CLONE(MInitializedLength)
+};
+
+// Load the array length from an elements header.
+class MArrayLength
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+    explicit MArrayLength(MDefinition* elements)
+      : MUnaryInstruction(classOpcode, elements)
+    {
+        setResultType(MIRType::Int32);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(ArrayLength)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, elements))
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::Load(AliasSet::ObjectFields);
+    }
+
+    void computeRange(TempAllocator& alloc) override;
+
+    ALLOW_CLONE(MArrayLength)
+};
+
+// Read the length of a typed array.
+class MTypedArrayLength
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    explicit MTypedArrayLength(MDefinition* obj)
+      : MUnaryInstruction(classOpcode, obj)
+    {
+        setResultType(MIRType::Int32);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(TypedArrayLength)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object))
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        // Narrower than ObjectFields: only the typed array length slot.
+        return AliasSet::Load(AliasSet::TypedArrayLength);
+    }
+
+    void computeRange(TempAllocator& alloc) override;
+};
+
+// Load a typed array's elements vector.
+class MTypedArrayElements
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    explicit MTypedArrayElements(MDefinition* object)
+      : MUnaryInstruction(classOpcode, object)
+    {
+        setResultType(MIRType::Elements);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(TypedArrayElements)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object))
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::Load(AliasSet::ObjectFields);
+    }
+
+    ALLOW_CLONE(MTypedArrayElements)
+};
+
+// Load a binary data object's "elements", which is just its opaque
+// binary data space. Eventually this should probably be
+// unified with `MTypedArrayElements`.
+class MTypedObjectElements
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    // Whether the typed object is known to use out-of-line storage; part of
+    // the congruence key below.
+    bool definitelyOutline_;
+
+  private:
+    explicit MTypedObjectElements(MDefinition* object, bool definitelyOutline)
+      : MUnaryInstruction(classOpcode, object),
+        definitelyOutline_(definitelyOutline)
+    {
+        setResultType(MIRType::Elements);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(TypedObjectElements)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object))
+
+    bool definitelyOutline() const {
+        return definitelyOutline_;
+    }
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isTypedObjectElements())
+            return false;
+        const MTypedObjectElements* other = ins->toTypedObjectElements();
+        if (other->definitelyOutline() != definitelyOutline())
+            return false;
+        return congruentIfOperandsEqual(other);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::Load(AliasSet::ObjectFields);
+    }
+};
+
+// Marks |object| as used at this point so it is kept alive by the compiler.
+// Produces no value (MIRType::None) and is a guard so it cannot be
+// dead-code-eliminated.
+class MKeepAliveObject
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    explicit MKeepAliveObject(MDefinition* object)
+      : MUnaryInstruction(classOpcode, object)
+    {
+        setResultType(MIRType::None);
+        setGuard();
+    }
+
+  public:
+    INSTRUCTION_HEADER(KeepAliveObject)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object))
+
+};
+
+// Perform !-operation
+class MNot
+  : public MUnaryInstruction,
+    public TestPolicy::Data
+{
+    // Pessimistically true until cacheOperandMightEmulateUndefined() or
+    // markNoOperandEmulatesUndefined() proves otherwise.
+    bool operandMightEmulateUndefined_;
+    bool operandIsNeverNaN_;
+
+    explicit MNot(MDefinition* input, CompilerConstraintList* constraints = nullptr)
+      : MUnaryInstruction(classOpcode, input),
+        operandMightEmulateUndefined_(true),
+        operandIsNeverNaN_(false)
+    {
+        setResultType(MIRType::Boolean);
+        setMovable();
+        if (constraints)
+            cacheOperandMightEmulateUndefined(constraints);
+    }
+
+    void cacheOperandMightEmulateUndefined(CompilerConstraintList* constraints);
+
+  public:
+    // Factory for a !-operation producing an Int32 (0/1) instead of Boolean;
+    // only valid for Int32/Int64 inputs.
+    static MNot* NewInt32(TempAllocator& alloc, MDefinition* input) {
+        MOZ_ASSERT(input->type() == MIRType::Int32 || input->type() == MIRType::Int64);
+        auto* ins = new(alloc) MNot(input);
+        ins->setResultType(MIRType::Int32);
+        return ins;
+    }
+
+    INSTRUCTION_HEADER(Not)
+    TRIVIAL_NEW_WRAPPERS
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+
+    void markNoOperandEmulatesUndefined() {
+        operandMightEmulateUndefined_ = false;
+    }
+    bool operandMightEmulateUndefined() const {
+        return operandMightEmulateUndefined_;
+    }
+    bool operandIsNeverNaN() const {
+        return operandIsNeverNaN_;
+    }
+
+    virtual AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    void collectRangeInfoPreTrunc() override;
+
+    void trySpecializeFloat32(TempAllocator& alloc) override;
+    bool isFloat32Commutative() const override { return true; }
+#ifdef DEBUG
+    bool isConsistentFloat32Use(MUse* use) const override {
+        // Any float32 use is acceptable for a logical-not.
+        return true;
+    }
+#endif
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    bool canRecoverOnBailout() const override {
+        return true;
+    }
+};
+
+// Bailout if index < minimum.
+class MBoundsCheckLower
+  : public MUnaryInstruction,
+    public IntPolicy<0>::Data
+{
+    // Lower bound to check against; adjustable after construction via
+    // setMinimum().
+    int32_t minimum_;
+    bool fallible_;
+
+    explicit MBoundsCheckLower(MDefinition* index)
+      : MUnaryInstruction(classOpcode, index), minimum_(0), fallible_(true)
+    {
+        setGuard();
+        setMovable();
+        MOZ_ASSERT(index->type() == MIRType::Int32);
+    }
+
+  public:
+    INSTRUCTION_HEADER(BoundsCheckLower)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, index))
+
+    int32_t minimum() const {
+        return minimum_;
+    }
+    void setMinimum(int32_t n) {
+        minimum_ = n;
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    bool fallible() const {
+        return fallible_;
+    }
+    void collectRangeInfoPreTrunc() override;
+};
+
+// Passes through an object, after ensuring it is converted from an unboxed
+// object to a native representation.
+class MConvertUnboxedObjectToNative
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    // The native group the unboxed object is converted to; part of the
+    // congruence key and traced via appendRoots().
+    CompilerObjectGroup group_;
+
+    explicit MConvertUnboxedObjectToNative(MDefinition* obj, ObjectGroup* group)
+      : MUnaryInstruction(classOpcode, obj),
+        group_(group)
+    {
+        setGuard();
+        setMovable();
+        setResultType(MIRType::Object);
+    }
+
+  public:
+    INSTRUCTION_HEADER(ConvertUnboxedObjectToNative)
+    NAMED_OPERANDS((0, object))
+
+    static MConvertUnboxedObjectToNative* New(TempAllocator& alloc, MDefinition* obj,
+                                              ObjectGroup* group);
+
+    ObjectGroup* group() const {
+        return group_;
+    }
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!congruentIfOperandsEqual(ins))
+            return false;
+        return ins->toConvertUnboxedObjectToNative()->group() == group();
+    }
+    AliasSet getAliasSet() const override {
+        // This instruction can read and write to all parts of the object, but
+        // is marked as non-effectful so it can be consolidated by LICM and GVN
+        // and avoid inhibiting other optimizations.
+        //
+        // This is valid to do because when unboxed objects might have a native
+        // group they can be converted to, we do not optimize accesses to the
+        // unboxed objects and do not guard on their group or shape (other than
+        // in this opcode).
+        //
+        // Later accesses can assume the object has a native representation
+        // and optimize accordingly. Those accesses cannot be reordered before
+        // this instruction, however. This is prevented by chaining this
+        // instruction with the object itself, in the same way as MBoundsCheck.
+        return AliasSet::None();
+    }
+    bool appendRoots(MRootList& roots) const override {
+        return roots.append(group_);
+    }
+};
+
+// Array.prototype.pop or Array.prototype.shift on a dense array.
+class MArrayPopShift
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+  public:
+    enum Mode {
+        Pop,
+        Shift
+    };
+
+  private:
+    Mode mode_;
+    bool needsHoleCheck_;
+    bool maybeUndefined_;
+
+    MArrayPopShift(MDefinition* object, Mode mode,
+                   bool needsHoleCheck, bool maybeUndefined)
+      : MUnaryInstruction(classOpcode, object), mode_(mode),
+        needsHoleCheck_(needsHoleCheck), maybeUndefined_(maybeUndefined)
+    { }
+
+  public:
+    INSTRUCTION_HEADER(ArrayPopShift)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object))
+
+    bool needsHoleCheck() const {
+        return needsHoleCheck_;
+    }
+    bool maybeUndefined() const {
+        return maybeUndefined_;
+    }
+    // Return the Mode enum directly rather than narrowing it to bool:
+    // callers compare against Pop/Shift, and the previous bool return only
+    // worked through an accidental enum->bool conversion.
+    Mode mode() const {
+        return mode_;
+    }
+    AliasSet getAliasSet() const override {
+        // Pop/shift mutate both the element storage and the object's
+        // length/initialized-length fields.
+        return AliasSet::Store(AliasSet::ObjectFields | AliasSet::Element);
+    }
+
+    ALLOW_CLONE(MArrayPopShift)
+};
+
+// Load a value fallibly or infallibly from a statically known typed array.
+class MLoadTypedArrayElementStatic
+  : public MUnaryInstruction,
+    public ConvertToInt32Policy<0>::Data
+{
+    MLoadTypedArrayElementStatic(JSObject* someTypedArray, MDefinition* ptr,
+                                 int32_t offset = 0, bool needsBoundsCheck = true)
+      : MUnaryInstruction(classOpcode, ptr), someTypedArray_(someTypedArray), offset_(offset),
+        needsBoundsCheck_(needsBoundsCheck), fallible_(true)
+    {
+        // NOTE(review): |type| is declared int but holds a Scalar::Type
+        // value; declaring it Scalar::Type would be clearer.
+        int type = accessType();
+        if (type == Scalar::Float32)
+            setResultType(MIRType::Float32);
+        else if (type == Scalar::Float64)
+            setResultType(MIRType::Double);
+        else
+            setResultType(MIRType::Int32);
+    }
+
+    // The typed array whose storage is accessed; also determines accessType().
+    CompilerObject someTypedArray_;
+
+    // An offset to be encoded in the load instruction - taking advantage of the
+    // addressing modes. This is only non-zero when the access is proven to be
+    // within bounds.
+    int32_t offset_;
+    bool needsBoundsCheck_;
+    bool fallible_;
+
+  public:
+    INSTRUCTION_HEADER(LoadTypedArrayElementStatic)
+    TRIVIAL_NEW_WRAPPERS
+
+    Scalar::Type accessType() const {
+        return someTypedArray_->as<TypedArrayObject>().type();
+    }
+    SharedMem<void*> base() const;
+    size_t length() const;
+
+    MDefinition* ptr() const { return getOperand(0); }
+    int32_t offset() const { return offset_; }
+    void setOffset(int32_t offset) { offset_ = offset; }
+    bool congruentTo(const MDefinition* ins) const override;
+    AliasSet getAliasSet() const override {
+        return AliasSet::Load(AliasSet::UnboxedElement);
+    }
+
+    bool needsBoundsCheck() const { return needsBoundsCheck_; }
+    void setNeedsBoundsCheck(bool v) { needsBoundsCheck_ = v; }
+
+    bool fallible() const {
+        return fallible_;
+    }
+
+    void setInfallible() {
+        fallible_ = false;
+    }
+
+    void computeRange(TempAllocator& alloc) override;
+    bool needTruncation(TruncateKind kind) override;
+    bool canProduceFloat32() const override { return accessType() == Scalar::Float32; }
+    void collectRangeInfoPreTrunc() override;
+
+    bool appendRoots(MRootList& roots) const override {
+        return roots.append(someTypedArray_);
+    }
+};
+
+// Clamp input to range [0, 255] for Uint8ClampedArray.
+class MClampToUint8
+  : public MUnaryInstruction,
+    public ClampPolicy::Data
+{
+    explicit MClampToUint8(MDefinition* input)
+      : MUnaryInstruction(classOpcode, input)
+    {
+        setResultType(MIRType::Int32);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(ClampToUint8)
+    TRIVIAL_NEW_WRAPPERS
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    void computeRange(TempAllocator& alloc) override;
+
+    ALLOW_CLONE(MClampToUint8)
+};
+
+// Load a Value from a fixed slot of an object.
+class MLoadFixedSlot
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    // Index of the fixed slot to load; part of the congruence key.
+    size_t slot_;
+
+  protected:
+    MLoadFixedSlot(MDefinition* obj, size_t slot)
+      : MUnaryInstruction(classOpcode, obj), slot_(slot)
+    {
+        setResultType(MIRType::Value);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(LoadFixedSlot)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object))
+
+    size_t slot() const {
+        return slot_;
+    }
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isLoadFixedSlot())
+            return false;
+        if (slot() != ins->toLoadFixedSlot()->slot())
+            return false;
+        return congruentIfOperandsEqual(ins);
+    }
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::Load(AliasSet::FixedSlot);
+    }
+
+    AliasType mightAlias(const MDefinition* store) const override;
+
+    ALLOW_CLONE(MLoadFixedSlot)
+};
+
+// Load a fixed slot of an object and unbox it to |type| in one step,
+// bailing out according to |mode| / |bailoutKind| when the unbox fails.
+class MLoadFixedSlotAndUnbox
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    size_t slot_;
+    MUnbox::Mode mode_;
+    BailoutKind bailoutKind_;
+  protected:
+    MLoadFixedSlotAndUnbox(MDefinition* obj, size_t slot, MUnbox::Mode mode, MIRType type,
+                           BailoutKind kind)
+      : MUnaryInstruction(classOpcode, obj), slot_(slot), mode_(mode), bailoutKind_(kind)
+    {
+        setResultType(type);
+        setMovable();
+        // Fallible unbox modes act as guards so the check is not eliminated.
+        if (mode_ == MUnbox::TypeBarrier || mode_ == MUnbox::Fallible)
+            setGuard();
+    }
+
+  public:
+    INSTRUCTION_HEADER(LoadFixedSlotAndUnbox)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object))
+
+    size_t slot() const {
+        return slot_;
+    }
+    MUnbox::Mode mode() const {
+        return mode_;
+    }
+    BailoutKind bailoutKind() const {
+        return bailoutKind_;
+    }
+    bool fallible() const {
+        return mode_ != MUnbox::Infallible;
+    }
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isLoadFixedSlotAndUnbox() ||
+            slot() != ins->toLoadFixedSlotAndUnbox()->slot() ||
+            mode() != ins->toLoadFixedSlotAndUnbox()->mode())
+        {
+            return false;
+        }
+        return congruentIfOperandsEqual(ins);
+    }
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::Load(AliasSet::FixedSlot);
+    }
+
+    AliasType mightAlias(const MDefinition* store) const override;
+
+    ALLOW_CLONE(MLoadFixedSlotAndUnbox);
+};
+
+// Load the "super base" (the [[Prototype]]) of a home object, for |super|
+// property accesses. A guard because a null [[Prototype]] throws.
+class MHomeObjectSuperBase
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    explicit MHomeObjectSuperBase(MDefinition* homeObject)
+      : MUnaryInstruction(classOpcode, homeObject)
+    {
+        setResultType(MIRType::Object);
+        setGuard(); // May throw if [[Prototype]] is null
+    }
+
+  public:
+    INSTRUCTION_HEADER(HomeObjectSuperBase)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, homeObject))
+};
+
+// Emit code to load a value from an object if it matches one of the receivers
+// observed by the baseline IC, else bails out.
+class MGetPropertyPolymorphic
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    // The (receiver guard, shape) pairs observed by the IC; a null shape
+    // marks an unboxed receiver (see getAliasSet below).
+    Vector<PolymorphicEntry, 4, JitAllocPolicy> receivers_;
+    CompilerPropertyName name_;
+
+    MGetPropertyPolymorphic(TempAllocator& alloc, MDefinition* obj, PropertyName* name)
+      : MUnaryInstruction(classOpcode, obj),
+        receivers_(alloc),
+        name_(name)
+    {
+        setGuard();
+        setMovable();
+        setResultType(MIRType::Value);
+    }
+
+  public:
+    INSTRUCTION_HEADER(GetPropertyPolymorphic)
+    NAMED_OPERANDS((0, object))
+
+    static MGetPropertyPolymorphic* New(TempAllocator& alloc, MDefinition* obj, PropertyName* name) {
+        return new(alloc) MGetPropertyPolymorphic(alloc, obj, name);
+    }
+
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isGetPropertyPolymorphic())
+            return false;
+        if (name() != ins->toGetPropertyPolymorphic()->name())
+            return false;
+        return congruentIfOperandsEqual(ins);
+    }
+
+    MOZ_MUST_USE bool addReceiver(const ReceiverGuard& receiver, Shape* shape) {
+        PolymorphicEntry entry;
+        entry.receiver = receiver;
+        entry.shape = shape;
+        return receivers_.append(entry);
+    }
+    size_t numReceivers() const {
+        return receivers_.length();
+    }
+    const ReceiverGuard receiver(size_t i) const {
+        return receivers_[i].receiver;
+    }
+    Shape* shape(size_t i) const {
+        return receivers_[i].shape;
+    }
+    PropertyName* name() const {
+        return name_;
+    }
+    AliasSet getAliasSet() const override {
+        // If any receiver has no shape, the load may touch unboxed element
+        // storage in addition to slots.
+        bool hasUnboxedLoad = false;
+        for (size_t i = 0; i < numReceivers(); i++) {
+            if (!shape(i)) {
+                hasUnboxedLoad = true;
+                break;
+            }
+        }
+        return AliasSet::Load(AliasSet::ObjectFields |
+                              AliasSet::FixedSlot |
+                              AliasSet::DynamicSlot |
+                              (hasUnboxedLoad ? AliasSet::UnboxedElement : 0));
+    }
+
+    AliasType mightAlias(const MDefinition* store) const override;
+
+    bool appendRoots(MRootList& roots) const override;
+};
+
+// Cached name-binding operation: produces the environment object on which
+// |name| should be bound, starting from the environment chain — presumably
+// backing JSOP_BINDNAME; confirm against the IC it uses.
+class MBindNameCache
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    CompilerPropertyName name_;
+    CompilerScript script_;
+    jsbytecode* pc_;
+
+    MBindNameCache(MDefinition* envChain, PropertyName* name, JSScript* script, jsbytecode* pc)
+      : MUnaryInstruction(classOpcode, envChain), name_(name), script_(script), pc_(pc)
+    {
+        setResultType(MIRType::Object);
+    }
+
+  public:
+    INSTRUCTION_HEADER(BindNameCache)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, environmentChain))
+
+    PropertyName* name() const {
+        return name_;
+    }
+    JSScript* script() const {
+        return script_;
+    }
+    jsbytecode* pc() const {
+        return pc_;
+    }
+    bool appendRoots(MRootList& roots) const override {
+        // Don't append the script, all scripts are added anyway.
+        return roots.append(name_);
+    }
+};
+
+// Returns the innermost qualified-'var' environment object on the
+// environment chain. Movable and alias-free: the result depends only on
+// the envChain operand.
+class MCallBindVar
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    explicit MCallBindVar(MDefinition* envChain)
+      : MUnaryInstruction(classOpcode, envChain)
+    {
+        setResultType(MIRType::Object);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(CallBindVar)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, environmentChain))
+
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isCallBindVar())
+            return false;
+        return congruentIfOperandsEqual(ins);
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// Guard on an object's shape. Bails out (with bailoutKind_) if the operand's
+// shape differs from shape_.
+class MGuardShape
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    CompilerShape shape_;
+    BailoutKind bailoutKind_;
+
+    MGuardShape(MDefinition* obj, Shape* shape, BailoutKind bailoutKind)
+      : MUnaryInstruction(classOpcode, obj),
+        shape_(shape),
+        bailoutKind_(bailoutKind)
+    {
+        setGuard();
+        setMovable();
+        setResultType(MIRType::Object);
+        setResultTypeSet(obj->resultTypeSet());
+
+        // Disallow guarding on unboxed object shapes. The group is better to
+        // guard on, and guarding on the shape can interact badly with
+        // MConvertUnboxedObjectToNative.
+        MOZ_ASSERT(shape->getObjectClass() != &UnboxedPlainObject::class_);
+    }
+
+  public:
+    INSTRUCTION_HEADER(GuardShape)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object))
+
+    const Shape* shape() const {
+        return shape_;
+    }
+    BailoutKind bailoutKind() const {
+        return bailoutKind_;
+    }
+    // Congruent only if shape, bailout kind, and operands all match.
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isGuardShape())
+            return false;
+        if (shape() != ins->toGuardShape()->shape())
+            return false;
+        if (bailoutKind() != ins->toGuardShape()->bailoutKind())
+            return false;
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::Load(AliasSet::ObjectFields);
+    }
+    bool appendRoots(MRootList& roots) const override {
+        return roots.append(shape_);
+    }
+};
+
+// Bail if the object's shape or unboxed group is not in the input list.
+// congruentTo and appendRoots are defined out of line (they compare/trace
+// the receiver list).
+class MGuardReceiverPolymorphic
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    Vector<ReceiverGuard, 4, JitAllocPolicy> receivers_;
+
+    // Takes the allocator explicitly because the receiver vector allocates
+    // from it; hence the hand-written New() instead of TRIVIAL_NEW_WRAPPERS.
+    MGuardReceiverPolymorphic(TempAllocator& alloc, MDefinition* obj)
+      : MUnaryInstruction(classOpcode, obj),
+        receivers_(alloc)
+    {
+        setGuard();
+        setMovable();
+        setResultType(MIRType::Object);
+        setResultTypeSet(obj->resultTypeSet());
+    }
+
+  public:
+    INSTRUCTION_HEADER(GuardReceiverPolymorphic)
+    NAMED_OPERANDS((0, object))
+
+    static MGuardReceiverPolymorphic* New(TempAllocator& alloc, MDefinition* obj) {
+        return new(alloc) MGuardReceiverPolymorphic(alloc, obj);
+    }
+
+    MOZ_MUST_USE bool addReceiver(const ReceiverGuard& receiver) {
+        return receivers_.append(receiver);
+    }
+    size_t numReceivers() const {
+        return receivers_.length();
+    }
+    const ReceiverGuard& receiver(size_t i) const {
+        return receivers_[i];
+    }
+
+    bool congruentTo(const MDefinition* ins) const override;
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::Load(AliasSet::ObjectFields);
+    }
+
+    bool appendRoots(MRootList& roots) const override;
+
+};
+
+// Guard on an object's group, inclusively or exclusively:
+// bailOnEquality_ == false bails when the group differs; true bails when it
+// matches.
+class MGuardObjectGroup
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    CompilerObjectGroup group_;
+    bool bailOnEquality_;
+    BailoutKind bailoutKind_;
+
+    MGuardObjectGroup(MDefinition* obj, ObjectGroup* group, bool bailOnEquality,
+                      BailoutKind bailoutKind)
+      : MUnaryInstruction(classOpcode, obj),
+        group_(group),
+        bailOnEquality_(bailOnEquality),
+        bailoutKind_(bailoutKind)
+    {
+        setGuard();
+        setMovable();
+        setResultType(MIRType::Object);
+
+        // Unboxed groups which might be converted to natives can't be guarded
+        // on, due to MConvertUnboxedObjectToNative.
+        MOZ_ASSERT_IF(group->maybeUnboxedLayoutDontCheckGeneration(),
+                      !group->unboxedLayoutDontCheckGeneration().nativeGroup());
+    }
+
+  public:
+    INSTRUCTION_HEADER(GuardObjectGroup)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object))
+
+    const ObjectGroup* group() const {
+        return group_;
+    }
+    bool bailOnEquality() const {
+        return bailOnEquality_;
+    }
+    BailoutKind bailoutKind() const {
+        return bailoutKind_;
+    }
+    // Congruent only if group, polarity, bailout kind, and operands all match.
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isGuardObjectGroup())
+            return false;
+        if (group() != ins->toGuardObjectGroup()->group())
+            return false;
+        if (bailOnEquality() != ins->toGuardObjectGroup()->bailOnEquality())
+            return false;
+        if (bailoutKind() != ins->toGuardObjectGroup()->bailoutKind())
+            return false;
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::Load(AliasSet::ObjectFields);
+    }
+    bool appendRoots(MRootList& roots) const override {
+        return roots.append(group_);
+    }
+};
+
+// Guard on an object's class. No result type is set: the guard is used only
+// for its bailout side effect, not as a value.
+class MGuardClass
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    const Class* class_;
+
+    MGuardClass(MDefinition* obj, const Class* clasp)
+      : MUnaryInstruction(classOpcode, obj),
+        class_(clasp)
+    {
+        setGuard();
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(GuardClass)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object))
+
+    const Class* getClass() const {
+        return class_;
+    }
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isGuardClass())
+            return false;
+        if (getClass() != ins->toGuardClass()->getClass())
+            return false;
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::Load(AliasSet::ObjectFields);
+    }
+
+    ALLOW_CLONE(MGuardClass)
+};
+
+// Guard on the presence or absence of an unboxed object's expando.
+// requireExpando_ selects the polarity: bail if the expando is absent
+// (true) or present (false).
+class MGuardUnboxedExpando
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    bool requireExpando_;
+    BailoutKind bailoutKind_;
+
+    MGuardUnboxedExpando(MDefinition* obj, bool requireExpando, BailoutKind bailoutKind)
+      : MUnaryInstruction(classOpcode, obj),
+        requireExpando_(requireExpando),
+        bailoutKind_(bailoutKind)
+    {
+        setGuard();
+        setMovable();
+        setResultType(MIRType::Object);
+    }
+
+  public:
+    INSTRUCTION_HEADER(GuardUnboxedExpando)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object))
+
+    bool requireExpando() const {
+        return requireExpando_;
+    }
+    BailoutKind bailoutKind() const {
+        return bailoutKind_;
+    }
+    // The congruentIfOperandsEqual() call up front implies matching opcodes,
+    // so the unchecked toGuardUnboxedExpando() below is safe.
+    // NOTE(review): unlike MGuardShape, bailoutKind_ is not compared here —
+    // confirm GVN merging guards with different bailout kinds is intended.
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!congruentIfOperandsEqual(ins))
+            return false;
+        if (requireExpando() != ins->toGuardUnboxedExpando()->requireExpando())
+            return false;
+        return true;
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::Load(AliasSet::ObjectFields);
+    }
+};
+
+// Load an unboxed plain object's expando.
+class MLoadUnboxedExpando
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+  private:
+    explicit MLoadUnboxedExpando(MDefinition* object)
+      : MUnaryInstruction(classOpcode, object)
+    {
+        setResultType(MIRType::Object);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(LoadUnboxedExpando)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object))
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    // The expando pointer lives in the object, hence the ObjectFields load.
+    AliasSet getAliasSet() const override {
+        return AliasSet::Load(AliasSet::ObjectFields);
+    }
+};
+
+// Load from vp[slot] (slots that are not inline in an object). The operand
+// is the object's dynamic-slots pointer (MIRType::Slots), not the object
+// itself; slot_ indexes into that array.
+class MLoadSlot
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    uint32_t slot_;
+
+    MLoadSlot(MDefinition* slots, uint32_t slot)
+      : MUnaryInstruction(classOpcode, slots),
+        slot_(slot)
+    {
+        setResultType(MIRType::Value);
+        setMovable();
+        MOZ_ASSERT(slots->type() == MIRType::Slots);
+    }
+
+  public:
+    INSTRUCTION_HEADER(LoadSlot)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, slots))
+
+    uint32_t slot() const {
+        return slot_;
+    }
+
+    HashNumber valueHash() const override;
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isLoadSlot())
+            return false;
+        if (slot() != ins->toLoadSlot()->slot())
+            return false;
+        return congruentIfOperandsEqual(ins);
+    }
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+
+    AliasSet getAliasSet() const override {
+        MOZ_ASSERT(slots()->type() == MIRType::Slots);
+        return AliasSet::Load(AliasSet::DynamicSlot);
+    }
+    AliasType mightAlias(const MDefinition* store) const override;
+
+    void printOpcode(GenericPrinter& out) const override;
+
+    ALLOW_CLONE(MLoadSlot)
+};
+
+// Inline call to access a function's environment (scope chain).
+class MFunctionEnvironment
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    explicit MFunctionEnvironment(MDefinition* function)
+        : MUnaryInstruction(classOpcode, function)
+    {
+        setResultType(MIRType::Object);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(FunctionEnvironment)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, function))
+
+    // Folding is defined out of line (e.g. when the function is a constant).
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+
+    // A function's environment is fixed.
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// Allocate a new LexicalEnvironmentObject whose enclosing environment is the
+// operand and whose shape is determined by scope_.
+class MNewLexicalEnvironmentObject
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    CompilerGCPointer<LexicalScope*> scope_;
+
+    MNewLexicalEnvironmentObject(MDefinition* enclosing, LexicalScope* scope)
+      : MUnaryInstruction(classOpcode, enclosing),
+        scope_(scope)
+    {
+        setResultType(MIRType::Object);
+    }
+
+  public:
+    INSTRUCTION_HEADER(NewLexicalEnvironmentObject)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, enclosing))
+
+    LexicalScope* scope() const {
+        return scope_;
+    }
+    // Allocation may call into the VM.
+    bool possiblyCalls() const override {
+        return true;
+    }
+    bool appendRoots(MRootList& roots) const override {
+        return roots.append(scope_);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// Allocate a new LexicalEnvironmentObject from existing one; copySlots_
+// controls whether the source environment's slot values are copied over.
+class MCopyLexicalEnvironmentObject
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    bool copySlots_;
+
+    MCopyLexicalEnvironmentObject(MDefinition* env, bool copySlots)
+      : MUnaryInstruction(classOpcode, env),
+        copySlots_(copySlots)
+    {
+        setResultType(MIRType::Object);
+    }
+
+  public:
+    INSTRUCTION_HEADER(CopyLexicalEnvironmentObject)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, env))
+
+    bool copySlots() const {
+        return copySlots_;
+    }
+    // Allocation may call into the VM.
+    bool possiblyCalls() const override {
+        return true;
+    }
+    // Reads the source environment's fields and slots when copying.
+    AliasSet getAliasSet() const override {
+        return AliasSet::Load(AliasSet::ObjectFields |
+                              AliasSet::FixedSlot |
+                              AliasSet::DynamicSlot);
+    }
+};
+
+// Reads a function's [[HomeObject]] (used for super property lookups).
+class MHomeObject
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    explicit MHomeObject(MDefinition* function)
+        : MUnaryInstruction(classOpcode, function)
+    {
+        setResultType(MIRType::Object);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(HomeObject)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, function))
+
+    // A function's [[HomeObject]] is fixed.
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// Inline-cache based name lookup on an environment object; the result is a
+// boxed Value. No getAliasSet() override: the conservative base-class
+// default applies, since the cache fallback may be effectful.
+class MGetNameCache
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+  private:
+    explicit MGetNameCache(MDefinition* obj)
+      : MUnaryInstruction(classOpcode, obj)
+    {
+        setResultType(MIRType::Value);
+    }
+
+  public:
+    INSTRUCTION_HEADER(GetNameCache)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, envObj))
+};
+
+// Implements `delete value.name`; strict_ selects strict-mode semantics
+// (where a failed delete throws instead of returning false).
+class MDeleteProperty
+  : public MUnaryInstruction,
+    public BoxInputsPolicy::Data
+{
+    CompilerPropertyName name_;
+    bool strict_;
+
+  protected:
+    MDeleteProperty(MDefinition* val, PropertyName* name, bool strict)
+      : MUnaryInstruction(classOpcode, val),
+        name_(name),
+        strict_(strict)
+    {
+        setResultType(MIRType::Boolean);
+    }
+
+  public:
+    INSTRUCTION_HEADER(DeleteProperty)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, value))
+
+    PropertyName* name() const {
+        return name_;
+    }
+    bool strict() const {
+        return strict_;
+    }
+    bool appendRoots(MRootList& roots) const override {
+        return roots.append(name_);
+    }
+};
+
+// VM-call fallback for a property get on a boxed value. Effectful by
+// default; see setIdempotent() for the non-effectful special case.
+class MCallGetProperty
+  : public MUnaryInstruction,
+    public BoxInputsPolicy::Data
+{
+    CompilerPropertyName name_;
+    bool idempotent_;
+
+    MCallGetProperty(MDefinition* value, PropertyName* name)
+      : MUnaryInstruction(classOpcode, value), name_(name),
+        idempotent_(false)
+    {
+        setResultType(MIRType::Value);
+    }
+
+  public:
+    INSTRUCTION_HEADER(CallGetProperty)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, value))
+
+    PropertyName* name() const {
+        return name_;
+    }
+
+    // Constructors need to perform a GetProp on the function prototype.
+    // Since getters cannot be set on the prototype, fetching is non-effectful.
+    // The operation may be safely repeated in case of bailout.
+    void setIdempotent() {
+        idempotent_ = true;
+    }
+    AliasSet getAliasSet() const override {
+        if (!idempotent_)
+            return AliasSet::Store(AliasSet::Any);
+        return AliasSet::None();
+    }
+    bool possiblyCalls() const override {
+        return true;
+    }
+    bool appendRoots(MRootList& roots) const override {
+        return roots.append(name_);
+    }
+};
+
+// Reads a string's length as an Int32.
+class MStringLength
+  : public MUnaryInstruction,
+    public StringPolicy<0>::Data
+{
+    explicit MStringLength(MDefinition* string)
+      : MUnaryInstruction(classOpcode, string)
+    {
+        setResultType(MIRType::Int32);
+        setMovable();
+    }
+  public:
+    INSTRUCTION_HEADER(StringLength)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, string))
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        // The string |length| property is immutable, so there is no
+        // implicit dependency.
+        return AliasSet::None();
+    }
+
+    void computeRange(TempAllocator& alloc) override;
+
+    // Recoverable: the length can be recomputed from the string on bailout.
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    bool canRecoverOnBailout() const override {
+        return true;
+    }
+
+    ALLOW_CLONE(MStringLength)
+};
+
+// Inlined assembly for Math.floor(double | float32) -> int32.
+class MFloor
+  : public MUnaryInstruction,
+    public FloatingPointPolicy<0>::Data
+{
+    explicit MFloor(MDefinition* num)
+      : MUnaryInstruction(classOpcode, num)
+    {
+        setResultType(MIRType::Int32);
+        specialization_ = MIRType::Double;
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(Floor)
+    TRIVIAL_NEW_WRAPPERS
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    // May be specialized to operate on float32 inputs directly.
+    bool isFloat32Commutative() const override {
+        return true;
+    }
+    void trySpecializeFloat32(TempAllocator& alloc) override;
+#ifdef DEBUG
+    bool isConsistentFloat32Use(MUse* use) const override {
+        return true;
+    }
+#endif
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    void computeRange(TempAllocator& alloc) override;
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    bool canRecoverOnBailout() const override {
+        return true;
+    }
+
+    ALLOW_CLONE(MFloor)
+};
+
+// Inlined assembly version for Math.ceil(double | float32) -> int32.
+class MCeil
+  : public MUnaryInstruction,
+    public FloatingPointPolicy<0>::Data
+{
+    explicit MCeil(MDefinition* num)
+      : MUnaryInstruction(classOpcode, num)
+    {
+        setResultType(MIRType::Int32);
+        specialization_ = MIRType::Double;
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(Ceil)
+    TRIVIAL_NEW_WRAPPERS
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    // May be specialized to operate on float32 inputs directly.
+    bool isFloat32Commutative() const override {
+        return true;
+    }
+    void trySpecializeFloat32(TempAllocator& alloc) override;
+#ifdef DEBUG
+    bool isConsistentFloat32Use(MUse* use) const override {
+        return true;
+    }
+#endif
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    void computeRange(TempAllocator& alloc) override;
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    bool canRecoverOnBailout() const override {
+        return true;
+    }
+
+    ALLOW_CLONE(MCeil)
+};
+
+// Inlined version of Math.round(double | float32) -> int32.
+// Note: unlike MFloor/MCeil there is no computeRange() override.
+class MRound
+  : public MUnaryInstruction,
+    public FloatingPointPolicy<0>::Data
+{
+    explicit MRound(MDefinition* num)
+      : MUnaryInstruction(classOpcode, num)
+    {
+        setResultType(MIRType::Int32);
+        specialization_ = MIRType::Double;
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(Round)
+    TRIVIAL_NEW_WRAPPERS
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    // May be specialized to operate on float32 inputs directly.
+    bool isFloat32Commutative() const override {
+        return true;
+    }
+    void trySpecializeFloat32(TempAllocator& alloc) override;
+#ifdef DEBUG
+    bool isConsistentFloat32Use(MUse* use) const override {
+        return true;
+    }
+#endif
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    bool canRecoverOnBailout() const override {
+        return true;
+    }
+
+    ALLOW_CLONE(MRound)
+};
+
+// NearbyInt rounds the floating-point input to the nearest integer, according
+// to the RoundingMode. The result stays floating point (unlike
+// MFloor/MCeil/MRound which produce Int32).
+class MNearbyInt
+  : public MUnaryInstruction,
+    public FloatingPointPolicy<0>::Data
+{
+    RoundingMode roundingMode_;
+
+    explicit MNearbyInt(MDefinition* num, MIRType resultType, RoundingMode roundingMode)
+      : MUnaryInstruction(classOpcode, num),
+        roundingMode_(roundingMode)
+    {
+        MOZ_ASSERT(HasAssemblerSupport(roundingMode));
+
+        MOZ_ASSERT(IsFloatingPointType(resultType));
+        setResultType(resultType);
+        specialization_ = resultType;
+
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(NearbyInt)
+    TRIVIAL_NEW_WRAPPERS
+
+    // Only emitted when the target has a native round instruction.
+    static bool HasAssemblerSupport(RoundingMode mode) {
+        return Assembler::HasRoundInstruction(mode);
+    }
+
+    RoundingMode roundingMode() const { return roundingMode_; }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    bool isFloat32Commutative() const override {
+        return true;
+    }
+    void trySpecializeFloat32(TempAllocator& alloc) override;
+#ifdef DEBUG
+    bool isConsistentFloat32Use(MUse* use) const override {
+        return true;
+    }
+#endif
+
+    // Safe: congruentIfOperandsEqual() implies matching opcodes, so
+    // toNearbyInt() on the right-hand side cannot mis-cast.
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins) &&
+               ins->toNearbyInt()->roundingMode() == roundingMode_;
+    }
+
+    void printOpcode(GenericPrinter& out) const override;
+
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+
+    // Only ceil (Up) and floor (Down) have recover instructions.
+    bool canRecoverOnBailout() const override {
+        switch (roundingMode_) {
+          case RoundingMode::Up:
+          case RoundingMode::Down:
+            return true;
+          default:
+            return false;
+        }
+    }
+
+    ALLOW_CLONE(MNearbyInt)
+};
+
+// Inline-cache based implementation of getting a value's iterator object.
+// No getAliasSet() override: the effectful base-class default applies.
+class MGetIteratorCache
+  : public MUnaryInstruction,
+    public BoxExceptPolicy<0, MIRType::Object>::Data
+{
+    explicit MGetIteratorCache(MDefinition* val)
+      : MUnaryInstruction(classOpcode, val)
+    {
+        setResultType(MIRType::Object);
+    }
+
+  public:
+    INSTRUCTION_HEADER(GetIteratorCache)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, value))
+};
+
+// Advances an iterator and produces its next result as a boxed Value.
+// No getAliasSet() override: advancing mutates the iterator, so the
+// effectful base-class default applies.
+class MIteratorMore
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    explicit MIteratorMore(MDefinition* iter)
+      : MUnaryInstruction(classOpcode, iter)
+    {
+        setResultType(MIRType::Value);
+    }
+
+  public:
+    INSTRUCTION_HEADER(IteratorMore)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, iterator))
+
+};
+
+// Tests whether the operand is the iteration-done sentinel value produced by
+// MIteratorMore — presumably the magic "no iteration" value; TODO confirm.
+class MIsNoIter
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+    explicit MIsNoIter(MDefinition* def)
+      : MUnaryInstruction(classOpcode, def)
+    {
+        setResultType(MIRType::Boolean);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(IsNoIter)
+    TRIVIAL_NEW_WRAPPERS
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// Closes an iterator when iteration completes. Produces no value (no result
+// type is set) and keeps the effectful base-class alias-set default.
+class MIteratorEnd
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    explicit MIteratorEnd(MDefinition* iter)
+      : MUnaryInstruction(classOpcode, iter)
+    { }
+
+  public:
+    INSTRUCTION_HEADER(IteratorEnd)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, iterator))
+
+};
+
+// Implementation for instanceof operator with specific rhs: the prototype
+// object is known at compile time, so only the lhs is an operand.
+class MInstanceOf
+  : public MUnaryInstruction,
+    public InstanceOfPolicy::Data
+{
+    CompilerObject protoObj_;
+
+    MInstanceOf(MDefinition* obj, JSObject* proto)
+      : MUnaryInstruction(classOpcode, obj),
+        protoObj_(proto)
+    {
+        setResultType(MIRType::Boolean);
+    }
+
+  public:
+    INSTRUCTION_HEADER(InstanceOf)
+    TRIVIAL_NEW_WRAPPERS
+
+    JSObject* prototypeObject() {
+        return protoObj_;
+    }
+
+    bool appendRoots(MRootList& roots) const override {
+        return roots.append(protoObj_);
+    }
+};
+
+// This MIR instruction is used to get an argument from the actual arguments.
+class MGetFrameArgument
+  : public MUnaryInstruction,
+    public IntPolicy<0>::Data
+{
+    // Whether the script contains JSOP_SETARG; determines the alias set below.
+    bool scriptHasSetArg_;
+
+    MGetFrameArgument(MDefinition* idx, bool scriptHasSetArg)
+      : MUnaryInstruction(classOpcode, idx),
+        scriptHasSetArg_(scriptHasSetArg)
+    {
+        setResultType(MIRType::Value);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(GetFrameArgument)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, index))
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        // If the script doesn't have any JSOP_SETARG ops, then this instruction is never
+        // aliased.
+        if (scriptHasSetArg_)
+            return AliasSet::Load(AliasSet::FrameArgument);
+        return AliasSet::None();
+    }
+};
+
+// This MIR instruction is used to set an argument value in the frame.
+class MSetFrameArgument
+  : public MUnaryInstruction,
+    public NoFloatPolicy<0>::Data
+{
+    uint32_t argno_;
+
+    MSetFrameArgument(uint32_t argno, MDefinition* value)
+      : MUnaryInstruction(classOpcode, value),
+        argno_(argno)
+    {
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(SetFrameArgument)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, value))
+
+    uint32_t argno() const {
+        return argno_;
+    }
+
+    // Never congruent: this is a store, so GVN must not merge instances.
+    bool congruentTo(const MDefinition* ins) const override {
+        return false;
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::Store(AliasSet::FrameArgument);
+    }
+};
+
+// Allocates the rest-parameter array from the actual arguments beyond the
+// formal count. numActuals is the dynamic argument count operand; the formal
+// count and template object come from MRestCommon.
+class MRest
+  : public MUnaryInstruction,
+    public MRestCommon,
+    public IntPolicy<0>::Data
+{
+    MRest(TempAllocator& alloc, CompilerConstraintList* constraints, MDefinition* numActuals,
+          unsigned numFormals, ArrayObject* templateObject)
+      : MUnaryInstruction(classOpcode, numActuals),
+        MRestCommon(numFormals, templateObject)
+    {
+        setResultType(MIRType::Object);
+        setResultTypeSet(MakeSingletonTypeSet(alloc, constraints, templateObject));
+    }
+
+  public:
+    INSTRUCTION_HEADER(Rest)
+    TRIVIAL_NEW_WRAPPERS_WITH_ALLOC
+    NAMED_OPERANDS((0, numActuals))
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    // Array allocation may call into the VM.
+    bool possiblyCalls() const override {
+        return true;
+    }
+    bool appendRoots(MRootList& roots) const override {
+        return roots.append(templateObject());
+    }
+};
+
+// Narrows a value's result type set to |types| without any dynamic check:
+// unlike MTypeBarrier there is no setGuard(), so this never bails.
+class MFilterTypeSet
+  : public MUnaryInstruction,
+    public FilterTypeSetPolicy::Data
+{
+    MFilterTypeSet(MDefinition* def, TemporaryTypeSet* types)
+      : MUnaryInstruction(classOpcode, def)
+    {
+        MOZ_ASSERT(!types->unknown());
+        setResultType(types->getKnownMIRType());
+        setResultTypeSet(types);
+    }
+
+  public:
+    INSTRUCTION_HEADER(FilterTypeSet)
+    TRIVIAL_NEW_WRAPPERS
+
+    // Never congruent: each filter carries its own type set.
+    bool congruentTo(const MDefinition* def) const override {
+        return false;
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    virtual bool neverHoist() const override {
+        return resultTypeSet()->empty();
+    }
+    void computeRange(TempAllocator& alloc) override;
+
+    bool isFloat32Commutative() const override {
+        return IsFloatingPointType(type());
+    }
+
+    bool canProduceFloat32() const override;
+    bool canConsumeFloat32(MUse* operand) const override;
+    void trySpecializeFloat32(TempAllocator& alloc) override;
+};
+
+// Given a value, guard that the value is in a particular TypeSet, then returns
+// that value.
+class MTypeBarrier
+  : public MUnaryInstruction,
+    public TypeBarrierPolicy::Data
+{
+    BarrierKind barrierKind_;
+
+    MTypeBarrier(MDefinition* def, TemporaryTypeSet* types,
+                 BarrierKind kind = BarrierKind::TypeSet)
+      : MUnaryInstruction(classOpcode, def),
+        barrierKind_(kind)
+    {
+        MOZ_ASSERT(kind == BarrierKind::TypeTagOnly || kind == BarrierKind::TypeSet);
+
+        MOZ_ASSERT(!types->unknown());
+        setResultType(types->getKnownMIRType());
+        setResultTypeSet(types);
+
+        // A guard: bails out if the value is not in the type set.
+        setGuard();
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(TypeBarrier)
+    TRIVIAL_NEW_WRAPPERS
+
+    void printOpcode(GenericPrinter& out) const override;
+    bool congruentTo(const MDefinition* def) const override;
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    virtual bool neverHoist() const override {
+        return resultTypeSet()->empty();
+    }
+    BarrierKind barrierKind() const {
+        return barrierKind_;
+    }
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+
+    bool alwaysBails() const {
+        // If mirtype of input doesn't agree with mirtype of barrier,
+        // we will definitely bail.
+        MIRType type = resultTypeSet()->getKnownMIRType();
+        if (type == MIRType::Value)
+            return false;
+        if (input()->type() == MIRType::Value)
+            return false;
+        if (input()->type() == MIRType::ObjectOrNull) {
+            // The ObjectOrNull optimization is only performed when the
+            // barrier's type is MIRType::Null.
+            MOZ_ASSERT(type == MIRType::Null);
+            return false;
+        }
+        return input()->type() != type;
+    }
+
+    ALLOW_CLONE(MTypeBarrier)
+};
+
+// Like MTypeBarrier, guard that the value is in the given type set. This is
+// used before property writes to ensure the value being written is represented
+// in the property types for the object. Unlike MTypeBarrier it produces no
+// value and is not movable.
+class MMonitorTypes
+  : public MUnaryInstruction,
+    public BoxInputsPolicy::Data
+{
+    const TemporaryTypeSet* typeSet_;
+    BarrierKind barrierKind_;
+
+    MMonitorTypes(MDefinition* def, const TemporaryTypeSet* types, BarrierKind kind)
+      : MUnaryInstruction(classOpcode, def),
+        typeSet_(types),
+        barrierKind_(kind)
+    {
+        MOZ_ASSERT(kind == BarrierKind::TypeTagOnly || kind == BarrierKind::TypeSet);
+
+        setGuard();
+        MOZ_ASSERT(!types->unknown());
+    }
+
+  public:
+    INSTRUCTION_HEADER(MonitorTypes)
+    TRIVIAL_NEW_WRAPPERS
+
+    const TemporaryTypeSet* typeSet() const {
+        return typeSet_;
+    }
+    BarrierKind barrierKind() const {
+        return barrierKind_;
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// Shared base for the New*CallObject instructions. Abstract: no
+// INSTRUCTION_HEADER here; subclasses supply the concrete opcode. Operand 0
+// is an MConstant holding the template CallObject.
+class MNewCallObjectBase : public MUnaryInstruction
+                         , public SingleObjectPolicy::Data
+{
+  protected:
+    MNewCallObjectBase(Opcode op, MConstant* templateObj)
+      : MUnaryInstruction(op, templateObj)
+    {
+        setResultType(MIRType::Object);
+    }
+
+  public:
+    CallObject* templateObject() const {
+        return &getOperand(0)->toConstant()->toObject().as<CallObject>();
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// Tests whether the operand (Object or boxed Value) is callable.
+class MIsCallable
+  : public MUnaryInstruction,
+    public BoxExceptPolicy<0, MIRType::Object>::Data
+{
+    explicit MIsCallable(MDefinition* object)
+      : MUnaryInstruction(classOpcode, object)
+    {
+        MOZ_ASSERT(object->type() == MIRType::Object || object->type() == MIRType::Value);
+        setResultType(MIRType::Boolean);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(IsCallable)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object))
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// Tests whether the operand object is a constructor.
+class MIsConstructor
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+  public:
+    explicit MIsConstructor(MDefinition* object)
+      : MUnaryInstruction(classOpcode, object)
+    {
+        setResultType(MIRType::Boolean);
+        setMovable();
+    }
+
+    INSTRUCTION_HEADER(IsConstructor)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object))
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// Tests whether the boxed operand holds an object.
+class MIsObject
+  : public MUnaryInstruction,
+    public BoxInputsPolicy::Data
+{
+    explicit MIsObject(MDefinition* object)
+    : MUnaryInstruction(classOpcode, object)
+    {
+        setResultType(MIRType::Boolean);
+        setMovable();
+    }
+  public:
+    INSTRUCTION_HEADER(IsObject)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object))
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// Boolean test parameterized by a native Class pointer. The input must be an
+// Object, or a Value that might contain one (asserted in the constructor).
+class MHasClass
+    : public MUnaryInstruction,
+      public SingleObjectPolicy::Data
+{
+    // The Class being tested against; part of this instruction's identity
+    // for congruence purposes (see congruentTo below).
+    const Class* class_;
+
+    MHasClass(MDefinition* object, const Class* clasp)
+      : MUnaryInstruction(classOpcode, object)
+      , class_(clasp)
+    {
+        MOZ_ASSERT(object->type() == MIRType::Object ||
+                   (object->type() == MIRType::Value && object->mightBeType(MIRType::Object)));
+        setResultType(MIRType::Boolean);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(HasClass)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object))
+
+    const Class* getClass() const {
+        return class_;
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    // Two MHasClass instructions are congruent only if they test the same
+    // Class and have equal operands.
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isHasClass())
+            return false;
+        if (getClass() != ins->toHasClass()->getClass())
+            return false;
+        return congruentIfOperandsEqual(ins);
+    }
+};
+
+// Note: we might call a proxy trap, so this instruction is effectful.
+// Accordingly it is not marked movable and does not override getAliasSet(),
+// keeping the conservative default alias behavior.
+class MIsArray
+  : public MUnaryInstruction,
+    public BoxExceptPolicy<0, MIRType::Object>::Data
+{
+    explicit MIsArray(MDefinition* value)
+      : MUnaryInstruction(classOpcode, value)
+    {
+        setResultType(MIRType::Boolean);
+    }
+
+  public:
+    INSTRUCTION_HEADER(IsArray)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, value))
+};
+
+// Boolean-producing test on an Object-typed input; movable and alias-free.
+class MIsTypedArray
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    explicit MIsTypedArray(MDefinition* value)
+      : MUnaryInstruction(classOpcode, value)
+    {
+        setResultType(MIRType::Boolean);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(IsTypedArray)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, value))
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// Maps an Object operand to a String result. Movable, alias-free and
+// congruent on equal operands, so duplicates can be eliminated.
+class MObjectClassToString
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    explicit MObjectClassToString(MDefinition* obj)
+      : MUnaryInstruction(classOpcode, obj)
+    {
+        setMovable();
+        setResultType(MIRType::String);
+    }
+
+  public:
+    INSTRUCTION_HEADER(ObjectClassToString)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object))
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+};
+
+// Boolean result computed from an Int32-coerced operand. Constant-foldable
+// (foldsTo) and recoverable on bailout, so it can be moved off the hot path.
+class MAtomicIsLockFree
+  : public MUnaryInstruction,
+    public ConvertToInt32Policy<0>::Data
+{
+    explicit MAtomicIsLockFree(MDefinition* value)
+      : MUnaryInstruction(classOpcode, value)
+    {
+        setResultType(MIRType::Boolean);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(AtomicIsLockFree)
+    TRIVIAL_NEW_WRAPPERS
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+
+    // Serialized for the Recover mechanism; may be re-evaluated on bailout.
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    bool canRecoverOnBailout() const override {
+        return true;
+    }
+
+    ALLOW_CLONE(MAtomicIsLockFree)
+};
+
+// This applies to an object that is known to be a TypedArray, it bails out
+// if the obj does not map a SharedArrayBuffer.
+
+// No result type is set: this is a pure guard (setGuard) that must not be
+// eliminated even though it is alias-free.
+class MGuardSharedTypedArray
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    explicit MGuardSharedTypedArray(MDefinition* obj)
+      : MUnaryInstruction(classOpcode, obj)
+    {
+        setGuard();
+        setMovable();
+    }
+
+public:
+    INSTRUCTION_HEADER(GuardSharedTypedArray)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object))
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// Guard instruction over a boxed Value: the input flows through unchanged
+// (result type/typeset mirror the operand) while the check may bail out.
+// checkKind_ is an opaque kind tag consumed by the lowering/VM side.
+class MCheckIsObj
+  : public MUnaryInstruction,
+    public BoxInputsPolicy::Data
+{
+    uint8_t checkKind_;
+
+    MCheckIsObj(MDefinition* toCheck, uint8_t checkKind)
+      : MUnaryInstruction(classOpcode, toCheck),
+        checkKind_(checkKind)
+    {
+        // Pass-through: the result is the checked value itself.
+        setResultType(MIRType::Value);
+        setResultTypeSet(toCheck->resultTypeSet());
+        setGuard();
+    }
+
+  public:
+    INSTRUCTION_HEADER(CheckIsObj)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, checkValue))
+
+    uint8_t checkKind() const { return checkKind_; }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// Guard instruction over a boxed Value, structured identically to
+// MCheckIsObj: the value passes through, the guard may bail out, and
+// checkKind_ is an opaque kind tag consumed downstream.
+class MCheckIsCallable
+  : public MUnaryInstruction,
+    public BoxInputsPolicy::Data
+{
+    uint8_t checkKind_;
+
+    MCheckIsCallable(MDefinition* toCheck, uint8_t checkKind)
+      : MUnaryInstruction(classOpcode, toCheck),
+        checkKind_(checkKind)
+    {
+        // Pass-through: the result is the checked value itself.
+        setResultType(MIRType::Value);
+        setResultTypeSet(toCheck->resultTypeSet());
+        setGuard();
+    }
+
+  public:
+    INSTRUCTION_HEADER(CheckIsCallable)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, checkValue))
+
+    uint8_t checkKind() const { return checkKind_; }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// Guard over a boxed Value that flows through unchanged. Unlike the checks
+// above it does not override getAliasSet(), keeping the conservative default.
+class MCheckObjCoercible
+  : public MUnaryInstruction,
+    public BoxInputsPolicy::Data
+{
+    explicit MCheckObjCoercible(MDefinition* toCheck)
+      : MUnaryInstruction(classOpcode, toCheck)
+    {
+        setGuard();
+        setResultType(MIRType::Value);
+        setResultTypeSet(toCheck->resultTypeSet());
+    }
+
+  public:
+    INSTRUCTION_HEADER(CheckObjCoercible)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, checkValue))
+};
+
+// Debug-only style guard over a boxed Value; the value passes through with
+// its typeset preserved and the guard keeps it from being eliminated.
+class MDebugCheckSelfHosted
+  : public MUnaryInstruction,
+    public BoxInputsPolicy::Data
+{
+    explicit MDebugCheckSelfHosted(MDefinition* toCheck)
+      : MUnaryInstruction(classOpcode, toCheck)
+    {
+        setGuard();
+        setResultType(MIRType::Value);
+        setResultTypeSet(toCheck->resultTypeSet());
+    }
+
+  public:
+    INSTRUCTION_HEADER(DebugCheckSelfHosted)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, checkValue))
+
+};
+
+// Boolean-producing test on an Object operand. Reads ObjectFields (packed
+// state lives in the object), so it is a Load rather than alias-free.
+class MIsPackedArray
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    explicit MIsPackedArray(MDefinition* array)
+      : MUnaryInstruction(classOpcode, array)
+    {
+        setResultType(MIRType::Boolean);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(IsPackedArray)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, array))
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::Load(AliasSet::ObjectFields);
+    }
+};
+
+// Produces the prototype of an Object operand as a boxed Value. Marked as a
+// guard because it can throw (see inline comment), so it must not be
+// dead-code eliminated.
+class MGetPrototypeOf
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    explicit MGetPrototypeOf(MDefinition* target)
+      : MUnaryInstruction(classOpcode, target)
+    {
+        setResultType(MIRType::Value);
+        setGuard(); // May throw if target is a proxy.
+    }
+
+  public:
+    INSTRUCTION_HEADER(GetPrototypeOf)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, target))
+};
+
+// Flips the input's sign bit, independently of the rest of the number's
+// payload. Note this is different from multiplying by minus-one, which has
+// side-effects for e.g. NaNs.
+class MWasmNeg
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+    // The caller-supplied type becomes the result type; no validation is
+    // done here (NoTypePolicy).
+    MWasmNeg(MDefinition* op, MIRType type)
+      : MUnaryInstruction(classOpcode, op)
+    {
+        setResultType(type);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(WasmNeg)
+    TRIVIAL_NEW_WRAPPERS
+};
+
+// Loads a field at a fixed offset from the wasm Tls pointer. The alias set
+// is supplied by the creator (restricted to WasmHeapMeta or None), and
+// identity for GVN is (opcode, offset, result type).
+class MWasmLoadTls
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+    uint32_t offset_;
+    AliasSet aliases_;
+
+    explicit MWasmLoadTls(MDefinition* tlsPointer, uint32_t offset, MIRType type, AliasSet aliases)
+      : MUnaryInstruction(classOpcode, tlsPointer),
+        offset_(offset),
+        aliases_(aliases)
+    {
+        // Different Tls data have different alias classes and only those classes are allowed.
+        MOZ_ASSERT(aliases_.flags() == AliasSet::Load(AliasSet::WasmHeapMeta).flags() ||
+                   aliases_.flags() == AliasSet::None().flags());
+
+        // The only types supported at the moment.
+        MOZ_ASSERT(type == MIRType::Pointer || type == MIRType::Int32);
+
+        setMovable();
+        setResultType(type);
+    }
+
+  public:
+    INSTRUCTION_HEADER(WasmLoadTls)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, tlsPtr))
+
+    uint32_t offset() const {
+        return offset_;
+    }
+
+    // Congruence requires the same offset and result type in addition to
+    // the opcode; the operand is implicitly the Tls pointer.
+    bool congruentTo(const MDefinition* ins) const override {
+        return op() == ins->op() &&
+               offset() == ins->toWasmLoadTls()->offset() &&
+               type() == ins->type();
+    }
+
+    HashNumber valueHash() const override {
+        return addU32ToHash(HashNumber(op()), offset());
+    }
+
+    AliasSet getAliasSet() const override {
+        return aliases_;
+    }
+};
+
+// Adds a constant offset to a base Int32; marked as a guard (the addition
+// can trap, reported at bytecodeOffset_). Constant-foldable via foldsTo.
+class MWasmAddOffset
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+    uint32_t offset_;
+    // Where in the wasm bytecode this access originated, for trap reporting.
+    wasm::BytecodeOffset bytecodeOffset_;
+
+    MWasmAddOffset(MDefinition* base, uint32_t offset, wasm::BytecodeOffset bytecodeOffset)
+      : MUnaryInstruction(classOpcode, base),
+        offset_(offset),
+        bytecodeOffset_(bytecodeOffset)
+    {
+        setGuard();
+        setResultType(MIRType::Int32);
+    }
+
+  public:
+    INSTRUCTION_HEADER(WasmAddOffset)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, base))
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    uint32_t offset() const {
+        return offset_;
+    }
+    wasm::BytecodeOffset bytecodeOffset() const {
+        return bytecodeOffset_;
+    }
+};
+
+// Loads a wasm global variable at a fixed offset from the global data area,
+// reached through the Tls pointer operand. isConstant_ distinguishes
+// immutable globals, which are treated as alias-free.
+class MWasmLoadGlobalVar
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+    MWasmLoadGlobalVar(MIRType type, unsigned globalDataOffset, bool isConstant, MDefinition* tlsPtr)
+      : MUnaryInstruction(classOpcode, tlsPtr),
+        globalDataOffset_(globalDataOffset), isConstant_(isConstant)
+    {
+        MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));
+        setResultType(type);
+        setMovable();
+    }
+
+    // Note: members are declared after the constructor but are initialized
+    // in declaration order by the init list above.
+    unsigned globalDataOffset_;
+    bool isConstant_;
+
+  public:
+    INSTRUCTION_HEADER(WasmLoadGlobalVar)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, tlsPtr))
+
+    unsigned globalDataOffset() const { return globalDataOffset_; }
+
+    HashNumber valueHash() const override;
+    bool congruentTo(const MDefinition* ins) const override;
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+
+    // Immutable globals never change, so loads of them carry no alias.
+    AliasSet getAliasSet() const override {
+        return isConstant_ ? AliasSet::None() : AliasSet::Load(AliasSet::WasmGlobalVar);
+    }
+
+    AliasType mightAlias(const MDefinition* def) const override;
+};
+
+// Marks a value as a wasm call argument placed at spOffset_ bytes from the
+// stack pointer; the offset may be adjusted later via incrementOffset.
+class MWasmStackArg
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+    MWasmStackArg(uint32_t spOffset, MDefinition* ins)
+      : MUnaryInstruction(classOpcode, ins),
+        spOffset_(spOffset)
+    {}
+
+    uint32_t spOffset_;
+
+  public:
+    INSTRUCTION_HEADER(WasmStackArg)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, arg))
+
+    uint32_t spOffset() const {
+        return spOffset_;
+    }
+    void incrementOffset(uint32_t inc) {
+        spOffset_ += inc;
+    }
+};
+
+// Bit-preserving reinterpretation between same-width types. The switch in
+// the constructor pins down the only legal conversions:
+// Int32<->Float32 and Int64<->Double.
+class MWasmReinterpret
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+    MWasmReinterpret(MDefinition* val, MIRType toType)
+      : MUnaryInstruction(classOpcode, val)
+    {
+        switch (val->type()) {
+          case MIRType::Int32:   MOZ_ASSERT(toType == MIRType::Float32); break;
+          case MIRType::Float32: MOZ_ASSERT(toType == MIRType::Int32);   break;
+          case MIRType::Double:  MOZ_ASSERT(toType == MIRType::Int64);   break;
+          case MIRType::Int64:   MOZ_ASSERT(toType == MIRType::Double);  break;
+          default:              MOZ_CRASH("unexpected reinterpret conversion");
+        }
+        setMovable();
+        setResultType(toType);
+    }
+
+  public:
+    INSTRUCTION_HEADER(WasmReinterpret)
+    TRIVIAL_NEW_WRAPPERS
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+
+    ALLOW_CLONE(MWasmReinterpret)
+};
+
+// Store a value infallibly to a statically known typed array.
+class MStoreTypedArrayElementStatic :
+    public MBinaryInstruction,
+    public StoreUnboxedScalarBase,
+    public StoreTypedArrayElementStaticPolicy::Data
+{
+    // Operands: (0) element index, (1) value to store. The scalar write
+    // type is derived from the typed array's element type.
+    MStoreTypedArrayElementStatic(JSObject* someTypedArray, MDefinition* ptr, MDefinition* v,
+                                  int32_t offset = 0, bool needsBoundsCheck = true)
+        : MBinaryInstruction(classOpcode, ptr, v),
+          StoreUnboxedScalarBase(someTypedArray->as<TypedArrayObject>().type()),
+          someTypedArray_(someTypedArray),
+          offset_(offset), needsBoundsCheck_(needsBoundsCheck)
+    {}
+
+    // GC-traced reference to the target typed array (see appendRoots).
+    CompilerObject someTypedArray_;
+
+    // An offset to be encoded in the store instruction - taking advantage of the
+    // addressing modes. This is only non-zero when the access is proven to be
+    // within bounds.
+    int32_t offset_;
+    bool needsBoundsCheck_;
+
+  public:
+    INSTRUCTION_HEADER(StoreTypedArrayElementStatic)
+    TRIVIAL_NEW_WRAPPERS
+
+    Scalar::Type accessType() const {
+        return writeType();
+    }
+
+    SharedMem<void*> base() const;
+    size_t length() const;
+
+    MDefinition* ptr() const { return getOperand(0); }
+    MDefinition* value() const { return getOperand(1); }
+    bool needsBoundsCheck() const { return needsBoundsCheck_; }
+    void setNeedsBoundsCheck(bool v) { needsBoundsCheck_ = v; }
+    int32_t offset() const { return offset_; }
+    void setOffset(int32_t offset) { offset_ = offset; }
+    AliasSet getAliasSet() const override {
+        return AliasSet::Store(AliasSet::UnboxedElement);
+    }
+    TruncateKind operandTruncateKind(size_t index) const override;
+
+    // Only the stored value (operand 1) may be specialized to Float32, and
+    // only when the array stores Float32 elements.
+    bool canConsumeFloat32(MUse* use) const override {
+        return use == getUseFor(1) && accessType() == Scalar::Float32;
+    }
+    void collectRangeInfoPreTrunc() override;
+
+    bool appendRoots(MRootList& roots) const override {
+        return roots.append(someTypedArray_);
+    }
+};
+
+// Setting __proto__ in an object literal.
+// Produces no value (MIRType::None); calls into the VM (possiblyCalls).
+class MMutateProto
+  : public MBinaryInstruction,
+    public MixPolicy<ObjectPolicy<0>, BoxPolicy<1> >::Data
+{
+  protected:
+    MMutateProto(MDefinition* obj, MDefinition* value)
+      : MBinaryInstruction(classOpcode, obj, value)
+    {
+        setResultType(MIRType::None);
+    }
+
+  public:
+    INSTRUCTION_HEADER(MutateProto)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, getObject), (1, getValue))
+
+    bool possiblyCalls() const override {
+        return true;
+    }
+};
+
+// Installs a getter/setter pair on an object under a fixed property name.
+// The name is a GC thing, so it is reported through appendRoots.
+class MInitPropGetterSetter
+  : public MBinaryInstruction,
+    public MixPolicy<ObjectPolicy<0>, ObjectPolicy<1> >::Data
+{
+    CompilerPropertyName name_;
+
+    MInitPropGetterSetter(MDefinition* obj, PropertyName* name, MDefinition* value)
+      : MBinaryInstruction(classOpcode, obj, value),
+        name_(name)
+    { }
+
+  public:
+    INSTRUCTION_HEADER(InitPropGetterSetter)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object), (1, value))
+
+    PropertyName* name() const {
+        return name_;
+    }
+
+    bool appendRoots(MRootList& roots) const override {
+        return roots.append(name_);
+    }
+};
+
+// Looks up a dynamically computed name on an environment chain, producing a
+// boxed Value. Calls into the VM (possiblyCalls).
+class MGetDynamicName
+  : public MBinaryInstruction,
+    public MixPolicy<ObjectPolicy<0>, ConvertToStringPolicy<1> >::Data
+{
+  protected:
+    MGetDynamicName(MDefinition* envChain, MDefinition* name)
+      : MBinaryInstruction(classOpcode, envChain, name)
+    {
+        setResultType(MIRType::Value);
+    }
+
+  public:
+    INSTRUCTION_HEADER(GetDynamicName)
+    NAMED_OPERANDS((0, getEnvironmentChain), (1, getName))
+
+    // Hand-written New() instead of TRIVIAL_NEW_WRAPPERS; it forwards the
+    // arguments unchanged.
+    static MGetDynamicName*
+    New(TempAllocator& alloc, MDefinition* envChain, MDefinition* name) {
+        return new(alloc) MGetDynamicName(envChain, name);
+    }
+
+    bool possiblyCalls() const override {
+        return true;
+    }
+};
+
+// Comparison of two values under a JSOp (==, <, ===, ...). The CompareType
+// records what specialization range analysis / type inference settled on;
+// Compare_Unknown means the comparison may be effectful (see getAliasSet).
+class MCompare
+  : public MBinaryInstruction,
+    public ComparePolicy::Data
+{
+  public:
+    enum CompareType {
+
+        // Anything compared to Undefined
+        Compare_Undefined,
+
+        // Anything compared to Null
+        Compare_Null,
+
+        // Undefined compared to Boolean
+        // Null      compared to Boolean
+        // Double    compared to Boolean
+        // String    compared to Boolean
+        // Symbol    compared to Boolean
+        // Object    compared to Boolean
+        // Value     compared to Boolean
+        Compare_Boolean,
+
+        // Int32   compared to Int32
+        // Boolean compared to Boolean
+        Compare_Int32,
+        Compare_Int32MaybeCoerceBoth,
+        Compare_Int32MaybeCoerceLHS,
+        Compare_Int32MaybeCoerceRHS,
+
+        // Int32 compared as unsigneds
+        Compare_UInt32,
+
+        // Int64 compared to Int64.
+        Compare_Int64,
+
+        // Int64 compared as unsigneds.
+        Compare_UInt64,
+
+        // Double compared to Double
+        Compare_Double,
+
+        Compare_DoubleMaybeCoerceLHS,
+        Compare_DoubleMaybeCoerceRHS,
+
+        // Float compared to Float
+        Compare_Float32,
+
+        // String compared to String
+        Compare_String,
+
+        // Symbol compared to Symbol
+        Compare_Symbol,
+
+        // Undefined compared to String
+        // Null      compared to String
+        // Boolean   compared to String
+        // Int32     compared to String
+        // Double    compared to String
+        // Object    compared to String
+        // Value     compared to String
+        Compare_StrictString,
+
+        // Object compared to Object
+        Compare_Object,
+
+        // Compare 2 values bitwise
+        Compare_Bitwise,
+
+        // All other possible compares
+        Compare_Unknown
+    };
+
+  private:
+    CompareType compareType_;
+    JSOp jsop_;
+    bool operandMightEmulateUndefined_;
+    bool operandsAreNeverNaN_;
+
+    // When a floating-point comparison is converted to an integer comparison
+    // (when range analysis proves it safe), we need to convert the operands
+    // to integer as well.
+    bool truncateOperands_;
+
+    // General form: starts as Compare_Unknown with a Boolean result; the
+    // compare type is refined later (setCompareType / determineCompareType).
+    MCompare(MDefinition* left, MDefinition* right, JSOp jsop)
+      : MBinaryInstruction(classOpcode, left, right),
+        compareType_(Compare_Unknown),
+        jsop_(jsop),
+        operandMightEmulateUndefined_(true),
+        operandsAreNeverNaN_(false),
+        truncateOperands_(false)
+    {
+        setResultType(MIRType::Boolean);
+        setMovable();
+    }
+
+    // Pre-specialized form (used for int/float comparisons); note the result
+    // type is Int32 here, not Boolean.
+    MCompare(MDefinition* left, MDefinition* right, JSOp jsop, CompareType compareType)
+      : MCompare(left, right, jsop)
+    {
+        MOZ_ASSERT(compareType == Compare_Int32 || compareType == Compare_UInt32 ||
+                   compareType == Compare_Int64 || compareType == Compare_UInt64 ||
+                   compareType == Compare_Double || compareType == Compare_Float32);
+        compareType_ = compareType;
+        operandMightEmulateUndefined_ = false;
+        setResultType(MIRType::Int32);
+    }
+
+  public:
+    INSTRUCTION_HEADER(Compare)
+    TRIVIAL_NEW_WRAPPERS
+
+    MOZ_MUST_USE bool tryFold(bool* result);
+    MOZ_MUST_USE bool evaluateConstantOperands(TempAllocator& alloc, bool* result);
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+    void filtersUndefinedOrNull(bool trueBranch, MDefinition** subject, bool* filtersUndefined,
+                                bool* filtersNull);
+
+    CompareType compareType() const {
+        return compareType_;
+    }
+    bool isInt32Comparison() const {
+        return compareType() == Compare_Int32 ||
+               compareType() == Compare_Int32MaybeCoerceBoth ||
+               compareType() == Compare_Int32MaybeCoerceLHS ||
+               compareType() == Compare_Int32MaybeCoerceRHS;
+    }
+    bool isDoubleComparison() const {
+        return compareType() == Compare_Double ||
+               compareType() == Compare_DoubleMaybeCoerceLHS ||
+               compareType() == Compare_DoubleMaybeCoerceRHS;
+    }
+    bool isFloat32Comparison() const {
+        return compareType() == Compare_Float32;
+    }
+    bool isNumericComparison() const {
+        return isInt32Comparison() ||
+               isDoubleComparison() ||
+               isFloat32Comparison();
+    }
+    void setCompareType(CompareType type) {
+        compareType_ = type;
+    }
+    MIRType inputType();
+
+    JSOp jsop() const {
+        return jsop_;
+    }
+    void markNoOperandEmulatesUndefined() {
+        operandMightEmulateUndefined_ = false;
+    }
+    bool operandMightEmulateUndefined() const {
+        return operandMightEmulateUndefined_;
+    }
+    bool operandsAreNeverNaN() const {
+        return operandsAreNeverNaN_;
+    }
+    AliasSet getAliasSet() const override {
+        // Strict equality is never effectful.
+        if (jsop_ == JSOP_STRICTEQ || jsop_ == JSOP_STRICTNE)
+            return AliasSet::None();
+        if (compareType_ == Compare_Unknown)
+            return AliasSet::Store(AliasSet::Any);
+        MOZ_ASSERT(compareType_ <= Compare_Bitwise);
+        return AliasSet::None();
+    }
+
+    void printOpcode(GenericPrinter& out) const override;
+    void collectRangeInfoPreTrunc() override;
+
+    void trySpecializeFloat32(TempAllocator& alloc) override;
+    bool isFloat32Commutative() const override { return true; }
+    bool needTruncation(TruncateKind kind) override;
+    void truncate() override;
+    TruncateKind operandTruncateKind(size_t index) const override;
+
+    static CompareType determineCompareType(JSOp op, MDefinition* left, MDefinition* right);
+    void cacheOperandMightEmulateUndefined(CompilerConstraintList* constraints);
+
+# ifdef DEBUG
+    bool isConsistentFloat32Use(MUse* use) const override {
+        // Both sides of the compare can be Float32
+        return compareType_ == Compare_Float32;
+    }
+# endif
+
+    ALLOW_CLONE(MCompare)
+
+  protected:
+    MOZ_MUST_USE bool tryFoldEqualOperands(bool* result);
+    MOZ_MUST_USE bool tryFoldTypeOf(bool* result);
+
+    // Congruence requires the same compare type and JSOp, not just equal
+    // operands.
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!binaryCongruentTo(ins))
+            return false;
+        return compareType() == ins->toCompare()->compareType() &&
+               jsop() == ins->toCompare()->jsop();
+    }
+};
+
+// Caller-side allocation of |this| for |new|:
+// Constructs |this| when possible, else MagicValue(JS_IS_CONSTRUCTING).
+class MCreateThis
+  : public MBinaryInstruction,
+    public MixPolicy<ObjectPolicy<0>, ObjectPolicy<1> >::Data
+{
+    explicit MCreateThis(MDefinition* callee, MDefinition* newTarget)
+      : MBinaryInstruction(classOpcode, callee, newTarget)
+    {
+        setResultType(MIRType::Value);
+    }
+
+  public:
+    INSTRUCTION_HEADER(CreateThis)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, getCallee), (1, getNewTarget))
+
+    // Although creation of |this| modifies global state, it is safely repeatable.
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    bool possiblyCalls() const override {
+        return true;
+    }
+};
+
+// Stores a value into slot |argno| of an arguments object. Treated as a
+// store to Any, so it orders against all other memory operations.
+class MSetArgumentsObjectArg
+  : public MBinaryInstruction,
+    public MixPolicy<ObjectPolicy<0>, BoxPolicy<1> >::Data
+{
+    size_t argno_;
+
+    MSetArgumentsObjectArg(MDefinition* argsObj, size_t argno, MDefinition* value)
+      : MBinaryInstruction(classOpcode, argsObj, value),
+        argno_(argno)
+    {
+    }
+
+  public:
+    INSTRUCTION_HEADER(SetArgumentsObjectArg)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, getArgsObject), (1, getValue))
+
+    size_t argno() const {
+        return argno_;
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::Store(AliasSet::Any);
+    }
+};
+
+// Given a MIRType::Value A and a MIRType::Object B:
+// If the Value may be safely unboxed to an Object, return Object(A).
+// Otherwise, return B.
+// Used to implement return behavior for inlined constructors.
+class MReturnFromCtor
+  : public MBinaryInstruction,
+    public MixPolicy<BoxPolicy<0>, ObjectPolicy<1> >::Data
+{
+    MReturnFromCtor(MDefinition* value, MDefinition* object)
+      : MBinaryInstruction(classOpcode, value, object)
+    {
+        setResultType(MIRType::Object);
+    }
+
+  public:
+    INSTRUCTION_HEADER(ReturnFromCtor)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, getValue), (1, getObject))
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// Abstract base for the bitwise binary operators (&, |, ^, shifts). Concrete
+// subclasses supply the opcode and implement the pure-virtual fold hooks;
+// there is deliberately no INSTRUCTION_HEADER here.
+class MBinaryBitwiseInstruction
+  : public MBinaryInstruction,
+    public BitwisePolicy::Data
+{
+  protected:
+    MBinaryBitwiseInstruction(Opcode op, MDefinition* left, MDefinition* right, MIRType type)
+      : MBinaryInstruction(op, left, right), maskMatchesLeftRange(false),
+        maskMatchesRightRange(false)
+    {
+        MOZ_ASSERT(type == MIRType::Int32 || type == MIRType::Int64);
+        setResultType(type);
+        setMovable();
+    }
+
+    void specializeAs(MIRType type);
+    // Set by collectRangeInfoPreTrunc when the mask operand already covers
+    // the other operand's range, enabling foldUnnecessaryBitop.
+    bool maskMatchesLeftRange;
+    bool maskMatchesRightRange;
+
+  public:
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+    MDefinition* foldUnnecessaryBitop();
+    // Per-operator folding hooks, implemented by each concrete bitwise op.
+    virtual MDefinition* foldIfZero(size_t operand) = 0;
+    virtual MDefinition* foldIfNegOne(size_t operand) = 0;
+    virtual MDefinition* foldIfEqual()  = 0;
+    virtual MDefinition* foldIfAllBitsSet(size_t operand)  = 0;
+    virtual void infer(BaselineInspector* inspector, jsbytecode* pc);
+    void collectRangeInfoPreTrunc() override;
+
+    void setInt32Specialization() {
+        specialization_ = MIRType::Int32;
+        setResultType(MIRType::Int32);
+    }
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return binaryCongruentTo(ins);
+    }
+    // Unspecialized (Object-or-worse) bitwise ops may run arbitrary code.
+    AliasSet getAliasSet() const override {
+        if (specialization_ >= MIRType::Object)
+            return AliasSet::Store(AliasSet::Any);
+        return AliasSet::None();
+    }
+
+    TruncateKind operandTruncateKind(size_t index) const override;
+};
+
+// Abstract base for the arithmetic binary operators (+, -, *, /, %).
+// Concrete subclasses supply the opcode and the identity element; there is
+// deliberately no INSTRUCTION_HEADER here.
+class MBinaryArithInstruction
+  : public MBinaryInstruction,
+    public ArithPolicy::Data
+{
+    // Implicit truncate flag is set by the truncate backward range analysis
+    // optimization phase, and by wasm pre-processing. It is used in
+    // NeedNegativeZeroCheck to check if the result of a multiplication needs to
+    // produce -0 double value, and for avoiding overflow checks.
+
+    // This optimization happens when the multiplication cannot be truncated
+    // even if all uses are truncating its result, such as when the range
+    // analysis detect a precision loss in the multiplication.
+    TruncateKind implicitTruncate_;
+
+    // Whether we must preserve NaN semantics, and in particular not fold
+    // (x op id) or (id op x) to x, or replace a division by a multiply of the
+    // exact reciprocal.
+    bool mustPreserveNaN_;
+
+  public:
+    MBinaryArithInstruction(Opcode op, MDefinition* left, MDefinition* right)
+      : MBinaryInstruction(op, left, right),
+        implicitTruncate_(NoTruncate),
+        mustPreserveNaN_(false)
+    {
+        specialization_ = MIRType::None;
+        setMovable();
+    }
+
+    // Factory that dispatches on |op| to construct the right subclass.
+    static MBinaryArithInstruction* New(TempAllocator& alloc, Opcode op,
+                                        MDefinition* left, MDefinition* right);
+
+    bool constantDoubleResult(TempAllocator& alloc);
+
+    void setMustPreserveNaN(bool b) { mustPreserveNaN_ = b; }
+    bool mustPreserveNaN() const { return mustPreserveNaN_; }
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+    void printOpcode(GenericPrinter& out) const override;
+
+    // The operator's identity element (e.g. 0 for add), used when folding.
+    virtual double getIdentity() = 0;
+
+    void setSpecialization(MIRType type) {
+        specialization_ = type;
+        setResultType(type);
+    }
+    void setInt32Specialization() {
+        specialization_ = MIRType::Int32;
+        setResultType(MIRType::Int32);
+    }
+    void setNumberSpecialization(TempAllocator& alloc, BaselineInspector* inspector, jsbytecode* pc);
+
+    virtual void trySpecializeFloat32(TempAllocator& alloc) override;
+
+    // Congruence additionally requires agreement on NaN preservation.
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!binaryCongruentTo(ins))
+            return false;
+        const auto* other = static_cast<const MBinaryArithInstruction*>(ins);
+        return other->mustPreserveNaN_ == mustPreserveNaN_;
+    }
+    // Unspecialized (Object-or-worse) arithmetic may run arbitrary code.
+    AliasSet getAliasSet() const override {
+        if (specialization_ >= MIRType::Object)
+            return AliasSet::Store(AliasSet::Any);
+        return AliasSet::None();
+    }
+
+    bool isTruncated() const {
+        return implicitTruncate_ == Truncate;
+    }
+    TruncateKind truncateKind() const {
+        return implicitTruncate_;
+    }
+    // Truncation only ever strengthens (monotonic via Max).
+    void setTruncateKind(TruncateKind kind) {
+        implicitTruncate_ = Max(implicitTruncate_, kind);
+    }
+};
+
+// Numeric min/max selected by the isMax_ flag; the result type is fixed at
+// construction and also becomes the specialization.
+class MMinMax
+  : public MBinaryInstruction,
+    public ArithPolicy::Data
+{
+    bool isMax_;
+
+    MMinMax(MDefinition* left, MDefinition* right, MIRType type, bool isMax)
+      : MBinaryInstruction(classOpcode, left, right),
+        isMax_(isMax)
+    {
+        MOZ_ASSERT(IsNumberType(type));
+        setResultType(type);
+        setMovable();
+        specialization_ = type;
+    }
+
+  public:
+    INSTRUCTION_HEADER(MinMax)
+    TRIVIAL_NEW_WRAPPERS
+
+    // Wasm entry point; currently identical to New().
+    static MMinMax* NewWasm(TempAllocator& alloc, MDefinition* left, MDefinition* right,
+                            MIRType type, bool isMax)
+    {
+        return New(alloc, left, right, type, isMax);
+    }
+
+    bool isMax() const {
+        return isMax_;
+    }
+
+    // Congruence requires matching min/max polarity, not just operands.
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!congruentIfOperandsEqual(ins))
+            return false;
+        const MMinMax* other = ins->toMinMax();
+        return other->isMax() == isMax();
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+    void computeRange(TempAllocator& alloc) override;
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    bool canRecoverOnBailout() const override {
+        return true;
+    }
+
+    bool isFloat32Commutative() const override { return true; }
+    void trySpecializeFloat32(TempAllocator& alloc) override;
+
+    ALLOW_CLONE(MMinMax)
+};
+
+// Combines lhs's magnitude with rhs's sign; the result type is supplied by
+// the creator (NoTypePolicy). Movable and alias-free.
+class MCopySign
+  : public MBinaryInstruction,
+    public NoTypePolicy::Data
+{
+    MCopySign(MDefinition* lhs, MDefinition* rhs, MIRType type)
+      : MBinaryInstruction(classOpcode, lhs, rhs)
+    {
+        setResultType(type);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(CopySign)
+    TRIVIAL_NEW_WRAPPERS
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    ALLOW_CLONE(MCopySign)
+};
+
+// Inline implementation of atan2 (arctangent of y/x).
+// Double in, Double out; calls out to the math library (possiblyCalls) but
+// is otherwise pure and recoverable on bailout.
+class MAtan2
+  : public MBinaryInstruction,
+    public MixPolicy<DoublePolicy<0>, DoublePolicy<1> >::Data
+{
+    MAtan2(MDefinition* y, MDefinition* x)
+      : MBinaryInstruction(classOpcode, y, x)
+    {
+        setResultType(MIRType::Double);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(Atan2)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, y), (1, x))
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    bool possiblyCalls() const override {
+        return true;
+    }
+
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    bool canRecoverOnBailout() const override {
+        return true;
+    }
+
+    ALLOW_CLONE(MAtan2)
+};
+
+// Inline implementation of Math.pow().
+// |specialization_| records the power operand's type: Double or Int32 when
+// specialized, or None for the generic (Value-producing) form.
+class MPow
+  : public MBinaryInstruction,
+    public PowPolicy::Data
+{
+    MPow(MDefinition* input, MDefinition* power, MIRType powerType)
+      : MBinaryInstruction(classOpcode, input, power)
+    {
+        MOZ_ASSERT(powerType == MIRType::Double ||
+                   powerType == MIRType::Int32 ||
+                   powerType == MIRType::None);
+        specialization_ = powerType;
+        // Unspecialized pow produces a boxed Value; specialized pow always
+        // produces a Double.
+        if (powerType == MIRType::None)
+            setResultType(MIRType::Value);
+        else
+            setResultType(MIRType::Double);
+        setMovable();
+    }
+
+    // Helpers for `foldsTo`
+    MDefinition* foldsConstant(TempAllocator &alloc);
+    MDefinition* foldsConstantPower(TempAllocator &alloc);
+
+  public:
+    INSTRUCTION_HEADER(Pow)
+    TRIVIAL_NEW_WRAPPERS
+
+    MDefinition* input() const {
+        return lhs();
+    }
+    MDefinition* power() const {
+        return rhs();
+    }
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        // The generic form operates on Values and is treated as having
+        // arbitrary side effects (presumably it can invoke user valueOf/
+        // toString — TODO confirm); the specialized forms are pure.
+        if (specialization_ == MIRType::None)
+            return AliasSet::Store(AliasSet::Any);
+        return AliasSet::None();
+    }
+    bool possiblyCalls() const override {
+        return true;
+    }
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    // Only the effect-free specialized forms can be recovered on bailout.
+    bool canRecoverOnBailout() const override {
+        return specialization_ != MIRType::None;
+    }
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+
+    ALLOW_CLONE(MPow)
+};
+
+// Concatenation of two strings; always produces a String.
+class MConcat
+  : public MBinaryInstruction,
+    public MixPolicy<ConvertToStringPolicy<0>, ConvertToStringPolicy<1> >::Data
+{
+    MConcat(MDefinition* left, MDefinition* right)
+      : MBinaryInstruction(classOpcode, left, right)
+    {
+        // At least one input should be definitely string
+        MOZ_ASSERT(left->type() == MIRType::String || right->type() == MIRType::String);
+
+        setMovable();
+        setResultType(MIRType::String);
+    }
+
+  public:
+    INSTRUCTION_HEADER(Concat)
+    TRIVIAL_NEW_WRAPPERS
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    // Pure function of the operands: recomputable during bailout.
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    bool canRecoverOnBailout() const override {
+        return true;
+    }
+
+    ALLOW_CLONE(MConcat)
+};
+
+// Int32 char code of |str| at |index| (String.prototype.charCodeAt).
+class MCharCodeAt
+  : public MBinaryInstruction,
+    public MixPolicy<StringPolicy<0>, IntPolicy<1> >::Data
+{
+    MCharCodeAt(MDefinition* str, MDefinition* index)
+        : MBinaryInstruction(classOpcode, str, index)
+    {
+        setMovable();
+        setResultType(MIRType::Int32);
+    }
+
+  public:
+    INSTRUCTION_HEADER(CharCodeAt)
+    TRIVIAL_NEW_WRAPPERS
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+
+    virtual AliasSet getAliasSet() const override {
+        // Strings are immutable, so there is no implicit dependency.
+        return AliasSet::None();
+    }
+
+    void computeRange(TempAllocator& alloc) override;
+
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    bool canRecoverOnBailout() const override {
+        return true;
+    }
+
+    ALLOW_CLONE(MCharCodeAt)
+};
+
+// Splits |string| on |separator|, producing an array object whose group is
+// |group_| (presumably the inlined String.prototype.split — TODO confirm).
+class MStringSplit
+  : public MBinaryInstruction,
+    public MixPolicy<StringPolicy<0>, StringPolicy<1> >::Data
+{
+    // GC thing referenced by this instruction; registered via appendRoots so
+    // it is traced for the lifetime of the compilation.
+    CompilerObjectGroup group_;
+
+    MStringSplit(TempAllocator& alloc, CompilerConstraintList* constraints, MDefinition* string,
+                 MDefinition* sep, ObjectGroup* group)
+      : MBinaryInstruction(classOpcode, string, sep),
+        group_(group)
+    {
+        setResultType(MIRType::Object);
+        TemporaryTypeSet* types = MakeSingletonTypeSet(alloc, constraints, group);
+        setResultTypeSet(types);
+    }
+
+  public:
+    INSTRUCTION_HEADER(StringSplit)
+    TRIVIAL_NEW_WRAPPERS_WITH_ALLOC
+    NAMED_OPERANDS((0, string), (1, separator))
+
+    ObjectGroup* group() const {
+        return group_;
+    }
+    bool possiblyCalls() const override {
+        return true;
+    }
+    virtual AliasSet getAliasSet() const override {
+        // Although this instruction returns a new array, we don't have to mark
+        // it as store instruction, see also MNewArray.
+        return AliasSet::None();
+    }
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    bool canRecoverOnBailout() const override {
+        return true;
+    }
+    bool appendRoots(MRootList& roots) const override {
+        return roots.append(group_);
+    }
+};
+
+// Replaces the datum in the given lane by a scalar value of the same type.
+class MSimdInsertElement
+  : public MBinaryInstruction,
+    public MixPolicy< SimdSameAsReturnedTypePolicy<0>, SimdScalarPolicy<1> >::Data
+{
+  private:
+    unsigned lane_;
+
+    MSimdInsertElement(MDefinition* vec, MDefinition* val, unsigned lane)
+      : MBinaryInstruction(classOpcode, vec, val), lane_(lane)
+    {
+        MIRType type = vec->type();
+        MOZ_ASSERT(IsSimdType(type));
+        MOZ_ASSERT(lane < SimdTypeToLength(type));
+        setMovable();
+        setResultType(type);
+    }
+
+  public:
+    INSTRUCTION_HEADER(SimdInsertElement)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, vector), (1, value))
+
+    unsigned lane() const {
+        return lane_;
+    }
+
+    // Only the inserted scalar (operand 1) may be consumed as Float32, and
+    // only when this vector's lane type is Float32.
+    bool canConsumeFloat32(MUse* use) const override {
+        return use == getUseFor(1) && SimdTypeToLaneType(type()) == MIRType::Float32;
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    // Congruent only to inserts into the same lane.
+    bool congruentTo(const MDefinition* ins) const override {
+        return binaryCongruentTo(ins) && lane_ == ins->toSimdInsertElement()->lane();
+    }
+
+    void printOpcode(GenericPrinter& out) const override;
+
+    ALLOW_CLONE(MSimdInsertElement)
+};
+
+// Applies a shuffle operation to the inputs. The lane indexes select a source
+// lane from the concatenation of the two input vectors.
+class MSimdShuffle
+  : public MBinaryInstruction,
+    public MSimdShuffleBase,
+    public NoTypePolicy::Data
+{
+    MSimdShuffle(MDefinition* lhs, MDefinition* rhs, const uint8_t lanes[])
+      : MBinaryInstruction(classOpcode, lhs, rhs), MSimdShuffleBase(lanes, lhs->type())
+    {
+        MOZ_ASSERT(IsSimdType(lhs->type()));
+        MOZ_ASSERT(IsSimdType(rhs->type()));
+        MOZ_ASSERT(lhs->type() == rhs->type());
+        // Each lane index must address the concatenation of the two inputs.
+        for (unsigned i = 0; i < arity_; i++)
+            MOZ_ASSERT(lane(i) < 2 * arity_);
+        setResultType(lhs->type());
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(SimdShuffle)
+
+    // Note: this factory canonicalizes the shuffle and may return an
+    // MSimdSwizzle (single-input shuffle) instead of an MSimdShuffle.
+    static MInstruction* New(TempAllocator& alloc, MDefinition* lhs, MDefinition* rhs,
+                             const uint8_t lanes[])
+    {
+        unsigned arity = SimdTypeToLength(lhs->type());
+
+        // Swap operands so that new lanes come from LHS in majority.
+        // In the balanced case, swap operands if needs be, in order to be able
+        // to do only one vshufps on x86.
+        unsigned lanesFromLHS = 0;
+        for (unsigned i = 0; i < arity; i++) {
+            if (lanes[i] < arity)
+                lanesFromLHS++;
+        }
+
+        if (lanesFromLHS < arity / 2 ||
+            (arity == 4 && lanesFromLHS == 2 && lanes[0] >= 4 && lanes[1] >= 4)) {
+            mozilla::Array<uint8_t, 16> newLanes;
+            // Remap lane indexes so they refer to the swapped concatenation.
+            for (unsigned i = 0; i < arity; i++)
+                newLanes[i] = (lanes[i] + arity) % (2 * arity);
+            return New(alloc, rhs, lhs, &newLanes[0]);
+        }
+
+        // If all lanes come from the same vector, just use swizzle instead.
+        if (lanesFromLHS == arity)
+            return MSimdSwizzle::New(alloc, lhs, lanes);
+
+        return new(alloc) MSimdShuffle(lhs, rhs, lanes);
+    }
+
+    // Congruent only to shuffles with the identical lane selection.
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isSimdShuffle())
+            return false;
+        const MSimdShuffle* other = ins->toSimdShuffle();
+        return sameLanes(other) && binaryCongruentTo(other);
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    ALLOW_CLONE(MSimdShuffle)
+};
+
+// Compares each value of a SIMD vector to each corresponding lane's value of
+// another SIMD vector, and returns a boolean vector containing the results of
+// the comparison: all bits are set to 1 if the comparison is true, 0 otherwise.
+// When comparing integer vectors, a SimdSign must be provided to request signed
+// or unsigned comparison.
+class MSimdBinaryComp
+  : public MBinaryInstruction,
+    public SimdAllPolicy::Data
+{
+  public:
+    enum Operation {
+#define NAME_(x) x,
+        FOREACH_COMP_SIMD_OP(NAME_)
+#undef NAME_
+    };
+
+    static const char* OperationName(Operation op) {
+        switch (op) {
+#define NAME_(x) case x: return #x;
+        FOREACH_COMP_SIMD_OP(NAME_)
+#undef NAME_
+        }
+        MOZ_CRASH("unexpected operation");
+    }
+
+  private:
+    Operation operation_;
+    SimdSign sign_;
+
+    MSimdBinaryComp(MDefinition* left, MDefinition* right, Operation op, SimdSign sign)
+      : MBinaryInstruction(classOpcode, left, right), operation_(op), sign_(sign)
+    {
+        MOZ_ASSERT(left->type() == right->type());
+        MIRType opType = left->type();
+        MOZ_ASSERT(IsSimdType(opType));
+        MOZ_ASSERT((sign != SimdSign::NotApplicable) == IsIntegerSimdType(opType),
+                   "Signedness must be specified for integer SIMD compares");
+        setResultType(MIRTypeToBooleanSimdType(opType));
+        specialization_ = opType;
+        setMovable();
+        // Only (in)equality is order-independent; the ordered comparisons
+        // are handled by reverse() below.
+        if (op == equal || op == notEqual)
+            setCommutative();
+    }
+
+    // Private: callers must go through AddLegalized, which can emit an
+    // equivalent sequence when the target lacks a direct encoding.
+    static MSimdBinaryComp* New(TempAllocator& alloc, MDefinition* left, MDefinition* right,
+                                Operation op, SimdSign sign)
+    {
+        return new (alloc) MSimdBinaryComp(left, right, op, sign);
+    }
+
+  public:
+    INSTRUCTION_HEADER(SimdBinaryComp)
+
+    // Create a MSimdBinaryComp or an equivalent sequence of instructions
+    // supported by the current target.
+    // Add all instructions to the basic block |addTo|.
+    static MInstruction* AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
+                                      MDefinition* right, Operation op, SimdSign sign);
+
+    AliasSet getAliasSet() const override
+    {
+        return AliasSet::None();
+    }
+
+    Operation operation() const { return operation_; }
+    SimdSign signedness() const { return sign_; }
+    MIRType specialization() const { return specialization_; }
+
+    // Swap the operands and reverse the comparison predicate.
+    void reverse() {
+        switch (operation()) {
+          case greaterThan:        operation_ = lessThan; break;
+          case greaterThanOrEqual: operation_ = lessThanOrEqual; break;
+          case lessThan:           operation_ = greaterThan; break;
+          case lessThanOrEqual:    operation_ = greaterThanOrEqual; break;
+          case equal:
+          case notEqual:
+            break;
+          default: MOZ_CRASH("Unexpected compare operation");
+        }
+        swapOperands();
+    }
+
+    // Congruent only when operand type, predicate and signedness all match.
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!binaryCongruentTo(ins))
+            return false;
+        const MSimdBinaryComp* other = ins->toSimdBinaryComp();
+        return specialization_ == other->specialization() &&
+               operation_ == other->operation() &&
+               sign_ == other->signedness();
+    }
+
+    void printOpcode(GenericPrinter& out) const override;
+
+    ALLOW_CLONE(MSimdBinaryComp)
+};
+
+// Lane-wise binary arithmetic on two SIMD vectors of the same type; integer
+// vectors support only add/sub/mul (asserted in the constructor).
+class MSimdBinaryArith
+  : public MBinaryInstruction,
+    public MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdSameAsReturnedTypePolicy<1> >::Data
+{
+  public:
+    enum Operation {
+#define OP_LIST_(OP) Op_##OP,
+        FOREACH_NUMERIC_SIMD_BINOP(OP_LIST_)
+        FOREACH_FLOAT_SIMD_BINOP(OP_LIST_)
+#undef OP_LIST_
+    };
+
+    static const char* OperationName(Operation op) {
+        switch (op) {
+#define OP_CASE_LIST_(OP) case Op_##OP: return #OP;
+          FOREACH_NUMERIC_SIMD_BINOP(OP_CASE_LIST_)
+          FOREACH_FLOAT_SIMD_BINOP(OP_CASE_LIST_)
+#undef OP_CASE_LIST_
+        }
+        MOZ_CRASH("unexpected operation");
+    }
+
+  private:
+    Operation operation_;
+
+    MSimdBinaryArith(MDefinition* left, MDefinition* right, Operation op)
+      : MBinaryInstruction(classOpcode, left, right), operation_(op)
+    {
+        MOZ_ASSERT(left->type() == right->type());
+        MIRType type = left->type();
+        MOZ_ASSERT(IsSimdType(type));
+        MOZ_ASSERT_IF(IsIntegerSimdType(type), op == Op_add || op == Op_sub || op == Op_mul);
+        setResultType(type);
+        setMovable();
+        if (op == Op_add || op == Op_mul || op == Op_min || op == Op_max)
+            setCommutative();
+    }
+
+    // Private: callers must go through AddLegalized below.
+    static MSimdBinaryArith* New(TempAllocator& alloc, MDefinition* left, MDefinition* right,
+                                 Operation op)
+    {
+        return new (alloc) MSimdBinaryArith(left, right, op);
+    }
+
+  public:
+    INSTRUCTION_HEADER(SimdBinaryArith)
+
+    // Create an MSimdBinaryArith instruction and add it to the basic block. Possibly
+    // create and add an equivalent sequence of instructions instead if the
+    // current target doesn't support the requested arithmetic operation directly.
+    static MInstruction* AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
+                                      MDefinition* right, Operation op);
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    Operation operation() const { return operation_; }
+
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!binaryCongruentTo(ins))
+            return false;
+        return operation_ == ins->toSimdBinaryArith()->operation();
+    }
+
+    void printOpcode(GenericPrinter& out) const override;
+
+    ALLOW_CLONE(MSimdBinaryArith)
+};
+
+// Lane-wise saturating add/sub on small-integer SIMD vectors (Int8x16 or
+// Int16x8 only); signedness of the saturation is recorded separately.
+class MSimdBinarySaturating
+  : public MBinaryInstruction,
+    public MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdSameAsReturnedTypePolicy<1>>::Data
+{
+  public:
+    enum Operation
+    {
+        add,
+        sub,
+    };
+
+    static const char* OperationName(Operation op)
+    {
+        switch (op) {
+          case add:
+            return "add";
+          case sub:
+            return "sub";
+        }
+        MOZ_CRASH("unexpected operation");
+    }
+
+  private:
+    Operation operation_;
+    SimdSign sign_;
+
+    MSimdBinarySaturating(MDefinition* left, MDefinition* right, Operation op, SimdSign sign)
+      : MBinaryInstruction(classOpcode, left, right)
+      , operation_(op)
+      , sign_(sign)
+    {
+        MOZ_ASSERT(left->type() == right->type());
+        MIRType type = left->type();
+        MOZ_ASSERT(type == MIRType::Int8x16 || type == MIRType::Int16x8);
+        setResultType(type);
+        setMovable();
+        // Saturating subtraction is not commutative.
+        if (op == add)
+            setCommutative();
+    }
+
+  public:
+    INSTRUCTION_HEADER(SimdBinarySaturating)
+    TRIVIAL_NEW_WRAPPERS
+
+    AliasSet getAliasSet() const override { return AliasSet::None(); }
+
+    Operation operation() const { return operation_; }
+    SimdSign signedness() const { return sign_; }
+
+    // Congruent only when both the operation and the signedness match.
+    bool congruentTo(const MDefinition* ins) const override
+    {
+        if (!binaryCongruentTo(ins))
+            return false;
+        return operation_ == ins->toSimdBinarySaturating()->operation() &&
+               sign_ == ins->toSimdBinarySaturating()->signedness();
+    }
+
+    void printOpcode(GenericPrinter& out) const override;
+
+    ALLOW_CLONE(MSimdBinarySaturating)
+};
+
+// Lane-wise bitwise and/or/xor of two SIMD vectors of the same type; always
+// commutative and free of side effects.
+class MSimdBinaryBitwise
+  : public MBinaryInstruction,
+    public MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdSameAsReturnedTypePolicy<1> >::Data
+{
+  public:
+    enum Operation {
+        and_,
+        or_,
+        xor_
+    };
+
+    static const char* OperationName(Operation op) {
+        switch (op) {
+          case and_: return "and";
+          case or_:  return "or";
+          case xor_: return "xor";
+        }
+        MOZ_CRASH("unexpected operation");
+    }
+
+  private:
+    Operation operation_;
+
+    MSimdBinaryBitwise(MDefinition* left, MDefinition* right, Operation op)
+      : MBinaryInstruction(classOpcode, left, right), operation_(op)
+    {
+        MOZ_ASSERT(left->type() == right->type());
+        MIRType type = left->type();
+        MOZ_ASSERT(IsSimdType(type));
+        setResultType(type);
+        setMovable();
+        setCommutative();
+    }
+
+  public:
+    INSTRUCTION_HEADER(SimdBinaryBitwise)
+    TRIVIAL_NEW_WRAPPERS
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    Operation operation() const { return operation_; }
+
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!binaryCongruentTo(ins))
+            return false;
+        return operation_ == ins->toSimdBinaryBitwise()->operation();
+    }
+
+    void printOpcode(GenericPrinter& out) const override;
+
+    ALLOW_CLONE(MSimdBinaryBitwise)
+};
+
+// Lane-wise shift of an integer SIMD vector by a scalar amount: left shift
+// (lsh), arithmetic right shift (rsh) or logical right shift (ursh).
+class MSimdShift
+  : public MBinaryInstruction,
+    public MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdScalarPolicy<1> >::Data
+{
+  public:
+    enum Operation {
+        lsh,
+        rsh,
+        ursh
+    };
+
+  private:
+    Operation operation_;
+
+    MSimdShift(MDefinition* left, MDefinition* right, Operation op)
+      : MBinaryInstruction(classOpcode, left, right), operation_(op)
+    {
+        MIRType type = left->type();
+        MOZ_ASSERT(IsIntegerSimdType(type));
+        setResultType(type);
+        setMovable();
+    }
+
+    // Private: callers must go through AddLegalized below.
+    static MSimdShift* New(TempAllocator& alloc, MDefinition* left, MDefinition* right,
+                           Operation op)
+    {
+        return new (alloc) MSimdShift(left, right, op);
+    }
+
+  public:
+    INSTRUCTION_HEADER(SimdShift)
+
+    // Create an MSimdShift instruction and add it to the basic block. Possibly
+    // create and add an equivalent sequence of instructions instead if the
+    // current target doesn't support the requested shift operation directly.
+    // Return the inserted MInstruction that computes the shifted value.
+    static MInstruction* AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
+                                      MDefinition* right, Operation op);
+
+    // Get the relevant right shift operation given the signedness of a type.
+    static Operation rshForSign(SimdSign sign) {
+        return sign == SimdSign::Unsigned ? ursh : rsh;
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    Operation operation() const { return operation_; }
+
+    static const char* OperationName(Operation op) {
+        switch (op) {
+          case lsh:  return "lsh";
+          case rsh:  return "rsh-arithmetic";
+          case ursh: return "rsh-logical";
+        }
+        MOZ_CRASH("unexpected operation");
+    }
+
+    void printOpcode(GenericPrinter& out) const override;
+
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!binaryCongruentTo(ins))
+            return false;
+        return operation_ == ins->toSimdShift()->operation();
+    }
+
+    ALLOW_CLONE(MSimdShift)
+};
+
+// Binary operation on two boxed operands producing a boxed Value; presumably
+// lowered to a call into a shared (Baseline) IC stub — TODO confirm.
+class MBinarySharedStub
+  : public MBinaryInstruction,
+    public MixPolicy<BoxPolicy<0>, BoxPolicy<1> >::Data
+{
+  protected:
+    explicit MBinarySharedStub(MDefinition* left, MDefinition* right)
+      : MBinaryInstruction(classOpcode, left, right)
+    {
+        setResultType(MIRType::Value);
+    }
+
+  public:
+    INSTRUCTION_HEADER(BinarySharedStub)
+    TRIVIAL_NEW_WRAPPERS
+};
+
+// Defines |fun| on the given environment chain; produces no value and may
+// call into the runtime (possiblyCalls).
+class MDefFun
+  : public MBinaryInstruction,
+    public ObjectPolicy<0>::Data
+{
+  private:
+    MDefFun(MDefinition* fun, MDefinition* envChain)
+      : MBinaryInstruction(classOpcode, fun, envChain)
+    {}
+
+  public:
+    INSTRUCTION_HEADER(DefFun)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, fun), (1, environmentChain))
+
+    bool possiblyCalls() const override {
+        return true;
+    }
+};
+
+// Boolean test of whether |object| (given |proto|) can use the optimized
+// RegExp paths; reads no heap state as far as alias analysis is concerned.
+class MRegExpInstanceOptimizable
+  : public MBinaryInstruction,
+    public MixPolicy<ObjectPolicy<0>, ObjectPolicy<1> >::Data
+{
+    explicit MRegExpInstanceOptimizable(MDefinition* object, MDefinition* proto)
+      : MBinaryInstruction(classOpcode, object, proto)
+    {
+        setResultType(MIRType::Boolean);
+    }
+
+  public:
+    INSTRUCTION_HEADER(RegExpInstanceOptimizable)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object), (1, proto))
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// Creates a closure from a function template on the given environment chain.
+// Operand 1 is an MConstant holding the template JSFunction.
+class MLambda
+  : public MBinaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    const LambdaFunctionInfo info_;
+
+    MLambda(TempAllocator& alloc, CompilerConstraintList* constraints, MDefinition* envChain,
+            MConstant* cst)
+      : MBinaryInstruction(classOpcode, envChain, cst),
+        info_(&cst->toObject().as<JSFunction>())
+    {
+        setResultType(MIRType::Object);
+        // Singleton/cloned functions get no singleton type set here.
+        if (!info().fun->isSingleton() && !ObjectGroup::useSingletonForClone(info().fun))
+            setResultTypeSet(MakeSingletonTypeSet(alloc, constraints, info().fun));
+    }
+
+  public:
+    INSTRUCTION_HEADER(Lambda)
+    TRIVIAL_NEW_WRAPPERS_WITH_ALLOC
+    NAMED_OPERANDS((0, environmentChain))
+
+    MConstant* functionOperand() const {
+        return getOperand(1)->toConstant();
+    }
+    const LambdaFunctionInfo& info() const {
+        return info_;
+    }
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    bool canRecoverOnBailout() const override {
+        return true;
+    }
+    // The referenced JSFunction must be traced for the compilation's lifetime.
+    bool appendRoots(MRootList& roots) const override {
+        return info_.appendRoots(roots);
+    }
+};
+
+// Sets the (computed) name of |fun| to |name|; prefixKind_ presumably encodes
+// a FunctionPrefixKind (e.g. get/set) — TODO confirm against callers.
+// Produces no value and may call into the runtime.
+class MSetFunName
+  : public MBinaryInstruction,
+    public MixPolicy<ObjectPolicy<0>, BoxPolicy<1> >::Data
+{
+    uint8_t prefixKind_;
+
+    explicit MSetFunName(MDefinition* fun, MDefinition* name, uint8_t prefixKind)
+      : MBinaryInstruction(classOpcode, fun, name),
+        prefixKind_(prefixKind)
+    {
+        setResultType(MIRType::None);
+    }
+
+  public:
+    INSTRUCTION_HEADER(SetFunName)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, fun), (1, name))
+
+    uint8_t prefixKind() const {
+        return prefixKind_;
+    }
+
+    bool possiblyCalls() const override {
+        return true;
+    }
+};
+
+// If |elements| has the CONVERT_DOUBLE_ELEMENTS flag, convert value to
+// double. Else return the original value.
+class MMaybeToDoubleElement
+  : public MBinaryInstruction,
+    public IntPolicy<1>::Data
+{
+    MMaybeToDoubleElement(MDefinition* elements, MDefinition* value)
+      : MBinaryInstruction(classOpcode, elements, value)
+    {
+        MOZ_ASSERT(elements->type() == MIRType::Elements);
+        setMovable();
+        setResultType(MIRType::Value);
+    }
+
+  public:
+    INSTRUCTION_HEADER(MaybeToDoubleElement)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, elements), (1, value))
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        // Depends on the elements header flags, which live in ObjectFields.
+        return AliasSet::Load(AliasSet::ObjectFields);
+    }
+};
+
+// Store to the initialized length in an elements header. Note the input is an
+// *index*, one less than the desired length.
+class MSetInitializedLength
+  : public MBinaryInstruction,
+    public NoTypePolicy::Data
+{
+    MSetInitializedLength(MDefinition* elements, MDefinition* index)
+      : MBinaryInstruction(classOpcode, elements, index)
+    { }
+
+  public:
+    INSTRUCTION_HEADER(SetInitializedLength)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, elements), (1, index))
+
+    AliasSet getAliasSet() const override {
+        // Mutates the elements header: a store to ObjectFields.
+        return AliasSet::Store(AliasSet::ObjectFields);
+    }
+
+    ALLOW_CLONE(MSetInitializedLength)
+};
+
+// Store to the length in an elements header. Note the input is an *index*, one
+// less than the desired length.
+class MSetArrayLength
+  : public MBinaryInstruction,
+    public NoTypePolicy::Data
+{
+    MSetArrayLength(MDefinition* elements, MDefinition* index)
+      : MBinaryInstruction(classOpcode, elements, index)
+    { }
+
+  public:
+    INSTRUCTION_HEADER(SetArrayLength)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, elements), (1, index))
+
+    AliasSet getAliasSet() const override {
+        // Mutates the elements header: a store to ObjectFields.
+        return AliasSet::Store(AliasSet::ObjectFields);
+    }
+
+    // By default no, unless built as a recovered instruction.
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    bool canRecoverOnBailout() const override {
+        return isRecoveredOnBailout();
+    }
+};
+
+// Steps a Map/Set iterator, writing the next entry into |result| and
+// returning a Boolean (presumably the "done" flag — TODO confirm against
+// the intrinsic's definition).
+class MGetNextEntryForIterator
+  : public MBinaryInstruction,
+    public MixPolicy<ObjectPolicy<0>, ObjectPolicy<1> >::Data
+{
+  public:
+    enum Mode {
+        Map,
+        Set
+    };
+
+  private:
+    Mode mode_;
+
+    explicit MGetNextEntryForIterator(MDefinition* iter, MDefinition* result, Mode mode)
+      : MBinaryInstruction(classOpcode, iter, result), mode_(mode)
+    {
+        setResultType(MIRType::Boolean);
+    }
+
+  public:
+    INSTRUCTION_HEADER(GetNextEntryForIterator)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, iter), (1, result))
+
+    Mode mode() const {
+        return mode_;
+    }
+};
+
+// Inlined version of the js::SetTypedObjectOffset() intrinsic.
+// Produces no value; |object| must be an Object and |offset| an Int32.
+class MSetTypedObjectOffset
+  : public MBinaryInstruction,
+    public NoTypePolicy::Data
+{
+  private:
+    MSetTypedObjectOffset(MDefinition* object, MDefinition* offset)
+      : MBinaryInstruction(classOpcode, object, offset)
+    {
+        MOZ_ASSERT(object->type() == MIRType::Object);
+        MOZ_ASSERT(offset->type() == MIRType::Int32);
+        setResultType(MIRType::None);
+    }
+
+  public:
+    INSTRUCTION_HEADER(SetTypedObjectOffset)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object), (1, offset))
+
+    AliasSet getAliasSet() const override {
+        // This affects the result of MTypedObjectElements,
+        // which is described as a load of ObjectFields.
+        return AliasSet::Store(AliasSet::ObjectFields);
+    }
+};
+
+// Bailout if index + minimum < 0 or index + maximum >= length. The length used
+// in a bounds check must not be negative, or the wrong result may be computed
+// (unsigned comparisons may be used).
+class MBoundsCheck
+  : public MBinaryInstruction,
+    public MixPolicy<IntPolicy<0>, IntPolicy<1>>::Data
+{
+    // Range over which to perform the bounds check, may be modified by GVN.
+    int32_t minimum_;
+    int32_t maximum_;
+    bool fallible_;
+
+    MBoundsCheck(MDefinition* index, MDefinition* length)
+      : MBinaryInstruction(classOpcode, index, length),
+        minimum_(0), maximum_(0), fallible_(true)
+    {
+        // A guard: must not be eliminated even if the result is unused.
+        setGuard();
+        setMovable();
+        MOZ_ASSERT(index->type() == MIRType::Int32);
+        MOZ_ASSERT(length->type() == MIRType::Int32);
+
+        // Returns the checked index.
+        setResultType(MIRType::Int32);
+    }
+
+  public:
+    INSTRUCTION_HEADER(BoundsCheck)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, index), (1, length))
+
+    int32_t minimum() const {
+        return minimum_;
+    }
+    // The range may only be widened while the check is still fallible.
+    void setMinimum(int32_t n) {
+        MOZ_ASSERT(fallible_);
+        minimum_ = n;
+    }
+    int32_t maximum() const {
+        return maximum_;
+    }
+    void setMaximum(int32_t n) {
+        MOZ_ASSERT(fallible_);
+        maximum_ = n;
+    }
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+    // GVN may merge only checks with identical ranges and fallibility.
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isBoundsCheck())
+            return false;
+        const MBoundsCheck* other = ins->toBoundsCheck();
+        if (minimum() != other->minimum() || maximum() != other->maximum())
+            return false;
+        if (fallible() != other->fallible())
+            return false;
+        return congruentIfOperandsEqual(other);
+    }
+    virtual AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    void computeRange(TempAllocator& alloc) override;
+    bool fallible() const {
+        return fallible_;
+    }
+    void collectRangeInfoPreTrunc() override;
+
+    ALLOW_CLONE(MBoundsCheck)
+};
+
+// Load a value from a dense array's element vector and does a hole check if the
+// array is not known to be packed.
+class MLoadElement
+  : public MBinaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    bool needsHoleCheck_;
+    bool loadDoubles_;
+    int32_t offsetAdjustment_;
+
+    MLoadElement(MDefinition* elements, MDefinition* index,
+                 bool needsHoleCheck, bool loadDoubles, int32_t offsetAdjustment = 0)
+      : MBinaryInstruction(classOpcode, elements, index),
+        needsHoleCheck_(needsHoleCheck),
+        loadDoubles_(loadDoubles),
+        offsetAdjustment_(offsetAdjustment)
+    {
+        if (needsHoleCheck) {
+            // Uses may be optimized away based on this instruction's result
+            // type. This means it's invalid to DCE this instruction, as we
+            // have to invalidate when we read a hole.
+            setGuard();
+        }
+        setResultType(MIRType::Value);
+        setMovable();
+        MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment));
+        MOZ_ASSERT(index->type() == MIRType::Int32);
+    }
+
+  public:
+    INSTRUCTION_HEADER(LoadElement)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, elements), (1, index))
+
+    bool needsHoleCheck() const {
+        return needsHoleCheck_;
+    }
+    bool loadDoubles() const {
+        return loadDoubles_;
+    }
+    int32_t offsetAdjustment() const {
+        return offsetAdjustment_;
+    }
+    // The load can fail (trigger a bailout) exactly when a hole check is
+    // required.
+    bool fallible() const {
+        return needsHoleCheck();
+    }
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isLoadElement())
+            return false;
+        const MLoadElement* other = ins->toLoadElement();
+        if (needsHoleCheck() != other->needsHoleCheck())
+            return false;
+        if (loadDoubles() != other->loadDoubles())
+            return false;
+        if (offsetAdjustment() != other->offsetAdjustment())
+            return false;
+        return congruentIfOperandsEqual(other);
+    }
+    AliasType mightAlias(const MDefinition* store) const override;
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+    AliasSet getAliasSet() const override {
+        return AliasSet::Load(AliasSet::Element);
+    }
+
+    ALLOW_CLONE(MLoadElement)
+};
+
+// Load an object-or-null slot from unboxed elements. The behavior on a null
+// value is configurable: box it (HandleNull), bail out (BailOnNull), or
+// assume it cannot occur (NullNotPossible).
+class MLoadUnboxedObjectOrNull
+  : public MBinaryInstruction,
+    public SingleObjectPolicy::Data
+{
+  public:
+    enum NullBehavior {
+        HandleNull,
+        BailOnNull,
+        NullNotPossible
+    };
+
+  private:
+    NullBehavior nullBehavior_;
+    int32_t offsetAdjustment_;
+
+    MLoadUnboxedObjectOrNull(MDefinition* elements, MDefinition* index,
+                             NullBehavior nullBehavior, int32_t offsetAdjustment)
+      : MBinaryInstruction(classOpcode, elements, index),
+        nullBehavior_(nullBehavior),
+        offsetAdjustment_(offsetAdjustment)
+    {
+        if (nullBehavior == BailOnNull) {
+            // Don't eliminate loads which bail out on a null pointer, for the
+            // same reason as MLoadElement.
+            setGuard();
+        }
+        // Only HandleNull may actually produce null, so it yields a boxed
+        // Value; the other modes always yield an Object.
+        setResultType(nullBehavior == HandleNull ? MIRType::Value : MIRType::Object);
+        setMovable();
+        MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment));
+        MOZ_ASSERT(index->type() == MIRType::Int32);
+    }
+
+  public:
+    INSTRUCTION_HEADER(LoadUnboxedObjectOrNull)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, elements), (1, index))
+
+    NullBehavior nullBehavior() const {
+        return nullBehavior_;
+    }
+    int32_t offsetAdjustment() const {
+        return offsetAdjustment_;
+    }
+    bool fallible() const {
+        return nullBehavior() == BailOnNull;
+    }
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isLoadUnboxedObjectOrNull())
+            return false;
+        const MLoadUnboxedObjectOrNull* other = ins->toLoadUnboxedObjectOrNull();
+        if (nullBehavior() != other->nullBehavior())
+            return false;
+        if (offsetAdjustment() != other->offsetAdjustment())
+            return false;
+        return congruentIfOperandsEqual(other);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::Load(AliasSet::UnboxedElement);
+    }
+    AliasType mightAlias(const MDefinition* store) const override;
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+
+    ALLOW_CLONE(MLoadUnboxedObjectOrNull)
+};
+
+// Load a string value out of unboxed elements.
+class MLoadUnboxedString
+  : public MBinaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    // Constant adjustment folded into the element access; must satisfy
+    // IsValidElementsType for the elements operand.
+    int32_t offsetAdjustment_;
+
+    MLoadUnboxedString(MDefinition* elements, MDefinition* index, int32_t offsetAdjustment = 0)
+      : MBinaryInstruction(classOpcode, elements, index),
+        offsetAdjustment_(offsetAdjustment)
+    {
+        setResultType(MIRType::String);
+        setMovable();
+        MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment));
+        MOZ_ASSERT(index->type() == MIRType::Int32);
+    }
+
+  public:
+    INSTRUCTION_HEADER(LoadUnboxedString)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, elements), (1, index))
+
+    int32_t offsetAdjustment() const {
+        return offsetAdjustment_;
+    }
+    // Congruent only to loads with the same offset adjustment.
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isLoadUnboxedString())
+            return false;
+        const MLoadUnboxedString* other = ins->toLoadUnboxedString();
+        if (offsetAdjustment() != other->offsetAdjustment())
+            return false;
+        // Pass |other| (not |ins|) for consistency with the sibling load
+        // instructions; both refer to the same instruction here.
+        return congruentIfOperandsEqual(other);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::Load(AliasSet::UnboxedElement);
+    }
+
+    ALLOW_CLONE(MLoadUnboxedString)
+};
+
+// This instruction is used to load an element of a non-escaped inlined array
+// (the array operand must be an MArgumentState).
+class MLoadElementFromState
+  : public MBinaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    MLoadElementFromState(MDefinition* array, MDefinition* index)
+      : MBinaryInstruction(classOpcode, array, index)
+    {
+        MOZ_ASSERT(array->isArgumentState());
+        MOZ_ASSERT(index->type() == MIRType::Int32);
+        setResultType(MIRType::Value);
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(LoadElementFromState)
+    TRIVIAL_NEW_WRAPPERS
+    // No trailing semicolon: NAMED_OPERANDS expands to member definitions,
+    // matching every other use of the macro in this file.
+    NAMED_OPERANDS((0, array), (1, index))
+
+    // Reads only the recovered (non-escaped) state, never the heap.
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+// Array.prototype.push on a dense array. Returns the new array length.
+class MArrayPush
+  : public MBinaryInstruction,
+    public MixPolicy<SingleObjectPolicy, NoFloatPolicy<1> >::Data
+{
+    MArrayPush(MDefinition* object, MDefinition* value)
+      : MBinaryInstruction(classOpcode, object, value)
+    {
+        // The result is the new length, an int32.
+        setResultType(MIRType::Int32);
+    }
+
+  public:
+    INSTRUCTION_HEADER(ArrayPush)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object), (1, value))
+
+    // Pushing writes the element storage and the object fields (the array's
+    // length changes as well as its contents).
+    AliasSet getAliasSet() const override {
+        return AliasSet::Store(AliasSet::ObjectFields | AliasSet::Element);
+    }
+    void computeRange(TempAllocator& alloc) override;
+
+    ALLOW_CLONE(MArrayPush)
+};
+
+// Array.prototype.join(sep) producing a string. optimizeForArray selects a
+// fast path when the input is known to be an array.
+class MArrayJoin
+    : public MBinaryInstruction,
+      public MixPolicy<ObjectPolicy<0>, StringPolicy<1> >::Data
+{
+    bool optimizeForArray_;
+
+    MArrayJoin(MDefinition* array, MDefinition* sep, bool optimizeForArray)
+        : MBinaryInstruction(classOpcode, array, sep),
+          optimizeForArray_(optimizeForArray)
+    {
+        setResultType(MIRType::String);
+    }
+  public:
+    INSTRUCTION_HEADER(ArrayJoin)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, array), (1, sep))
+
+    bool optimizeForArray() const {
+        return optimizeForArray_;
+    }
+    bool possiblyCalls() const override {
+        return true;
+    }
+    virtual AliasSet getAliasSet() const override {
+        // Array.join might coerce the elements of the Array to strings.  This
+        // coercion might cause the evaluation of some JavaScript code.
+        return AliasSet::Store(AliasSet::Any);
+    }
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+};
+
+// Load an unboxed scalar value from a typed array or other object.
+class MLoadUnboxedScalar
+  : public MBinaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    // Scalar type of the underlying storage.
+    Scalar::Type storageType_;
+    // Type actually read; may differ from storageType_ after setSimdRead.
+    Scalar::Type readType_;
+    unsigned numElems_; // used only for SIMD
+    // Whether this access requires a memory barrier (atomic access).
+    bool requiresBarrier_;
+    // Constant adjustment folded into the element access.
+    int32_t offsetAdjustment_;
+    // Whether loaded doubles must be canonicalized.
+    bool canonicalizeDoubles_;
+
+    MLoadUnboxedScalar(MDefinition* elements, MDefinition* index, Scalar::Type storageType,
+                       MemoryBarrierRequirement requiresBarrier = DoesNotRequireMemoryBarrier,
+                       int32_t offsetAdjustment = 0, bool canonicalizeDoubles = true)
+      : MBinaryInstruction(classOpcode, elements, index),
+        storageType_(storageType),
+        readType_(storageType),
+        numElems_(1),
+        requiresBarrier_(requiresBarrier == DoesRequireMemoryBarrier),
+        offsetAdjustment_(offsetAdjustment),
+        canonicalizeDoubles_(canonicalizeDoubles)
+    {
+        setResultType(MIRType::Value);
+        if (requiresBarrier_)
+            setGuard();         // Not removable or movable
+        else
+            setMovable();
+        MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment));
+        MOZ_ASSERT(index->type() == MIRType::Int32);
+        MOZ_ASSERT(storageType >= 0 && storageType < Scalar::MaxTypedArrayViewType);
+    }
+
+  public:
+    INSTRUCTION_HEADER(LoadUnboxedScalar)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, elements), (1, index))
+
+    // Reinterpret this load as a SIMD read of |numElems| elements of |type|.
+    void setSimdRead(Scalar::Type type, unsigned numElems) {
+        readType_ = type;
+        numElems_ = numElems;
+    }
+    unsigned numElems() const {
+        return numElems_;
+    }
+    Scalar::Type readType() const {
+        return readType_;
+    }
+
+    Scalar::Type storageType() const {
+        return storageType_;
+    }
+    bool fallible() const {
+        // Bailout if the result does not fit in an int32.
+        return readType_ == Scalar::Uint32 && type() == MIRType::Int32;
+    }
+    bool requiresMemoryBarrier() const {
+        return requiresBarrier_;
+    }
+    bool canonicalizeDoubles() const {
+        return canonicalizeDoubles_;
+    }
+    int32_t offsetAdjustment() const {
+        return offsetAdjustment_;
+    }
+    void setOffsetAdjustment(int32_t offsetAdjustment) {
+        offsetAdjustment_ = offsetAdjustment;
+    }
+    AliasSet getAliasSet() const override {
+        // When a barrier is needed make the instruction effectful by
+        // giving it a "store" effect.
+        if (requiresBarrier_)
+            return AliasSet::Store(AliasSet::UnboxedElement);
+        return AliasSet::Load(AliasSet::UnboxedElement);
+    }
+
+    // Barriered (atomic) loads are never congruent; otherwise all the
+    // type/offset/canonicalization parameters must match.
+    bool congruentTo(const MDefinition* ins) const override {
+        if (requiresBarrier_)
+            return false;
+        if (!ins->isLoadUnboxedScalar())
+            return false;
+        const MLoadUnboxedScalar* other = ins->toLoadUnboxedScalar();
+        if (storageType_ != other->storageType_)
+            return false;
+        if (readType_ != other->readType_)
+            return false;
+        if (numElems_ != other->numElems_)
+            return false;
+        if (offsetAdjustment() != other->offsetAdjustment())
+            return false;
+        if (canonicalizeDoubles() != other->canonicalizeDoubles())
+            return false;
+        return congruentIfOperandsEqual(other);
+    }
+
+    void printOpcode(GenericPrinter& out) const override;
+
+    void computeRange(TempAllocator& alloc) override;
+
+    bool canProduceFloat32() const override { return storageType_ == Scalar::Float32; }
+
+    ALLOW_CLONE(MLoadUnboxedScalar)
+};
+
+// Load a value from a typed array. Out-of-bounds accesses are handled in-line.
+class MLoadTypedArrayElementHole
+  : public MBinaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    Scalar::Type arrayType_;
+    // Whether a Uint32 result may be returned as a double instead of bailing.
+    bool allowDouble_;
+
+    MLoadTypedArrayElementHole(MDefinition* object, MDefinition* index, Scalar::Type arrayType, bool allowDouble)
+      : MBinaryInstruction(classOpcode, object, index),
+        arrayType_(arrayType), allowDouble_(allowDouble)
+    {
+        setResultType(MIRType::Value);
+        setMovable();
+        MOZ_ASSERT(index->type() == MIRType::Int32);
+        MOZ_ASSERT(arrayType >= 0 && arrayType < Scalar::MaxTypedArrayViewType);
+    }
+
+  public:
+    INSTRUCTION_HEADER(LoadTypedArrayElementHole)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object), (1, index))
+
+    Scalar::Type arrayType() const {
+        return arrayType_;
+    }
+    bool allowDouble() const {
+        return allowDouble_;
+    }
+    // Fallible when a Uint32 value may not fit in an int32 and doubles
+    // are not allowed as a result.
+    bool fallible() const {
+        return arrayType_ == Scalar::Uint32 && !allowDouble_;
+    }
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isLoadTypedArrayElementHole())
+            return false;
+        const MLoadTypedArrayElementHole* other = ins->toLoadTypedArrayElementHole();
+        if (arrayType() != other->arrayType())
+            return false;
+        if (allowDouble() != other->allowDouble())
+            return false;
+        return congruentIfOperandsEqual(other);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::Load(AliasSet::UnboxedElement);
+    }
+    bool canProduceFloat32() const override { return arrayType_ == Scalar::Float32; }
+
+    ALLOW_CLONE(MLoadTypedArrayElementHole)
+};
+
+// Compute an "effective address", i.e., a compound computation of the form:
+//   base + index * scale + displacement
+class MEffectiveAddress
+  : public MBinaryInstruction,
+    public NoTypePolicy::Data
+{
+    MEffectiveAddress(MDefinition* base, MDefinition* index, Scale scale, int32_t displacement)
+      : MBinaryInstruction(classOpcode, base, index),
+        scale_(scale), displacement_(displacement)
+    {
+        MOZ_ASSERT(base->type() == MIRType::Int32);
+        MOZ_ASSERT(index->type() == MIRType::Int32);
+        setMovable();
+        setResultType(MIRType::Int32);
+    }
+
+    // Compile-time constant multiplier for |index|.
+    Scale scale_;
+    // Compile-time constant addend.
+    int32_t displacement_;
+
+  public:
+    INSTRUCTION_HEADER(EffectiveAddress)
+    TRIVIAL_NEW_WRAPPERS
+
+    MDefinition* base() const {
+        return lhs();
+    }
+    MDefinition* index() const {
+        return rhs();
+    }
+    Scale scale() const {
+        return scale_;
+    }
+    int32_t displacement() const {
+        return displacement_;
+    }
+
+    ALLOW_CLONE(MEffectiveAddress)
+};
+
+// Store a value into one of an object's fixed slots.
+class MStoreFixedSlot
+  : public MBinaryInstruction,
+    public MixPolicy<SingleObjectPolicy, NoFloatPolicy<1> >::Data
+{
+    // Whether the store needs a write barrier.
+    bool needsBarrier_;
+    // Index of the fixed slot being written.
+    size_t slot_;
+
+    MStoreFixedSlot(MDefinition* obj, MDefinition* rval, size_t slot, bool barrier)
+      : MBinaryInstruction(classOpcode, obj, rval),
+        needsBarrier_(barrier),
+        slot_(slot)
+    { }
+
+  public:
+    INSTRUCTION_HEADER(StoreFixedSlot)
+    NAMED_OPERANDS((0, object), (1, value))
+
+    // Create an unbarriered store.
+    static MStoreFixedSlot* New(TempAllocator& alloc, MDefinition* obj, size_t slot,
+                                MDefinition* rval)
+    {
+        return new(alloc) MStoreFixedSlot(obj, rval, slot, false);
+    }
+    // Create a store that performs a write barrier.
+    static MStoreFixedSlot* NewBarriered(TempAllocator& alloc, MDefinition* obj, size_t slot,
+                                         MDefinition* rval)
+    {
+        return new(alloc) MStoreFixedSlot(obj, rval, slot, true);
+    }
+
+    size_t slot() const {
+        return slot_;
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::Store(AliasSet::FixedSlot);
+    }
+    bool needsBarrier() const {
+        return needsBarrier_;
+    }
+    void setNeedsBarrier(bool needsBarrier = true) {
+        needsBarrier_ = needsBarrier;
+    }
+
+    ALLOW_CLONE(MStoreFixedSlot)
+};
+
+// Inline-cache-based property read: value[idval]. May carry an inline
+// property table used when inlining guarded getters.
+class MGetPropertyCache
+  : public MBinaryInstruction,
+    public MixPolicy<BoxExceptPolicy<0, MIRType::Object>, CacheIdPolicy<1>>::Data
+{
+    // Idempotent caches have no side effects and can be moved/merged.
+    bool idempotent_ : 1;
+    // Whether the result is monitored by type inference.
+    bool monitoredResult_ : 1;
+
+    InlinePropertyTable* inlinePropertyTable_;
+
+    MGetPropertyCache(MDefinition* obj, MDefinition* id, bool monitoredResult)
+      : MBinaryInstruction(classOpcode, obj, id),
+        idempotent_(false),
+        monitoredResult_(monitoredResult),
+        inlinePropertyTable_(nullptr)
+    {
+        setResultType(MIRType::Value);
+
+        // The cache will invalidate if there are objects with e.g. lookup or
+        // resolve hooks on the proto chain. setGuard ensures this check is not
+        // eliminated.
+        setGuard();
+    }
+
+  public:
+    INSTRUCTION_HEADER(GetPropertyCache)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, value), (1, idval))
+
+    // Allocate and attach the inline property table; must not already exist.
+    InlinePropertyTable* initInlinePropertyTable(TempAllocator& alloc, jsbytecode* pc) {
+        MOZ_ASSERT(inlinePropertyTable_ == nullptr);
+        inlinePropertyTable_ = new(alloc) InlinePropertyTable(alloc, pc);
+        return inlinePropertyTable_;
+    }
+
+    void clearInlinePropertyTable() {
+        inlinePropertyTable_ = nullptr;
+    }
+
+    InlinePropertyTable* propTable() const {
+        return inlinePropertyTable_;
+    }
+
+    bool idempotent() const {
+        return idempotent_;
+    }
+    void setIdempotent() {
+        idempotent_ = true;
+        setMovable();
+    }
+    bool monitoredResult() const {
+        return monitoredResult_;
+    }
+
+    // Only idempotent caches may be deduplicated.
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!idempotent_)
+            return false;
+        if (!ins->isGetPropertyCache())
+            return false;
+        return congruentIfOperandsEqual(ins);
+    }
+
+    AliasSet getAliasSet() const override {
+        if (idempotent_) {
+            return AliasSet::Load(AliasSet::ObjectFields |
+                                  AliasSet::FixedSlot |
+                                  AliasSet::DynamicSlot);
+        }
+        // Non-idempotent caches may run arbitrary code.
+        return AliasSet::Store(AliasSet::Any);
+    }
+
+    bool allowDoubleResult() const;
+
+    bool appendRoots(MRootList& roots) const override {
+        if (inlinePropertyTable_)
+            return inlinePropertyTable_->appendRoots(roots);
+        return true;
+    }
+};
+
+// Emit code to store a value to an object's slots if its shape/group matches
+// one of the shapes/groups observed by the baseline IC, else bails out.
+class MSetPropertyPolymorphic
+  : public MBinaryInstruction,
+    public MixPolicy<SingleObjectPolicy, NoFloatPolicy<1> >::Data
+{
+    // Observed (receiver guard, shape) pairs; a null shape marks an
+    // unboxed-object entry (see getAliasSet).
+    Vector<PolymorphicEntry, 4, JitAllocPolicy> receivers_;
+    CompilerPropertyName name_;
+    bool needsBarrier_;
+
+    MSetPropertyPolymorphic(TempAllocator& alloc, MDefinition* obj, MDefinition* value,
+                            PropertyName* name)
+      : MBinaryInstruction(classOpcode, obj, value),
+        receivers_(alloc),
+        name_(name),
+        needsBarrier_(false)
+    {
+    }
+
+  public:
+    INSTRUCTION_HEADER(SetPropertyPolymorphic)
+    NAMED_OPERANDS((0, object), (1, value))
+
+    static MSetPropertyPolymorphic* New(TempAllocator& alloc, MDefinition* obj, MDefinition* value,
+                                        PropertyName* name) {
+        return new(alloc) MSetPropertyPolymorphic(alloc, obj, value, name);
+    }
+
+    MOZ_MUST_USE bool addReceiver(const ReceiverGuard& receiver, Shape* shape) {
+        PolymorphicEntry entry;
+        entry.receiver = receiver;
+        entry.shape = shape;
+        return receivers_.append(entry);
+    }
+    size_t numReceivers() const {
+        return receivers_.length();
+    }
+    const ReceiverGuard& receiver(size_t i) const {
+        return receivers_[i].receiver;
+    }
+    Shape* shape(size_t i) const {
+        return receivers_[i].shape;
+    }
+    PropertyName* name() const {
+        return name_;
+    }
+    bool needsBarrier() const {
+        return needsBarrier_;
+    }
+    void setNeedsBarrier() {
+        needsBarrier_ = true;
+    }
+    AliasSet getAliasSet() const override {
+        // A receiver without a shape is an unboxed store, which also
+        // clobbers unboxed elements.
+        bool hasUnboxedStore = false;
+        for (size_t i = 0; i < numReceivers(); i++) {
+            if (!shape(i)) {
+                hasUnboxedStore = true;
+                break;
+            }
+        }
+        return AliasSet::Store(AliasSet::ObjectFields |
+                               AliasSet::FixedSlot |
+                               AliasSet::DynamicSlot |
+                               (hasUnboxedStore ? AliasSet::UnboxedElement : 0));
+    }
+    bool appendRoots(MRootList& roots) const override;
+};
+
+// Guard on an object's identity, inclusively or exclusively.
+class MGuardObjectIdentity
+  : public MBinaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    // If true, bail out when |object| equals |expected|; otherwise bail
+    // out when they differ.
+    bool bailOnEquality_;
+
+    MGuardObjectIdentity(MDefinition* obj, MDefinition* expected, bool bailOnEquality)
+      : MBinaryInstruction(classOpcode, obj, expected),
+        bailOnEquality_(bailOnEquality)
+    {
+        setGuard();
+        setMovable();
+        setResultType(MIRType::Object);
+    }
+
+  public:
+    INSTRUCTION_HEADER(GuardObjectIdentity)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object), (1, expected))
+
+    bool bailOnEquality() const {
+        return bailOnEquality_;
+    }
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isGuardObjectIdentity())
+            return false;
+        if (bailOnEquality() != ins->toGuardObjectIdentity()->bailOnEquality())
+            return false;
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::Load(AliasSet::ObjectFields);
+    }
+};
+
+// Store to vp[slot] (slots that are not inline in an object).
+class MStoreSlot
+  : public MBinaryInstruction,
+    public MixPolicy<ObjectPolicy<0>, NoFloatPolicy<1> >::Data
+{
+    uint32_t slot_;
+    // Starts as Value and may be narrowed later via setSlotType.
+    MIRType slotType_;
+    // Whether the store needs a write barrier.
+    bool needsBarrier_;
+
+    MStoreSlot(MDefinition* slots, uint32_t slot, MDefinition* value, bool barrier)
+        : MBinaryInstruction(classOpcode, slots, value),
+          slot_(slot),
+          slotType_(MIRType::Value),
+          needsBarrier_(barrier)
+    {
+        MOZ_ASSERT(slots->type() == MIRType::Slots);
+    }
+
+  public:
+    INSTRUCTION_HEADER(StoreSlot)
+    NAMED_OPERANDS((0, slots), (1, value))
+
+    // Create an unbarriered store.
+    static MStoreSlot* New(TempAllocator& alloc, MDefinition* slots, uint32_t slot,
+                           MDefinition* value)
+    {
+        return new(alloc) MStoreSlot(slots, slot, value, false);
+    }
+    // Create a store that performs a write barrier.
+    static MStoreSlot* NewBarriered(TempAllocator& alloc, MDefinition* slots, uint32_t slot,
+                                    MDefinition* value)
+    {
+        return new(alloc) MStoreSlot(slots, slot, value, true);
+    }
+
+    uint32_t slot() const {
+        return slot_;
+    }
+    MIRType slotType() const {
+        return slotType_;
+    }
+    void setSlotType(MIRType slotType) {
+        MOZ_ASSERT(slotType != MIRType::None);
+        slotType_ = slotType;
+    }
+    bool needsBarrier() const {
+        return needsBarrier_;
+    }
+    void setNeedsBarrier() {
+        needsBarrier_ = true;
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::Store(AliasSet::DynamicSlot);
+    }
+    void printOpcode(GenericPrinter& out) const override;
+
+    ALLOW_CLONE(MStoreSlot)
+};
+
+// Common base for property-setting instructions: carries the property name
+// and the strict-mode flag.
+class MSetPropertyInstruction : public MBinaryInstruction
+{
+    CompilerPropertyName name_;
+    bool strict_;
+
+  protected:
+    MSetPropertyInstruction(Opcode op, MDefinition* obj, MDefinition* value, PropertyName* name,
+                            bool strict)
+      : MBinaryInstruction(op, obj, value),
+        name_(name), strict_(strict)
+    {}
+
+  public:
+    NAMED_OPERANDS((0, object), (1, value))
+    PropertyName* name() const {
+        return name_;
+    }
+    bool strict() const {
+        return strict_;
+    }
+    bool appendRoots(MRootList& roots) const override {
+        return roots.append(name_);
+    }
+};
+
+// Implements |delete value[index]|; produces a boolean result.
+class MDeleteElement
+  : public MBinaryInstruction,
+    public BoxInputsPolicy::Data
+{
+    bool strict_;
+
+    MDeleteElement(MDefinition* value, MDefinition* index, bool strict)
+      : MBinaryInstruction(classOpcode, value, index),
+        strict_(strict)
+    {
+        setResultType(MIRType::Boolean);
+    }
+
+  public:
+    INSTRUCTION_HEADER(DeleteElement)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, value), (1, index))
+
+    bool strict() const {
+        return strict_;
+    }
+};
+
+// Inline call to handle lhs[rhs]. The first input is a Value so that this
+// instruction can handle both objects and strings.
+class MCallGetElement
+  : public MBinaryInstruction,
+    public BoxInputsPolicy::Data
+{
+    MCallGetElement(MDefinition* lhs, MDefinition* rhs)
+      : MBinaryInstruction(classOpcode, lhs, rhs)
+    {
+        setResultType(MIRType::Value);
+    }
+
+  public:
+    INSTRUCTION_HEADER(CallGetElement)
+    TRIVIAL_NEW_WRAPPERS
+
+    // Calls into the VM, so arbitrary code may run.
+    bool possiblyCalls() const override {
+        return true;
+    }
+};
+
+// Invoke a DOM setter (JSJitSetterOp) to write |value| to a property of
+// |object|.
+class MSetDOMProperty
+  : public MBinaryInstruction,
+    public MixPolicy<ObjectPolicy<0>, BoxPolicy<1> >::Data
+{
+    const JSJitSetterOp func_;
+
+    MSetDOMProperty(const JSJitSetterOp func, MDefinition* obj, MDefinition* val)
+      : MBinaryInstruction(classOpcode, obj, val),
+        func_(func)
+    { }
+
+  public:
+    INSTRUCTION_HEADER(SetDOMProperty)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object), (1, value))
+
+    JSJitSetterOp fun() const {
+        return func_;
+    }
+
+    bool possiblyCalls() const override {
+        return true;
+    }
+};
+
+// Implementation for 'in' operator using instruction cache
+class MInCache
+  : public MBinaryInstruction,
+    public MixPolicy<CacheIdPolicy<0>, ObjectPolicy<1> >::Data
+{
+    MInCache(MDefinition* key, MDefinition* obj)
+      : MBinaryInstruction(classOpcode, key, obj)
+    {
+        setResultType(MIRType::Boolean);
+    }
+
+  public:
+    INSTRUCTION_HEADER(InCache)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, key), (1, object))
+};
+
+// Cache-based own-property test: does |value| have own property |idval|?
+// Produces a boolean.
+class MHasOwnCache
+  : public MBinaryInstruction,
+    public MixPolicy<BoxExceptPolicy<0, MIRType::Object>, CacheIdPolicy<1>>::Data
+{
+    MHasOwnCache(MDefinition* obj, MDefinition* id)
+      : MBinaryInstruction(classOpcode, obj, id)
+    {
+        setResultType(MIRType::Boolean);
+    }
+
+  public:
+    INSTRUCTION_HEADER(HasOwnCache)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, value), (1, idval))
+};
+
+// Implementation for instanceof operator with unknown rhs.
+class MCallInstanceOf
+  : public MBinaryInstruction,
+    public MixPolicy<BoxPolicy<0>, ObjectPolicy<1> >::Data
+{
+    MCallInstanceOf(MDefinition* obj, MDefinition* proto)
+      : MBinaryInstruction(classOpcode, obj, proto)
+    {
+        setResultType(MIRType::Boolean);
+    }
+
+  public:
+    INSTRUCTION_HEADER(CallInstanceOf)
+    TRIVIAL_NEW_WRAPPERS
+};
+
+// Given a value being written to another object, update the generational store
+// buffer if the value is in the nursery and object is in the tenured heap.
+class MPostWriteBarrier : public MBinaryInstruction, public ObjectPolicy<0>::Data
+{
+    MPostWriteBarrier(MDefinition* obj, MDefinition* value)
+      : MBinaryInstruction(classOpcode, obj, value)
+    {
+        // Effectful: must not be eliminated even though it has no result.
+        setGuard();
+    }
+
+  public:
+    INSTRUCTION_HEADER(PostWriteBarrier)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object), (1, value))
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+#ifdef DEBUG
+    bool isConsistentFloat32Use(MUse* use) const override {
+        // During lowering, values that neither have object nor value MIR type
+        // are ignored, thus Float32 can show up at this point without any issue.
+        return use == getUseFor(1);
+    }
+#endif
+
+    ALLOW_CLONE(MPostWriteBarrier)
+};
+
+// Check a function's return value against |this| before returning.
+// NOTE(review): presumably implements the derived-class-constructor return
+// check (JSOP_CHECKRETURN) — confirm at the construction sites.
+class MCheckReturn
+  : public MBinaryInstruction,
+    public BoxInputsPolicy::Data
+{
+    explicit MCheckReturn(MDefinition* retVal, MDefinition* thisVal)
+      : MBinaryInstruction(classOpcode, retVal, thisVal)
+    {
+        setGuard();
+        setResultType(MIRType::Value);
+        // The check forwards the return value, so its type set is reused.
+        setResultTypeSet(retVal->resultTypeSet());
+    }
+
+  public:
+    INSTRUCTION_HEADER(CheckReturn)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, returnValue), (1, thisValue))
+
+};
+
+// Bounds check for a wasm memory access: traps when |index| is outside
+// |boundsCheckLimit|.
+class MWasmBoundsCheck
+  : public MBinaryInstruction,
+    public NoTypePolicy::Data
+{
+    // Wasm bytecode location used for trap reporting.
+    wasm::BytecodeOffset bytecodeOffset_;
+
+    explicit MWasmBoundsCheck(MDefinition* index, MDefinition* boundsCheckLimit,
+                              wasm::BytecodeOffset bytecodeOffset)
+      : MBinaryInstruction(classOpcode, index, boundsCheckLimit),
+        bytecodeOffset_(bytecodeOffset)
+    {
+        // Bounds check is effectful: it throws for OOB.
+        setGuard();
+    }
+
+  public:
+    INSTRUCTION_HEADER(WasmBoundsCheck)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, index), (1, boundsCheckLimit))
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    // A check made redundant (by an earlier dominating check) has its guard
+    // flag cleared via setRedundant.
+    bool isRedundant() const {
+        return !isGuard();
+    }
+
+    void setRedundant() {
+        setNotGuard();
+    }
+
+    wasm::BytecodeOffset bytecodeOffset() const {
+        return bytecodeOffset_;
+    }
+};
+
+// Store |value| into a wasm global variable located at |globalDataOffset|
+// relative to the TLS pointer.
+class MWasmStoreGlobalVar
+  : public MBinaryInstruction,
+    public NoTypePolicy::Data
+{
+    MWasmStoreGlobalVar(unsigned globalDataOffset, MDefinition* value, MDefinition* tlsPtr)
+      : MBinaryInstruction(classOpcode, value, tlsPtr),
+        globalDataOffset_(globalDataOffset)
+    { }
+
+    unsigned globalDataOffset_;
+
+  public:
+    INSTRUCTION_HEADER(WasmStoreGlobalVar)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, value), (1, tlsPtr))
+
+    unsigned globalDataOffset() const { return globalDataOffset_; }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::Store(AliasSet::WasmGlobalVar);
+    }
+};
+
+// Bitwise rotation of |input| by |count| bits, left or right.
+class MRotate
+  : public MBinaryInstruction,
+    public NoTypePolicy::Data
+{
+    bool isLeftRotate_;
+
+    MRotate(MDefinition* input, MDefinition* count, MIRType type, bool isLeftRotate)
+      : MBinaryInstruction(classOpcode, input, count), isLeftRotate_(isLeftRotate)
+    {
+        setMovable();
+        setResultType(type);
+    }
+
+  public:
+    INSTRUCTION_HEADER(Rotate)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, input), (1, count))
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    // Congruent only to rotations in the same direction.
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins) && ins->toRotate()->isLeftRotate() == isLeftRotate_;
+    }
+
+    bool isLeftRotate() const {
+        return isLeftRotate_;
+    }
+
+    ALLOW_CLONE(MRotate)
+};
+
+// Creates a new derived type object. At runtime, this is just a call
+// to `BinaryBlock::createDerived()`. That is, the MIR itself does not
+// compile to particularly optimized code. However, using a distinct
+// MIR for creating derived type objects allows the compiler to
+// optimize ephemeral typed objects as would be created for a
+// reference like `a.b.c` -- here, the `a.b` will create an ephemeral
+// derived type object that aliases the memory of `a` itself. The
+// specific nature of `a.b` is revealed by using
+// `MNewDerivedTypedObject` rather than `MGetProperty` or what have
+// you. Moreover, the compiler knows that there are no side-effects,
+// so `MNewDerivedTypedObject` instructions can be reordered or pruned
+// as dead code.
+class MNewDerivedTypedObject
+  : public MTernaryInstruction,
+    public MixPolicy<ObjectPolicy<0>,
+                     ObjectPolicy<1>,
+                     IntPolicy<2> >::Data
+{
+  private:
+    TypedObjectPrediction prediction_;
+
+    MNewDerivedTypedObject(TypedObjectPrediction prediction,
+                           MDefinition* type,
+                           MDefinition* owner,
+                           MDefinition* offset)
+      : MTernaryInstruction(classOpcode, type, owner, offset),
+        prediction_(prediction)
+    {
+        setMovable();
+        setResultType(MIRType::Object);
+    }
+
+  public:
+    INSTRUCTION_HEADER(NewDerivedTypedObject)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, type), (1, owner), (2, offset))
+
+    TypedObjectPrediction prediction() const {
+        return prediction_;
+    }
+
+    virtual AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    // Recoverable on bailout: the derived object can be reconstructed.
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    bool canRecoverOnBailout() const override {
+        return true;
+    }
+};
+
+// Initialize an element: obj[id] = value. Produces no result.
+class MInitElem
+  : public MTernaryInstruction,
+    public MixPolicy<ObjectPolicy<0>, BoxPolicy<1>, BoxPolicy<2> >::Data
+{
+    MInitElem(MDefinition* obj, MDefinition* id, MDefinition* value)
+      : MTernaryInstruction(classOpcode, obj, id, value)
+    {
+        setResultType(MIRType::None);
+    }
+
+  public:
+    INSTRUCTION_HEADER(InitElem)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, getObject), (1, getId), (2, getValue))
+
+    bool possiblyCalls() const override {
+        return true;
+    }
+};
+
+// Initialize an element accessor: install |value| as the getter/setter for
+// obj[idValue].
+class MInitElemGetterSetter
+  : public MTernaryInstruction,
+    public MixPolicy<ObjectPolicy<0>, BoxPolicy<1>, ObjectPolicy<2> >::Data
+{
+    MInitElemGetterSetter(MDefinition* obj, MDefinition* id, MDefinition* value)
+      : MTernaryInstruction(classOpcode, obj, id, value)
+    { }
+
+  public:
+    INSTRUCTION_HEADER(InitElemGetterSetter)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object), (1, idValue), (2, value))
+
+};
+
+// fun.apply(self, arguments)
+class MApplyArgs
+  : public MTernaryInstruction,
+    public MixPolicy<ObjectPolicy<0>, IntPolicy<1>, BoxPolicy<2> >::Data
+{
+  protected:
+    // Monomorphic cache of single target from TI, or nullptr.
+    WrappedFunction* target_;
+
+    MApplyArgs(WrappedFunction* target, MDefinition* fun, MDefinition* argc, MDefinition* self)
+      : MTernaryInstruction(classOpcode, fun, argc, self),
+        target_(target)
+    {
+        setResultType(MIRType::Value);
+    }
+
+  public:
+    INSTRUCTION_HEADER(ApplyArgs)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, getFunction), (1, getArgc), (2, getThis))
+
+    // For TI-informed monomorphic callsites.
+    WrappedFunction* getSingleTarget() const {
+        return target_;
+    }
+
+    bool possiblyCalls() const override {
+        return true;
+    }
+
+    // Trace the cached target (if any) for GC.
+    bool appendRoots(MRootList& roots) const override {
+        if (target_)
+            return target_->appendRoots(roots);
+        return true;
+    }
+};
+
+// fun.apply(fn, array)
+class MApplyArray
+  : public MTernaryInstruction,
+    public MixPolicy<ObjectPolicy<0>, ObjectPolicy<1>, BoxPolicy<2> >::Data
+{
+  protected:
+    // Monomorphic cache of single target from TI, or nullptr.
+    WrappedFunction* target_;
+
+    MApplyArray(WrappedFunction* target, MDefinition* fun, MDefinition* elements, MDefinition* self)
+      : MTernaryInstruction(classOpcode, fun, elements, self),
+        target_(target)
+    {
+        setResultType(MIRType::Value);
+    }
+
+  public:
+    INSTRUCTION_HEADER(ApplyArray)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, getFunction), (1, getElements), (2, getThis))
+
+    // For TI-informed monomorphic callsites.
+    WrappedFunction* getSingleTarget() const {
+        return target_;
+    }
+
+    bool possiblyCalls() const override {
+        return true;
+    }
+
+    // Trace the cached target (if any) for GC.
+    bool appendRoots(MRootList& roots) const override {
+        if (target_)
+            return target_->appendRoots(roots);
+        return true;
+    }
+};
+
+// Caller-side allocation of |this| for |new|:
+// Given a prototype operand, construct |this| for JSOP_NEW.
+class MCreateThisWithProto
+  : public MTernaryInstruction,
+    public MixPolicy<ObjectPolicy<0>, ObjectPolicy<1>, ObjectPolicy<2> >::Data
+{
+    MCreateThisWithProto(MDefinition* callee, MDefinition* newTarget, MDefinition* prototype)
+      : MTernaryInstruction(classOpcode, callee, newTarget, prototype)
+    {
+        setResultType(MIRType::Object);
+    }
+
+  public:
+    INSTRUCTION_HEADER(CreateThisWithProto)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, getCallee), (1, getNewTarget), (2, getPrototype))
+
+    // Although creation of |this| modifies global state, it is safely repeatable.
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+    bool possiblyCalls() const override {
+        return true;
+    }
+};
+
+class MSimdSelect
+  : public MTernaryInstruction,
+    public SimdSelectPolicy::Data
+{
+    MSimdSelect(MDefinition* mask, MDefinition* lhs, MDefinition* rhs)
+      : MTernaryInstruction(classOpcode, mask, lhs, rhs)
+    {
+        MOZ_ASSERT(IsBooleanSimdType(mask->type()));
+        MOZ_ASSERT(lhs->type() == rhs->type());
+        MIRType type = lhs->type();
+        MOZ_ASSERT(IsSimdType(type));
+        setResultType(type);
+        specialization_ = type;
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(SimdSelect)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, mask))
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+
+    ALLOW_CLONE(MSimdSelect)
+};
+
+class MRegExpMatcher
+  : public MTernaryInstruction,
+    public MixPolicy<ObjectPolicy<0>,
+                     StringPolicy<1>,
+                     IntPolicy<2> >::Data
+{
+  private:
+
+    MRegExpMatcher(MDefinition* regexp, MDefinition* string, MDefinition* lastIndex)
+      : MTernaryInstruction(classOpcode, regexp, string, lastIndex)
+    {
+        setMovable();
+        // May be object or null.
+        setResultType(MIRType::Value);
+    }
+
+  public:
+    INSTRUCTION_HEADER(RegExpMatcher)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, regexp), (1, string), (2, lastIndex))
+
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+
+    bool canRecoverOnBailout() const override {
+        return true;
+    }
+
+    bool possiblyCalls() const override {
+        return true;
+    }
+};
+
+class MRegExpSearcher
+  : public MTernaryInstruction,
+    public MixPolicy<ObjectPolicy<0>,
+                     StringPolicy<1>,
+                     IntPolicy<2> >::Data
+{
+  private:
+
+    MRegExpSearcher(MDefinition* regexp, MDefinition* string, MDefinition* lastIndex)
+      : MTernaryInstruction(classOpcode, regexp, string, lastIndex)
+    {
+        setMovable();
+        setResultType(MIRType::Int32);
+    }
+
+  public:
+    INSTRUCTION_HEADER(RegExpSearcher)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, regexp), (1, string), (2, lastIndex))
+
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+
+    bool canRecoverOnBailout() const override {
+        return true;
+    }
+
+    bool possiblyCalls() const override {
+        return true;
+    }
+};
+
+class MRegExpTester
+  : public MTernaryInstruction,
+    public MixPolicy<ObjectPolicy<0>,
+                     StringPolicy<1>,
+                     IntPolicy<2> >::Data
+{
+  private:
+
+    MRegExpTester(MDefinition* regexp, MDefinition* string, MDefinition* lastIndex)
+      : MTernaryInstruction(classOpcode, regexp, string, lastIndex)
+    {
+        setMovable();
+        setResultType(MIRType::Int32);
+    }
+
+  public:
+    INSTRUCTION_HEADER(RegExpTester)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, regexp), (1, string), (2, lastIndex))
+
+    bool possiblyCalls() const override {
+        return true;
+    }
+
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    bool canRecoverOnBailout() const override {
+        return true;
+    }
+};
+
+class MStringReplace
+  : public MTernaryInstruction,
+    public MixPolicy<StringPolicy<0>, StringPolicy<1>, StringPolicy<2> >::Data
+{
+  private:
+
+    bool isFlatReplacement_;
+
+    MStringReplace(MDefinition* string, MDefinition* pattern, MDefinition* replacement)
+      : MTernaryInstruction(classOpcode, string, pattern, replacement),
+        isFlatReplacement_(false)
+    {
+        setMovable();
+        setResultType(MIRType::String);
+    }
+
+  public:
+    INSTRUCTION_HEADER(StringReplace)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, string), (1, pattern), (2, replacement))
+
+    void setFlatReplacement() {
+        MOZ_ASSERT(!isFlatReplacement_);
+        isFlatReplacement_ = true;
+    }
+
+    bool isFlatReplacement() const {
+        return isFlatReplacement_;
+    }
+
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isStringReplace())
+            return false;
+        if (isFlatReplacement_ != ins->toStringReplace()->isFlatReplacement())
+            return false;
+        return congruentIfOperandsEqual(ins);
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    bool canRecoverOnBailout() const override {
+        if (isFlatReplacement_) {
+            MOZ_ASSERT(!pattern()->isRegExp());
+            return true;
+        }
+        return false;
+    }
+
+    bool possiblyCalls() const override {
+        return true;
+    }
+};
+
+class MSubstr
+  : public MTernaryInstruction,
+    public MixPolicy<StringPolicy<0>, IntPolicy<1>, IntPolicy<2>>::Data
+{
+  private:
+
+    MSubstr(MDefinition* string, MDefinition* begin, MDefinition* length)
+      : MTernaryInstruction(classOpcode, string, begin, length)
+    {
+        setResultType(MIRType::String);
+    }
+
+  public:
+    INSTRUCTION_HEADER(Substr)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, string), (1, begin), (2, length))
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
+class MLambdaArrow
+  : public MTernaryInstruction,
+    public MixPolicy<ObjectPolicy<0>, BoxPolicy<1>, ObjectPolicy<2>>::Data
+{
+    const LambdaFunctionInfo info_;
+
+    MLambdaArrow(TempAllocator& alloc, CompilerConstraintList* constraints, MDefinition* envChain,
+                 MDefinition* newTarget, MConstant* cst)
+      : MTernaryInstruction(classOpcode, envChain, newTarget, cst),
+        info_(&cst->toObject().as<JSFunction>())
+    {
+        setResultType(MIRType::Object);
+        MOZ_ASSERT(!ObjectGroup::useSingletonForClone(info().fun));
+        if (!info().fun->isSingleton())
+            setResultTypeSet(MakeSingletonTypeSet(alloc, constraints, info().fun));
+    }
+
+  public:
+    INSTRUCTION_HEADER(LambdaArrow)
+    TRIVIAL_NEW_WRAPPERS_WITH_ALLOC
+    NAMED_OPERANDS((0, environmentChain), (1, newTargetDef))
+
+    MConstant* functionOperand() const {
+        return getOperand(2)->toConstant();
+    }
+    const LambdaFunctionInfo& info() const {
+        return info_;
+    }
+    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+    bool canRecoverOnBailout() const override {
+        return true;
+    }
+    bool appendRoots(MRootList& roots) const override {
+        return info_.appendRoots(roots);
+    }
+};
+
+class MSetDisjointTypedElements
+  : public MTernaryInstruction,
+    public NoTypePolicy::Data
+{
+    explicit MSetDisjointTypedElements(MDefinition* target, MDefinition* targetOffset,
+                                       MDefinition* source)
+      : MTernaryInstruction(classOpcode, target, targetOffset, source)
+    {
+        MOZ_ASSERT(target->type() == MIRType::Object);
+        MOZ_ASSERT(targetOffset->type() == MIRType::Int32);
+        MOZ_ASSERT(source->type() == MIRType::Object);
+        setResultType(MIRType::None);
+    }
+
+  public:
+    INSTRUCTION_HEADER(SetDisjointTypedElements)
+    NAMED_OPERANDS((0, target), (1, targetOffset), (2, source))
+
+    static MSetDisjointTypedElements*
+    New(TempAllocator& alloc, MDefinition* target, MDefinition* targetOffset,
+        MDefinition* source)
+    {
+        return new(alloc) MSetDisjointTypedElements(target, targetOffset, source);
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::Store(AliasSet::UnboxedElement);
+    }
+
+    ALLOW_CLONE(MSetDisjointTypedElements)
+};
+
+// Load a value from the elements vector of a native object. If the index is
+// out-of-bounds, or the indexed slot has a hole, undefined is returned instead.
+class MLoadElementHole
+  : public MTernaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    bool needsNegativeIntCheck_;
+    bool needsHoleCheck_;
+
+    MLoadElementHole(MDefinition* elements, MDefinition* index, MDefinition* initLength,
+                     bool needsHoleCheck)
+      : MTernaryInstruction(classOpcode, elements, index, initLength),
+        needsNegativeIntCheck_(true),
+        needsHoleCheck_(needsHoleCheck)
+    {
+        setResultType(MIRType::Value);
+        setMovable();
+
+        // Set the guard flag to make sure we bail when we see a negative
+        // index. We can clear this flag (and needsNegativeIntCheck_) in
+        // collectRangeInfoPreTrunc.
+        setGuard();
+
+        MOZ_ASSERT(elements->type() == MIRType::Elements);
+        MOZ_ASSERT(index->type() == MIRType::Int32);
+        MOZ_ASSERT(initLength->type() == MIRType::Int32);
+    }
+
+  public:
+    INSTRUCTION_HEADER(LoadElementHole)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, elements), (1, index), (2, initLength))
+
+    bool needsNegativeIntCheck() const {
+        return needsNegativeIntCheck_;
+    }
+    bool needsHoleCheck() const {
+        return needsHoleCheck_;
+    }
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isLoadElementHole())
+            return false;
+        const MLoadElementHole* other = ins->toLoadElementHole();
+        if (needsHoleCheck() != other->needsHoleCheck())
+            return false;
+        if (needsNegativeIntCheck() != other->needsNegativeIntCheck())
+            return false;
+        return congruentIfOperandsEqual(other);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::Load(AliasSet::Element);
+    }
+    void collectRangeInfoPreTrunc() override;
+
+    ALLOW_CLONE(MLoadElementHole)
+};
+
+// Store a value to a dense array slots vector.
+class MStoreElement
+  : public MTernaryInstruction,
+    public MStoreElementCommon,
+    public MixPolicy<SingleObjectPolicy, NoFloatPolicy<2> >::Data
+{
+    bool needsHoleCheck_;
+    int32_t offsetAdjustment_;
+
+    MStoreElement(MDefinition* elements, MDefinition* index, MDefinition* value,
+                  bool needsHoleCheck, int32_t offsetAdjustment = 0)
+      : MTernaryInstruction(classOpcode, elements, index, value)
+    {
+        needsHoleCheck_ = needsHoleCheck;
+        offsetAdjustment_ = offsetAdjustment;
+        MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment));
+        MOZ_ASSERT(index->type() == MIRType::Int32);
+    }
+
+  public:
+    INSTRUCTION_HEADER(StoreElement)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, elements), (1, index), (2, value))
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::Store(AliasSet::Element);
+    }
+    bool needsHoleCheck() const {
+        return needsHoleCheck_;
+    }
+    int32_t offsetAdjustment() const {
+        return offsetAdjustment_;
+    }
+    bool fallible() const {
+        return needsHoleCheck();
+    }
+
+    ALLOW_CLONE(MStoreElement)
+};
+
+// Store an unboxed string to a vector.
+class MStoreUnboxedString
+  : public MTernaryInstruction,
+    public MixPolicy<SingleObjectPolicy, ConvertToStringPolicy<2> >::Data
+{
+    int32_t offsetAdjustment_;
+    bool preBarrier_;
+
+    MStoreUnboxedString(MDefinition* elements, MDefinition* index, MDefinition* value,
+                        int32_t offsetAdjustment = 0, bool preBarrier = true)
+      : MTernaryInstruction(classOpcode, elements, index, value),
+        offsetAdjustment_(offsetAdjustment),
+        preBarrier_(preBarrier)
+    {
+        MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment));
+        MOZ_ASSERT(index->type() == MIRType::Int32);
+    }
+
+  public:
+    INSTRUCTION_HEADER(StoreUnboxedString)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, elements), (1, index), (2, value))
+
+    int32_t offsetAdjustment() const {
+        return offsetAdjustment_;
+    }
+    bool preBarrier() const {
+        return preBarrier_;
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::Store(AliasSet::UnboxedElement);
+    }
+
+    ALLOW_CLONE(MStoreUnboxedString)
+};
+
+// Array.prototype.slice on a dense array.
+class MArraySlice
+  : public MTernaryInstruction,
+    public MixPolicy<ObjectPolicy<0>, IntPolicy<1>, IntPolicy<2>>::Data
+{
+    CompilerObject templateObj_;
+    gc::InitialHeap initialHeap_;
+
+    MArraySlice(CompilerConstraintList* constraints, MDefinition* obj,
+                MDefinition* begin, MDefinition* end,
+                JSObject* templateObj, gc::InitialHeap initialHeap)
+      : MTernaryInstruction(classOpcode, obj, begin, end),
+        templateObj_(templateObj),
+        initialHeap_(initialHeap)
+    {
+        setResultType(MIRType::Object);
+    }
+
+  public:
+    INSTRUCTION_HEADER(ArraySlice)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object), (1, begin), (2, end))
+
+    JSObject* templateObj() const {
+        return templateObj_;
+    }
+
+    gc::InitialHeap initialHeap() const {
+        return initialHeap_;
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::Store(AliasSet::Element | AliasSet::ObjectFields);
+    }
+    bool possiblyCalls() const override {
+        return true;
+    }
+    bool appendRoots(MRootList& roots) const override {
+        return roots.append(templateObj_);
+    }
+};
+
+// Store an unboxed scalar value to a typed array or other object.
+class MStoreUnboxedScalar
+  : public MTernaryInstruction,
+    public StoreUnboxedScalarBase,
+    public StoreUnboxedScalarPolicy::Data
+{
+  public:
+    enum TruncateInputKind {
+        DontTruncateInput,
+        TruncateInput
+    };
+
+  private:
+    Scalar::Type storageType_;
+
+    // Whether this store truncates out of range inputs, for use by range analysis.
+    TruncateInputKind truncateInput_;
+
+    bool requiresBarrier_;
+    int32_t offsetAdjustment_;
+    unsigned numElems_; // used only for SIMD
+
+    MStoreUnboxedScalar(MDefinition* elements, MDefinition* index, MDefinition* value,
+                        Scalar::Type storageType, TruncateInputKind truncateInput,
+                        MemoryBarrierRequirement requiresBarrier = DoesNotRequireMemoryBarrier,
+                        int32_t offsetAdjustment = 0)
+      : MTernaryInstruction(classOpcode, elements, index, value),
+        StoreUnboxedScalarBase(storageType),
+        storageType_(storageType),
+        truncateInput_(truncateInput),
+        requiresBarrier_(requiresBarrier == DoesRequireMemoryBarrier),
+        offsetAdjustment_(offsetAdjustment),
+        numElems_(1)
+    {
+        if (requiresBarrier_)
+            setGuard();         // Not removable or movable
+        else
+            setMovable();
+        MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment));
+        MOZ_ASSERT(index->type() == MIRType::Int32);
+        MOZ_ASSERT(storageType >= 0 && storageType < Scalar::MaxTypedArrayViewType);
+    }
+
+  public:
+    INSTRUCTION_HEADER(StoreUnboxedScalar)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, elements), (1, index), (2, value))
+
+    void setSimdWrite(Scalar::Type writeType, unsigned numElems) {
+        MOZ_ASSERT(Scalar::isSimdType(writeType));
+        setWriteType(writeType);
+        numElems_ = numElems;
+    }
+    unsigned numElems() const {
+        return numElems_;
+    }
+    Scalar::Type storageType() const {
+        return storageType_;
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::Store(AliasSet::UnboxedElement);
+    }
+    TruncateInputKind truncateInput() const {
+        return truncateInput_;
+    }
+    bool requiresMemoryBarrier() const {
+        return requiresBarrier_;
+    }
+    int32_t offsetAdjustment() const {
+        return offsetAdjustment_;
+    }
+    TruncateKind operandTruncateKind(size_t index) const override;
+
+    bool canConsumeFloat32(MUse* use) const override {
+        return use == getUseFor(2) && writeType() == Scalar::Float32;
+    }
+
+    ALLOW_CLONE(MStoreUnboxedScalar)
+};
+
+class MGetPropSuperCache
+  : public MTernaryInstruction,
+    public MixPolicy<ObjectPolicy<0>, BoxExceptPolicy<1, MIRType::Object>, CacheIdPolicy<2>>::Data
+{
+    MGetPropSuperCache(MDefinition* obj, MDefinition* receiver, MDefinition* id)
+      : MTernaryInstruction(classOpcode, obj, receiver, id)
+    {
+        setResultType(MIRType::Value);
+        setGuard();
+    }
+
+  public:
+    INSTRUCTION_HEADER(GetPropSuperCache)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object), (1, receiver), (2, idval))
+};
+
+class MSetElementInstruction
+  : public MTernaryInstruction
+{
+    bool strict_;
+  protected:
+    MSetElementInstruction(Opcode op, MDefinition* object, MDefinition* index, MDefinition* value,
+                           bool strict)
+      : MTernaryInstruction(op, object, index, value),
+        strict_(strict)
+    {
+    }
+
+  public:
+    NAMED_OPERANDS((0, object), (1, index), (2, value))
+    bool strict() const {
+        return strict_;
+    }
+};
+
+class MSetPropertyCache
+  : public MTernaryInstruction,
+    public MixPolicy<SingleObjectPolicy, CacheIdPolicy<1>, NoFloatPolicy<2>>::Data
+{
+    bool strict_ : 1;
+    bool needsPostBarrier_ : 1;
+    bool needsTypeBarrier_ : 1;
+    bool guardHoles_ : 1;
+
+    MSetPropertyCache(MDefinition* obj, MDefinition* id, MDefinition* value, bool strict,
+                      bool needsPostBarrier, bool typeBarrier, bool guardHoles)
+      : MTernaryInstruction(classOpcode, obj, id, value),
+        strict_(strict),
+        needsPostBarrier_(needsPostBarrier),
+        needsTypeBarrier_(typeBarrier),
+        guardHoles_(guardHoles)
+    {
+    }
+
+  public:
+    INSTRUCTION_HEADER(SetPropertyCache)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object), (1, idval), (2, value))
+
+    bool needsPostBarrier() const {
+        return needsPostBarrier_;
+    }
+    bool needsTypeBarrier() const {
+        return needsTypeBarrier_;
+    }
+
+    bool guardHoles() const {
+        return guardHoles_;
+    }
+
+    bool strict() const {
+        return strict_;
+    }
+};
+
+class MCallInitElementArray
+  : public MTernaryInstruction,
+    public MixPolicy<ObjectPolicy<0>, IntPolicy<1>, BoxPolicy<2> >::Data
+{
+    MCallInitElementArray(MDefinition* obj, MDefinition* index, MDefinition* val)
+      : MTernaryInstruction(classOpcode, obj, index, val)
+    {
+        MOZ_ASSERT(index->type() == MIRType::Int32);
+    }
+
+  public:
+    INSTRUCTION_HEADER(CallInitElementArray)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object), (1, index), (2, value))
+
+    bool possiblyCalls() const override {
+        return true;
+    }
+};
+
+// Given a value being written to another object's elements at the specified
+// index, update the generational store buffer if the value is in the nursery
+// and object is in the tenured heap.
+class MPostWriteElementBarrier : public MTernaryInstruction
+                               , public MixPolicy<ObjectPolicy<0>, IntPolicy<2>>::Data
+{
+    MPostWriteElementBarrier(MDefinition* obj, MDefinition* value, MDefinition* index)
+      : MTernaryInstruction(classOpcode, obj, value, index)
+    {
+        setGuard();
+    }
+
+  public:
+    INSTRUCTION_HEADER(PostWriteElementBarrier)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object), (1, value), (2, index))
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+#ifdef DEBUG
+    bool isConsistentFloat32Use(MUse* use) const override {
+        // During lowering, values that neither have object nor value MIR type
+        // are ignored, thus Float32 can show up at this point without any issue.
+        return use == getUseFor(1);
+    }
+#endif
+
+    ALLOW_CLONE(MPostWriteElementBarrier)
+};
+
+class MAtomicExchangeTypedArrayElement
+  : public MTernaryInstruction,
+    public MixPolicy<ObjectPolicy<0>, IntPolicy<1>, TruncateToInt32Policy<2>>::Data
+{
+    Scalar::Type arrayType_;
+
+    MAtomicExchangeTypedArrayElement(MDefinition* elements, MDefinition* index, MDefinition* value,
+                                     Scalar::Type arrayType)
+      : MTernaryInstruction(classOpcode, elements, index, value),
+        arrayType_(arrayType)
+    {
+        MOZ_ASSERT(arrayType <= Scalar::Uint32);
+        setGuard();             // Not removable
+    }
+
+  public:
+    INSTRUCTION_HEADER(AtomicExchangeTypedArrayElement)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, elements), (1, index), (2, value))
+
+    bool isByteArray() const {
+        return (arrayType_ == Scalar::Int8 ||
+                arrayType_ == Scalar::Uint8);
+    }
+    Scalar::Type arrayType() const {
+        return arrayType_;
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::Store(AliasSet::UnboxedElement);
+    }
+};
+
+class MAtomicTypedArrayElementBinop
+  : public MTernaryInstruction,
+    public MixPolicy< ObjectPolicy<0>, IntPolicy<1>, TruncateToInt32Policy<2> >::Data
+{
+  private:
+    AtomicOp op_;
+    Scalar::Type arrayType_;
+
+  protected:
+    explicit MAtomicTypedArrayElementBinop(AtomicOp op, MDefinition* elements, MDefinition* index,
+                                           Scalar::Type arrayType, MDefinition* value)
+      : MTernaryInstruction(classOpcode, elements, index, value),
+        op_(op),
+        arrayType_(arrayType)
+    {
+        setGuard();             // Not removable
+    }
+
+  public:
+    INSTRUCTION_HEADER(AtomicTypedArrayElementBinop)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, elements), (1, index), (2, value))
+
+    bool isByteArray() const {
+        return (arrayType_ == Scalar::Int8 ||
+                arrayType_ == Scalar::Uint8);
+    }
+    AtomicOp operation() const {
+        return op_;
+    }
+    Scalar::Type arrayType() const {
+        return arrayType_;
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::Store(AliasSet::UnboxedElement);
+    }
+};
+
+class MFinishBoundFunctionInit
+  : public MTernaryInstruction,
+    public MixPolicy<ObjectPolicy<0>, ObjectPolicy<1>, IntPolicy<2>>::Data
+{
+    MFinishBoundFunctionInit(MDefinition* bound, MDefinition* target, MDefinition* argCount)
+      : MTernaryInstruction(classOpcode, bound, target, argCount)
+    { }
+
+  public:
+    INSTRUCTION_HEADER(FinishBoundFunctionInit)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, bound), (1, target), (2, argCount))
+};
+
+class MWasmSelect
+  : public MTernaryInstruction,
+    public NoTypePolicy::Data
+{
+    MWasmSelect(MDefinition* trueExpr, MDefinition* falseExpr, MDefinition *condExpr)
+      : MTernaryInstruction(classOpcode, trueExpr, falseExpr, condExpr)
+    {
+        MOZ_ASSERT(condExpr->type() == MIRType::Int32);
+        MOZ_ASSERT(trueExpr->type() == falseExpr->type());
+        setResultType(trueExpr->type());
+        setMovable();
+    }
+
+  public:
+    INSTRUCTION_HEADER(WasmSelect)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, trueExpr), (1, falseExpr), (2, condExpr))
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+
+    ALLOW_CLONE(MWasmSelect)
+};
+
+// Generic constructor of SIMD valuesX4.
+class MSimdValueX4
+  : public MQuaternaryInstruction,
+    public MixPolicy<SimdScalarPolicy<0>, SimdScalarPolicy<1>,
+                     SimdScalarPolicy<2>, SimdScalarPolicy<3> >::Data
+{
+  protected:
+    MSimdValueX4(MIRType type, MDefinition* x, MDefinition* y, MDefinition* z, MDefinition* w)
+      : MQuaternaryInstruction(classOpcode, x, y, z, w)
+    {
+        MOZ_ASSERT(IsSimdType(type));
+        MOZ_ASSERT(SimdTypeToLength(type) == 4);
+
+        setMovable();
+        setResultType(type);
+    }
+
+  public:
+    INSTRUCTION_HEADER(SimdValueX4)
+    TRIVIAL_NEW_WRAPPERS
+
+    bool canConsumeFloat32(MUse* use) const override {
+        return SimdTypeToLaneType(type()) == MIRType::Float32;
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    bool congruentTo(const MDefinition* ins) const override {
+        return congruentIfOperandsEqual(ins);
+    }
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+
+    ALLOW_CLONE(MSimdValueX4)
+};
+
+// Try to store a value to a dense array slots vector. May fail due to the object being frozen.
+// Cannot be used on an object that has extra indexed properties.
+class MFallibleStoreElement
+  : public MQuaternaryInstruction,
+    public MStoreElementCommon,
+    public MixPolicy<SingleObjectPolicy, NoFloatPolicy<3> >::Data
+{
+    bool strict_;
+
+    MFallibleStoreElement(MDefinition* object, MDefinition* elements,
+                          MDefinition* index, MDefinition* value,
+                          bool strict)
+      : MQuaternaryInstruction(classOpcode, object, elements, index, value),
+        strict_(strict)
+    {
+        MOZ_ASSERT(elements->type() == MIRType::Elements);
+        MOZ_ASSERT(index->type() == MIRType::Int32);
+    }
+
+  public:
+    INSTRUCTION_HEADER(FallibleStoreElement)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object), (1, elements), (2, index), (3, value))
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::Store(AliasSet::ObjectFields | AliasSet::Element);
+    }
+    bool strict() const {
+        return strict_;
+    }
+
+    ALLOW_CLONE(MFallibleStoreElement)
+};
+
+
+// Store an unboxed object or null pointer to a vector.
+class MStoreUnboxedObjectOrNull
+  : public MQuaternaryInstruction,
+    public StoreUnboxedObjectOrNullPolicy::Data
+{
+    int32_t offsetAdjustment_;
+    bool preBarrier_;
+
+    MStoreUnboxedObjectOrNull(MDefinition* elements, MDefinition* index,
+                              MDefinition* value, MDefinition* typedObj,
+                              int32_t offsetAdjustment = 0, bool preBarrier = true)
+      : MQuaternaryInstruction(classOpcode, elements, index, value, typedObj),
+        offsetAdjustment_(offsetAdjustment),
+        preBarrier_(preBarrier)
+    {
+        MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment));
+        MOZ_ASSERT(index->type() == MIRType::Int32);
+        MOZ_ASSERT(typedObj->type() == MIRType::Object);
+    }
+
+  public:
+    INSTRUCTION_HEADER(StoreUnboxedObjectOrNull)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, elements), (1, index), (2, value), (3, typedObj))
+
+    int32_t offsetAdjustment() const {
+        return offsetAdjustment_;
+    }
+    bool preBarrier() const {
+        return preBarrier_;
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::Store(AliasSet::UnboxedElement);
+    }
+
+    // For StoreUnboxedObjectOrNullPolicy.
+    void setValue(MDefinition* def) {
+        replaceOperand(2, def);
+    }
+
+    ALLOW_CLONE(MStoreUnboxedObjectOrNull)
+};
+
+class MStoreTypedArrayElementHole
+  : public MQuaternaryInstruction,
+    public StoreUnboxedScalarBase,
+    public StoreTypedArrayHolePolicy::Data
+{
+    MStoreTypedArrayElementHole(MDefinition* elements, MDefinition* length, MDefinition* index,
+                                MDefinition* value, Scalar::Type arrayType)
+      : MQuaternaryInstruction(classOpcode, elements, length, index, value),
+        StoreUnboxedScalarBase(arrayType)
+    {
+        setMovable();
+        MOZ_ASSERT(elements->type() == MIRType::Elements);
+        MOZ_ASSERT(length->type() == MIRType::Int32);
+        MOZ_ASSERT(index->type() == MIRType::Int32);
+        MOZ_ASSERT(arrayType >= 0 && arrayType < Scalar::MaxTypedArrayViewType);
+    }
+
+  public:
+    INSTRUCTION_HEADER(StoreTypedArrayElementHole)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, elements), (1, length), (2, index), (3, value))
+
+    Scalar::Type arrayType() const {
+        MOZ_ASSERT(!Scalar::isSimdType(writeType()),
+                   "arrayType == writeType iff the write type isn't SIMD");
+        return writeType();
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::Store(AliasSet::UnboxedElement);
+    }
+    TruncateKind operandTruncateKind(size_t index) const override;
+
+    bool canConsumeFloat32(MUse* use) const override {
+        return use == getUseFor(3) && arrayType() == Scalar::Float32;
+    }
+
+    ALLOW_CLONE(MStoreTypedArrayElementHole)
+};
+
+// Test whether the index is in the array bounds or a hole.
+class MInArray
+  : public MQuaternaryInstruction,
+    public ObjectPolicy<3>::Data
+{
+    bool needsHoleCheck_;
+    bool needsNegativeIntCheck_;
+
+    MInArray(MDefinition* elements, MDefinition* index,
+             MDefinition* initLength, MDefinition* object,
+             bool needsHoleCheck)
+      : MQuaternaryInstruction(classOpcode, elements, index, initLength, object),
+        needsHoleCheck_(needsHoleCheck),
+        needsNegativeIntCheck_(true)
+    {
+        setResultType(MIRType::Boolean);
+        setMovable();
+        MOZ_ASSERT(elements->type() == MIRType::Elements);
+        MOZ_ASSERT(index->type() == MIRType::Int32);
+        MOZ_ASSERT(initLength->type() == MIRType::Int32);
+    }
+
+  public:
+    INSTRUCTION_HEADER(InArray)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, elements), (1, index), (2, initLength), (3, object))
+
+    bool needsHoleCheck() const {
+        return needsHoleCheck_;
+    }
+    bool needsNegativeIntCheck() const {
+        return needsNegativeIntCheck_;
+    }
+    void collectRangeInfoPreTrunc() override;
+    AliasSet getAliasSet() const override {
+        return AliasSet::Load(AliasSet::Element);
+    }
+    bool congruentTo(const MDefinition* ins) const override {
+        if (!ins->isInArray())
+            return false;
+        const MInArray* other = ins->toInArray();
+        if (needsHoleCheck() != other->needsHoleCheck())
+            return false;
+        if (needsNegativeIntCheck() != other->needsNegativeIntCheck())
+            return false;
+        return congruentIfOperandsEqual(other);
+    }
+};
+
+class MCompareExchangeTypedArrayElement
+  : public MQuaternaryInstruction,
+    public MixPolicy<ObjectPolicy<0>, IntPolicy<1>, TruncateToInt32Policy<2>, TruncateToInt32Policy<3>>::Data
+{
+    Scalar::Type arrayType_;
+
+    explicit MCompareExchangeTypedArrayElement(MDefinition* elements, MDefinition* index,
+                                               Scalar::Type arrayType, MDefinition* oldval,
+                                               MDefinition* newval)
+      : MQuaternaryInstruction(classOpcode, elements, index, oldval, newval),
+        arrayType_(arrayType)
+    {
+        setGuard();             // Not removable
+    }
+
+  public:
+    INSTRUCTION_HEADER(CompareExchangeTypedArrayElement)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, elements), (1, index), (2, oldval), (3, newval))
+
+    bool isByteArray() const {
+        return (arrayType_ == Scalar::Int8 ||
+                arrayType_ == Scalar::Uint8);
+    }
+    int oldvalOperand() {
+        return 2;
+    }
+    Scalar::Type arrayType() const {
+        return arrayType_;
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::Store(AliasSet::UnboxedElement);
+    }
+};
+
+class MQuaternaryInstruction : public MAryInstruction<4>
+{
+  protected:
+    MQuaternaryInstruction(Opcode op,
+                           MDefinition* first, MDefinition* second,
+                           MDefinition* third, MDefinition* fourth)
+      : MAryInstruction(op)
+    {
+        initOperand(0, first);
+        initOperand(1, second);
+        initOperand(2, third);
+        initOperand(3, fourth);
+    }
+
+    HashNumber valueHash() const override;
+};
+
+// Like MStoreElement, but supports indexes >= initialized length. The downside
+// is that we cannot hoist the elements vector and bounds check, since this
+// instruction may update the (initialized) length and reallocate the elements
+// vector.
+class MStoreElementHole
+  : public MQuaternaryInstruction,
+    public MStoreElementCommon,
+    public MixPolicy<SingleObjectPolicy, NoFloatPolicy<3> >::Data
+{
+    MStoreElementHole(MDefinition* object, MDefinition* elements,
+                      MDefinition* index, MDefinition* value)
+      : MQuaternaryInstruction(classOpcode, object, elements, index, value)
+    {
+        MOZ_ASSERT(elements->type() == MIRType::Elements);
+        MOZ_ASSERT(index->type() == MIRType::Int32);
+    }
+
+  public:
+    INSTRUCTION_HEADER(StoreElementHole)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, object), (1, elements), (2, index), (3, value))
+
+    AliasSet getAliasSet() const override {
+        // StoreElementHole can update the initialized length, the array length
+        // or reallocate obj->elements.
+        return AliasSet::Store(AliasSet::ObjectFields | AliasSet::Element);
+    }
+
+    ALLOW_CLONE(MStoreElementHole)
+};
+
+#endif /* jit_MIRInstruction_h */
\ No newline at end of file
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -15,16 +15,17 @@
 #include "gc/GCTrace.h"
 #include "jit/AtomicOp.h"
 #include "jit/Bailouts.h"
 #include "jit/BaselineFrame.h"
 #include "jit/BaselineIC.h"
 #include "jit/BaselineJIT.h"
 #include "jit/Lowering.h"
 #include "jit/MIR.h"
+#include "jit/MIRInstruction.h"
 #include "js/Conversions.h"
 #include "vm/TraceLogging.h"
 
 #include "jsobjinlines.h"
 
 #include "gc/Nursery-inl.h"
 #include "jit/shared/Lowering-shared-inl.h"
 #include "vm/Interpreter-inl.h"
--- a/js/src/jit/RangeAnalysis.cpp
+++ b/js/src/jit/RangeAnalysis.cpp
@@ -9,16 +9,17 @@
 #include "mozilla/MathAlgorithms.h"
 
 #include "jit/Ion.h"
 #include "jit/IonAnalysis.h"
 #include "jit/JitSpewer.h"
 #include "jit/MIR.h"
 #include "jit/MIRGenerator.h"
 #include "jit/MIRGraph.h"
+#include "jit/MIRInstruction.h"
 #include "js/Conversions.h"
 #include "vm/ArgumentsObject.h"
 #include "vm/TypedArrayObject.h"
 
 #include "jsopcodeinlines.h"
 
 using namespace js;
 using namespace js::jit;
--- a/js/src/jit/Recover.cpp
+++ b/js/src/jit/Recover.cpp
@@ -19,16 +19,17 @@
 #include "builtin/TypedObject.h"
 
 #include "gc/Heap.h"
 
 #include "jit/JitSpewer.h"
 #include "jit/JSJitFrameIter.h"
 #include "jit/MIR.h"
 #include "jit/MIRGraph.h"
+#include "jit/MIRInstruction.h"
 #include "jit/VMFunctions.h"
 #include "vm/Interpreter.h"
 #include "vm/String.h"
 
 #include "vm/Interpreter-inl.h"
 #include "vm/NativeObject-inl.h"
 
 using namespace js;
--- a/js/src/jit/Recover.h
+++ b/js/src/jit/Recover.h
@@ -7,16 +7,17 @@
 #ifndef jit_Recover_h
 #define jit_Recover_h
 
 #include "mozilla/Attributes.h"
 
 #include "jsarray.h"
 
 #include "jit/MIR.h"
+#include "jit/MIRInstruction.h"
 #include "jit/Snapshots.h"
 
 struct JSContext;
 
 namespace js {
 namespace jit {
 
 // This file contains all recover instructions.
--- a/js/src/jit/ScalarReplacement.cpp
+++ b/js/src/jit/ScalarReplacement.cpp
@@ -8,16 +8,17 @@
 
 #include "mozilla/Vector.h"
 
 #include "jit/IonAnalysis.h"
 #include "jit/JitSpewer.h"
 #include "jit/MIR.h"
 #include "jit/MIRGenerator.h"
 #include "jit/MIRGraph.h"
+#include "jit/MIRInstruction.h"
 #include "vm/UnboxedObject.h"
 
 #include "jsobjinlines.h"
 
 namespace js {
 namespace jit {
 
 template <typename MemoryView>
--- a/js/src/jit/ValueNumbering.cpp
+++ b/js/src/jit/ValueNumbering.cpp
@@ -5,16 +5,17 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/ValueNumbering.h"
 
 #include "jit/AliasAnalysis.h"
 #include "jit/IonAnalysis.h"
 #include "jit/JitSpewer.h"
 #include "jit/MIRGenerator.h"
+#include "jit/MIRInstruction.h"
 
 using namespace js;
 using namespace js::jit;
 
 /*
  * Some notes on the main algorithm here:
  *  - The SSA identifier id() is the value number. We do replaceAllUsesWith as
  *    we go, so there's always at most one visible value with a given number.
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -12,16 +12,17 @@
 #include "jscompartment.h"
 #include "jsnum.h"
 
 #include "jit/CodeGenerator.h"
 #include "jit/JitCompartment.h"
 #include "jit/JitFrames.h"
 #include "jit/MIR.h"
 #include "jit/MIRGraph.h"
+#include "jit/MIRInstruction.h"
 #include "js/Conversions.h"
 #include "vm/Shape.h"
 #include "vm/TraceLogging.h"
 
 #include "jsscriptinlines.h"
 
 #include "jit/MacroAssembler-inl.h"
 #include "jit/shared/CodeGenerator-shared-inl.h"
--- a/js/src/jit/arm/CodeGenerator-arm.h
+++ b/js/src/jit/arm/CodeGenerator-arm.h
@@ -2,16 +2,17 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_arm_CodeGenerator_arm_h
 #define jit_arm_CodeGenerator_arm_h
 
+#include "jit/MIRInstruction.h"
 #include "jit/arm/Assembler-arm.h"
 #include "jit/shared/CodeGenerator-shared.h"
 
 namespace js {
 namespace jit {
 
 class OutOfLineBailout;
 class OutOfLineTableSwitch;
--- a/js/src/jit/arm/LIR-arm.h
+++ b/js/src/jit/arm/LIR-arm.h
@@ -2,16 +2,18 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_arm_LIR_arm_h
 #define jit_arm_LIR_arm_h
 
+#include "jit/MIRInstruction.h"
+
 namespace js {
 namespace jit {
 
 class LBoxFloatingPoint : public LInstructionHelper<2, 1, 1>
 {
     MIRType type_;
 
   public:
--- a/js/src/jit/arm/Lowering-arm.cpp
+++ b/js/src/jit/arm/Lowering-arm.cpp
@@ -4,16 +4,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "mozilla/MathAlgorithms.h"
 
 #include "jit/arm/Assembler-arm.h"
 #include "jit/Lowering.h"
 #include "jit/MIR.h"
+#include "jit/MIRInstruction.h"
 
 #include "jit/shared/Lowering-shared-inl.h"
 
 using namespace js;
 using namespace js::jit;
 
 using mozilla::FloorLog2;
 
--- a/js/src/jit/arm/Lowering-arm.h
+++ b/js/src/jit/arm/Lowering-arm.h
@@ -2,16 +2,17 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_arm_Lowering_arm_h
 #define jit_arm_Lowering_arm_h
 
+#include "jit/MIRInstruction.h"
 #include "jit/shared/Lowering-shared.h"
 
 namespace js {
 namespace jit {
 
 class LIRGeneratorARM : public LIRGeneratorShared
 {
   public:
--- a/js/src/jit/arm64/Lowering-arm64.cpp
+++ b/js/src/jit/arm64/Lowering-arm64.cpp
@@ -4,16 +4,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "mozilla/MathAlgorithms.h"
 
 #include "jit/arm64/Assembler-arm64.h"
 #include "jit/Lowering.h"
 #include "jit/MIR.h"
+#include "jit/MIRInstruction.h"
 
 #include "jit/shared/Lowering-shared-inl.h"
 
 using namespace js;
 using namespace js::jit;
 
 using mozilla::FloorLog2;
 
--- a/js/src/jit/arm64/Lowering-arm64.h
+++ b/js/src/jit/arm64/Lowering-arm64.h
@@ -2,16 +2,17 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_arm64_Lowering_arm64_h
 #define jit_arm64_Lowering_arm64_h
 
+#include "jit/MIRInstruction.h"
 #include "jit/shared/Lowering-shared.h"
 
 namespace js {
 namespace jit {
 
 class LIRGeneratorARM64 : public LIRGeneratorShared
 {
   public:
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
@@ -13,16 +13,17 @@
 #include "jscompartment.h"
 #include "jsnum.h"
 
 #include "jit/CodeGenerator.h"
 #include "jit/JitCompartment.h"
 #include "jit/JitFrames.h"
 #include "jit/MIR.h"
 #include "jit/MIRGraph.h"
+#include "jit/MIRInstruction.h"
 #include "js/Conversions.h"
 #include "vm/Shape.h"
 #include "vm/TraceLogging.h"
 
 #include "jsscriptinlines.h"
 
 #include "jit/MacroAssembler-inl.h"
 #include "jit/shared/CodeGenerator-shared-inl.h"
--- a/js/src/jit/mips-shared/Lowering-mips-shared.cpp
+++ b/js/src/jit/mips-shared/Lowering-mips-shared.cpp
@@ -4,16 +4,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/mips-shared/Lowering-mips-shared.h"
 
 #include "mozilla/MathAlgorithms.h"
 
 #include "jit/MIR.h"
+#include "jit/MIRInstruction.h"
 
 #include "jit/shared/Lowering-shared-inl.h"
 
 using namespace js;
 using namespace js::jit;
 
 using mozilla::FloorLog2;
 
--- a/js/src/jit/mips-shared/Lowering-mips-shared.h
+++ b/js/src/jit/mips-shared/Lowering-mips-shared.h
@@ -2,16 +2,17 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_mips_shared_Lowering_mips_shared_h
 #define jit_mips_shared_Lowering_mips_shared_h
 
+#include "jit/MIRInstruction.h"
 #include "jit/shared/Lowering-shared.h"
 
 namespace js {
 namespace jit {
 
 class LIRGeneratorMIPSShared : public LIRGeneratorShared
 {
   protected:
--- a/js/src/jit/mips32/CodeGenerator-mips32.cpp
+++ b/js/src/jit/mips32/CodeGenerator-mips32.cpp
@@ -8,16 +8,17 @@
 
 #include "mozilla/MathAlgorithms.h"
 
 #include "jit/CodeGenerator.h"
 #include "jit/JitCompartment.h"
 #include "jit/JitFrames.h"
 #include "jit/MIR.h"
 #include "jit/MIRGraph.h"
+#include "jit/MIRInstruction.h"
 #include "js/Conversions.h"
 #include "vm/Shape.h"
 #include "vm/TraceLogging.h"
 
 #include "jit/MacroAssembler-inl.h"
 #include "jit/shared/CodeGenerator-shared-inl.h"
 
 using namespace js;
--- a/js/src/jit/mips32/LIR-mips32.h
+++ b/js/src/jit/mips32/LIR-mips32.h
@@ -2,16 +2,18 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_mips32_LIR_mips32_h
 #define jit_mips32_LIR_mips32_h
 
+#include "jit/MIRInstruction.h"
+
 namespace js {
 namespace jit {
 
 class LBoxFloatingPoint : public LInstructionHelper<2, 1, 1>
 {
     MIRType type_;
 
   public:
--- a/js/src/jit/mips32/Lowering-mips32.cpp
+++ b/js/src/jit/mips32/Lowering-mips32.cpp
@@ -4,17 +4,18 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/mips32/Lowering-mips32.h"
 
 #include "jit/mips32/Assembler-mips32.h"
 
 #include "jit/MIR.h"
-
+#include "jit/MIRInstruction.h"
+
 #include "jit/shared/Lowering-shared-inl.h"
 
 using namespace js;
 using namespace js::jit;
 
 LBoxAllocation
 LIRGeneratorMIPS::useBoxFixed(MDefinition* mir, Register reg1, Register reg2, bool useAtStart)
 {
--- a/js/src/jit/mips32/Lowering-mips32.h
+++ b/js/src/jit/mips32/Lowering-mips32.h
@@ -2,16 +2,17 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_mips32_Lowering_mips32_h
 #define jit_mips32_Lowering_mips32_h
 
+#include "jit/MIRInstruction.h"
 #include "jit/mips-shared/Lowering-mips-shared.h"
 
 namespace js {
 namespace jit {
 
 class LIRGeneratorMIPS : public LIRGeneratorMIPSShared
 {
   protected:
--- a/js/src/jit/mips64/CodeGenerator-mips64.cpp
+++ b/js/src/jit/mips64/CodeGenerator-mips64.cpp
@@ -8,16 +8,17 @@
 
 #include "mozilla/MathAlgorithms.h"
 
 #include "jit/CodeGenerator.h"
 #include "jit/JitCompartment.h"
 #include "jit/JitFrames.h"
 #include "jit/MIR.h"
 #include "jit/MIRGraph.h"
+#include "jit/MIRInstruction.h"
 #include "js/Conversions.h"
 #include "vm/Shape.h"
 #include "vm/TraceLogging.h"
 
 #include "jit/MacroAssembler-inl.h"
 #include "jit/shared/CodeGenerator-shared-inl.h"
 
 using namespace js;
--- a/js/src/jit/mips64/LIR-mips64.h
+++ b/js/src/jit/mips64/LIR-mips64.h
@@ -2,16 +2,18 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_mips64_LIR_mips64_h
 #define jit_mips64_LIR_mips64_h
 
+#include "jit/MIRInstruction.h"
+
 namespace js {
 namespace jit {
 
 class LUnbox : public LInstructionHelper<1, 1, 0>
 {
   public:
     LIR_HEADER(Unbox);
 
--- a/js/src/jit/mips64/Lowering-mips64.cpp
+++ b/js/src/jit/mips64/Lowering-mips64.cpp
@@ -4,16 +4,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/mips64/Lowering-mips64.h"
 
 #include "jit/mips64/Assembler-mips64.h"
 
 #include "jit/MIR.h"
+#include "jit/MIRInstruction.h"
 
 #include "jit/shared/Lowering-shared-inl.h"
 
 using namespace js;
 using namespace js::jit;
 
 void
 LIRGeneratorMIPS64::defineInt64Phi(MPhi* phi, size_t lirIndex)
--- a/js/src/jit/none/Lowering-none.h
+++ b/js/src/jit/none/Lowering-none.h
@@ -2,16 +2,17 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_none_Lowering_none_h
 #define jit_none_Lowering_none_h
 
+#include "jit/MIRInstruction.h"
 #include "jit/shared/Lowering-shared.h"
 
 namespace js {
 namespace jit {
 
 class LIRGeneratorNone : public LIRGeneratorShared
 {
   public:
--- a/js/src/jit/shared/CodeGenerator-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -9,16 +9,17 @@
 #include "mozilla/DebugOnly.h"
 
 #include "jit/CompactBuffer.h"
 #include "jit/JitcodeMap.h"
 #include "jit/JitSpewer.h"
 #include "jit/MacroAssembler.h"
 #include "jit/MIR.h"
 #include "jit/MIRGenerator.h"
+#include "jit/MIRInstruction.h"
 #include "jit/OptimizationTracking.h"
 #include "js/Conversions.h"
 #include "vm/TraceLogging.h"
 
 #include "jit/JitFrames-inl.h"
 #include "jit/MacroAssembler-inl.h"
 
 using namespace js;
--- a/js/src/jit/shared/CodeGenerator-shared.h
+++ b/js/src/jit/shared/CodeGenerator-shared.h
@@ -11,16 +11,17 @@
 #include "mozilla/Move.h"
 #include "mozilla/TypeTraits.h"
 
 #include "jit/JitFrames.h"
 #include "jit/LIR.h"
 #include "jit/MacroAssembler.h"
 #include "jit/MIRGenerator.h"
 #include "jit/MIRGraph.h"
+#include "jit/MIRInstruction.h"
 #include "jit/OptimizationTracking.h"
 #include "jit/Safepoints.h"
 #include "jit/Snapshots.h"
 #include "jit/VMFunctions.h"
 
 namespace js {
 namespace jit {
 
--- a/js/src/jit/shared/LIR-shared.h
+++ b/js/src/jit/shared/LIR-shared.h
@@ -5,16 +5,17 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_shared_LIR_shared_h
 #define jit_shared_LIR_shared_h
 
 #include "jsutil.h"
 
 #include "jit/AtomicOp.h"
+#include "jit/MIRInstruction.h"
 #include "jit/shared/Assembler-shared.h"
 
 // This file declares LIR instructions that are common to every platform.
 
 namespace js {
 namespace jit {
 
 class LBox : public LInstructionHelper<BOX_PIECES, 1, 0>
--- a/js/src/jit/shared/Lowering-shared-inl.h
+++ b/js/src/jit/shared/Lowering-shared-inl.h
@@ -6,16 +6,17 @@
 
 #ifndef jit_shared_Lowering_shared_inl_h
 #define jit_shared_Lowering_shared_inl_h
 
 #include "jit/shared/Lowering-shared.h"
 
 #include "jit/MIR.h"
 #include "jit/MIRGenerator.h"
+#include "jit/MIRInstruction.h"
 
 namespace js {
 namespace jit {
 
 void
 LIRGeneratorShared::emitAtUses(MInstruction* mir)
 {
     MOZ_ASSERT(mir->canEmitAtUses());
--- a/js/src/jit/shared/Lowering-shared.cpp
+++ b/js/src/jit/shared/Lowering-shared.cpp
@@ -3,16 +3,17 @@
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/shared/Lowering-shared-inl.h"
 
 #include "jit/LIR.h"
 #include "jit/MIR.h"
+#include "jit/MIRInstruction.h"
 
 #include "vm/Symbol.h"
 
 using namespace js;
 using namespace jit;
 
 bool
 LIRGeneratorShared::ShouldReorderCommutative(MDefinition* lhs, MDefinition* rhs, MInstruction* ins)
--- a/js/src/jit/shared/Lowering-shared.h
+++ b/js/src/jit/shared/Lowering-shared.h
@@ -7,16 +7,17 @@
 #ifndef jit_shared_Lowering_shared_h
 #define jit_shared_Lowering_shared_h
 
 // This file declares the structures that are used for attaching LIR to a
 // MIRGraph.
 
 #include "jit/LIR.h"
 #include "jit/MIRGenerator.h"
+#include "jit/MIRInstruction.h"
 
 namespace js {
 namespace jit {
 
 class MIRGenerator;
 class MIRGraph;
 class MDefinition;
 class MInstruction;
--- a/js/src/jit/x64/CodeGenerator-x64.cpp
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -4,16 +4,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/x64/CodeGenerator-x64.h"
 
 #include "mozilla/MathAlgorithms.h"
 
 #include "jit/MIR.h"
+#include "jit/MIRInstruction.h"
 
 #include "jsscriptinlines.h"
 
 #include "jit/MacroAssembler-inl.h"
 #include "jit/shared/CodeGenerator-shared-inl.h"
 
 using namespace js;
 using namespace js::jit;
--- a/js/src/jit/x64/LIR-x64.h
+++ b/js/src/jit/x64/LIR-x64.h
@@ -2,16 +2,18 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_x64_LIR_x64_h
 #define jit_x64_LIR_x64_h
 
+#include "jit/MIRInstruction.h"
+
 namespace js {
 namespace jit {
 
 // Given an untyped input, guards on whether it's a specific type and returns
 // the unboxed payload.
 class LUnboxBase : public LInstructionHelper<1, 1, 0>
 {
   public:
--- a/js/src/jit/x64/Lowering-x64.cpp
+++ b/js/src/jit/x64/Lowering-x64.cpp
@@ -2,16 +2,17 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/x64/Lowering-x64.h"
 
 #include "jit/MIR.h"
+#include "jit/MIRInstruction.h"
 #include "jit/x64/Assembler-x64.h"
 
 #include "jit/shared/Lowering-shared-inl.h"
 
 using namespace js;
 using namespace js::jit;
 
 LBoxAllocation
--- a/js/src/jit/x64/Lowering-x64.h
+++ b/js/src/jit/x64/Lowering-x64.h
@@ -2,16 +2,17 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_x64_Lowering_x64_h
 #define jit_x64_Lowering_x64_h
 
+#include "jit/MIRInstruction.h"
 #include "jit/x86-shared/Lowering-x86-shared.h"
 
 namespace js {
 namespace jit {
 
 class LIRGeneratorX64 : public LIRGeneratorX86Shared
 {
   public:
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -9,16 +9,17 @@
 #include "mozilla/DebugOnly.h"
 #include "mozilla/MathAlgorithms.h"
 
 #include "jsmath.h"
 
 #include "jit/JitCompartment.h"
 #include "jit/JitFrames.h"
 #include "jit/Linker.h"
+#include "jit/MIRInstruction.h"
 #include "jit/RangeAnalysis.h"
 #include "vm/TraceLogging.h"
 
 #include "jit/MacroAssembler-inl.h"
 #include "jit/shared/CodeGenerator-shared-inl.h"
 
 using namespace js;
 using namespace js::jit;
--- a/js/src/jit/x86-shared/Lowering-x86-shared.cpp
+++ b/js/src/jit/x86-shared/Lowering-x86-shared.cpp
@@ -4,16 +4,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/x86-shared/Lowering-x86-shared.h"
 
 #include "mozilla/MathAlgorithms.h"
 
 #include "jit/MIR.h"
+#include "jit/MIRInstruction.h"
 
 #include "jit/shared/Lowering-shared-inl.h"
 
 using namespace js;
 using namespace js::jit;
 
 using mozilla::Abs;
 using mozilla::FloorLog2;
--- a/js/src/jit/x86-shared/Lowering-x86-shared.h
+++ b/js/src/jit/x86-shared/Lowering-x86-shared.h
@@ -2,16 +2,17 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_x86_shared_Lowering_x86_shared_h
 #define jit_x86_shared_Lowering_x86_shared_h
 
+#include "jit/MIRInstruction.h"
 #include "jit/shared/Lowering-shared.h"
 
 namespace js {
 namespace jit {
 
 class LIRGeneratorX86Shared : public LIRGeneratorShared
 {
   protected:
--- a/js/src/jit/x86/Lowering-x86.cpp
+++ b/js/src/jit/x86/Lowering-x86.cpp
@@ -2,16 +2,17 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/x86/Lowering-x86.h"
 
 #include "jit/MIR.h"
+#include "jit/MIRInstruction.h"
 #include "jit/x86/Assembler-x86.h"
 
 #include "jit/shared/Lowering-shared-inl.h"
 
 using namespace js;
 using namespace js::jit;
 
 LBoxAllocation
--- a/js/src/jit/x86/Lowering-x86.h
+++ b/js/src/jit/x86/Lowering-x86.h
@@ -2,16 +2,17 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_x86_Lowering_x86_h
 #define jit_x86_Lowering_x86_h
 
+#include "jit/MIRInstruction.h"
 #include "jit/x86-shared/Lowering-x86-shared.h"
 
 namespace js {
 namespace jit {
 
 class LIRGeneratorX86 : public LIRGeneratorX86Shared
 {
   public:
--- a/js/src/jsapi-tests/testJitFoldsTo.cpp
+++ b/js/src/jsapi-tests/testJitFoldsTo.cpp
@@ -3,16 +3,17 @@
  */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/IonAnalysis.h"
 #include "jit/MIRGenerator.h"
 #include "jit/MIRGraph.h"
+#include "jit/MIRInstruction.h"
 #include "jit/ValueNumbering.h"
 
 #include "jsapi-tests/testJitMinimalFunc.h"
 #include "jsapi-tests/tests.h"
 
 using namespace js;
 using namespace js::jit;
 
--- a/js/src/jsapi-tests/testJitGVN.cpp
+++ b/js/src/jsapi-tests/testJitGVN.cpp
@@ -3,16 +3,17 @@
  */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/IonAnalysis.h"
 #include "jit/MIRGenerator.h"
 #include "jit/MIRGraph.h"
+#include "jit/MIRInstruction.h"
 #include "jit/RangeAnalysis.h"
 #include "jit/ValueNumbering.h"
 
 #include "jsapi-tests/testJitMinimalFunc.h"
 #include "jsapi-tests/tests.h"
 
 using namespace js;
 using namespace js::jit;
--- a/js/src/jsapi-tests/testJitRangeAnalysis.cpp
+++ b/js/src/jsapi-tests/testJitRangeAnalysis.cpp
@@ -5,16 +5,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "mozilla/ArrayUtils.h"
 
 #include "jit/IonAnalysis.h"
 #include "jit/MIRGenerator.h"
 #include "jit/MIRGraph.h"
+#include "jit/MIRInstruction.h"
 #include "jit/RangeAnalysis.h"
 
 #include "jsapi-tests/testJitMinimalFunc.h"
 #include "jsapi-tests/tests.h"
 
 using namespace js;
 using namespace js::jit;
 
new file mode 100644
--- /dev/null
+++ b/js/src/wasm/WasmBaselineCompile.cpp.orig
@@ -0,0 +1,8804 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* WebAssembly baseline compiler ("RabaldrMonkey")
+ *
+ * General status notes:
+ *
+ * "FIXME" indicates a known or suspected bug.  Always has a bug#.
+ *
+ * "TODO" indicates an opportunity for a general improvement, with an additional
+ * tag to indicate the area of improvement.  Usually has a bug#.
+ *
+ * There are lots of machine dependencies here but they are pretty well isolated
+ * to a segment of the compiler.  Many dependencies will eventually be factored
+ * into the MacroAssembler layer and shared with other code generators.
+ *
+ *
+ * High-value compiler performance improvements:
+ *
+ * - (Bug 1316802) The specific-register allocator (the needI32(r), needI64(r)
+ *   etc methods) can avoid syncing the value stack if the specific register is
+ *   in use but there is a free register to shuffle the specific register into.
+ *   (This will also improve the generated code.)  The sync happens often enough
+ *   here to show up in profiles, because it is triggered by integer multiply
+ *   and divide.
+ *
+ *
+ * High-value code generation improvements:
+ *
+ * - (Bug 1316804) brTable pessimizes by always dispatching to code that pops
+ *   the stack and then jumps to the code for the target case.  If no cleanup is
+ *   needed we could just branch conditionally to the target; if the same amount
+ *   of cleanup is needed for all cases then the cleanup can be done before the
+ *   dispatch.  Both are highly likely.
+ *
+ * - (Bug 1316806) Register management around calls: At the moment we sync the
+ *   value stack unconditionally (this is simple) but there are probably many
+ *   common cases where we could instead save/restore live caller-saves
+ *   registers and perform parallel assignment into argument registers.  This
+ *   may be important if we keep some locals in registers.
+ *
+ * - (Bug 1316808) Allocate some locals to registers on machines where there are
+ *   enough registers.  This is probably hard to do well in a one-pass compiler
+ *   but it might be that just keeping register arguments and the first few
+ *   locals in registers is a viable strategy; another (more general) strategy
+ *   is caching locals in registers in straight-line code.  Such caching could
+ *   also track constant values in registers, if that is deemed valuable.  A
+ *   combination of techniques may be desirable: parameters and the first few
+ *   locals could be cached on entry to the function but not statically assigned
+ *   to registers throughout.
+ *
+ *   (On a large corpus of code it should be possible to compute, for every
+ *   signature comprising the types of parameters and locals, and using a static
+ *   weight for loops, a list in priority order of which parameters and locals
+ *   that should be assigned to registers.  Or something like that.  Wasm makes
+ *   this simple.  Static assignments are desirable because they are not flushed
+ *   to memory by the pre-block sync() call.)
+ */
+
+#include "wasm/WasmBaselineCompile.h"
+
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Maybe.h"
+
+#include "jit/AtomicOp.h"
+#include "jit/IonTypes.h"
+#include "jit/JitAllocPolicy.h"
+#include "jit/Label.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MIR.h"
+#include "jit/RegisterAllocator.h"
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+#if defined(JS_CODEGEN_ARM)
+# include "jit/arm/Assembler-arm.h"
+#endif
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+# include "jit/x86-shared/Architecture-x86-shared.h"
+# include "jit/x86-shared/Assembler-x86-shared.h"
+#endif
+
+#include "wasm/WasmBinaryIterator.h"
+#include "wasm/WasmGenerator.h"
+#include "wasm/WasmSignalHandlers.h"
+#include "wasm/WasmValidate.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using mozilla::DebugOnly;
+using mozilla::FloatingPoint;
+using mozilla::FloorLog2;
+using mozilla::IsPowerOfTwo;
+using mozilla::Maybe;
+using mozilla::SpecificNaN;
+
+namespace js {
+namespace wasm {
+
+using namespace js::jit;
+using JS::GenericNaN;
+
+typedef bool HandleNaNSpecially;
+typedef bool InvertBranch;
+typedef bool IsKnownNotZero;
+typedef bool IsSigned;
+typedef bool IsUnsigned;
+typedef bool NeedsBoundsCheck;
+typedef bool PopStack;
+typedef bool WantResult;
+typedef bool ZeroOnOverflow;
+
+typedef unsigned ByteSize;
+typedef unsigned BitSize;
+
+// UseABI::Wasm implies that the Tls/Heap/Global registers are nonvolatile,
+// except when InterModule::True is also set, when they are volatile.
+//
+// UseABI::System implies that the Tls/Heap/Global registers are volatile.
+// Additionally, the parameter passing mechanism may be slightly different from
+// the UseABI::Wasm convention.
+//
+// When the Tls/Heap/Global registers are not volatile, the baseline compiler
+// will restore the Tls register from its save slot before the call, since the
+// baseline compiler uses the Tls register for other things.
+//
+// When those registers are volatile, the baseline compiler will reload them
+// after the call (it will restore the Tls register from the save slot and load
+// the other two from the Tls data).
+
+enum class UseABI { Wasm, System };
+enum class InterModule { False = false, True = true };
+
+#ifdef JS_CODEGEN_ARM64
+// FIXME: This is not correct, indeed for ARM64 there is no reliable
+// StackPointer and we'll need to change the abstractions that use
+// SP-relative addressing.  There's a guard in emitFunction() below to
+// prevent this workaround from having any consequence.  This hack
+// exists only as a stopgap; there is no ARM64 JIT support yet.
+static const Register StackPointer = RealStackPointer;
+#endif
+
+#ifdef JS_CODEGEN_X86
+// The selection of EBX here steps gingerly around: the need for EDX
+// to be allocatable for multiply/divide; ECX to be allocatable for
+// shift/rotate; EAX (= ReturnReg) to be allocatable as the joinreg;
+// EBX not being one of the WasmTableCall registers; and needing a
+// temp register for load/store that has a single-byte persona.
+//
+// The compiler assumes that ScratchRegX86 has a single-byte persona.
+// Code for 8-byte atomic operations assumes that ScratchRegX86 is in
+// fact ebx.
+static const Register ScratchRegX86 = ebx;
+
+# define RABALDR_INT_DIV_I64_CALLOUT
+#endif
+
+#ifdef JS_CODEGEN_ARM
+// We need a temp for funcPtrCall.  It can't be any of the
+// WasmTableCall registers, an argument register, or a scratch
+// register, and probably should not be ReturnReg.
+static const Register FuncPtrCallTemp = CallTempReg1;
+
+// We use our own scratch register, because the macro assembler uses
+// the regular scratch register(s) pretty liberally.  We could
+// work around that in several cases but the mess does not seem
+// worth it yet.  CallTempReg2 seems safe.
+static const Register ScratchRegARM = CallTempReg2;
+
+# define RABALDR_INT_DIV_I64_CALLOUT
+# define RABALDR_I64_TO_FLOAT_CALLOUT
+# define RABALDR_FLOAT_TO_I64_CALLOUT
+#endif
+
+template<MIRType t>
+struct RegTypeOf {
+    static_assert(t == MIRType::Float32 || t == MIRType::Double, "Float mask type");
+};
+
+template<> struct RegTypeOf<MIRType::Float32> {
+    static constexpr RegTypeName value = RegTypeName::Float32;
+};
+template<> struct RegTypeOf<MIRType::Double> {
+    static constexpr RegTypeName value = RegTypeName::Float64;
+};
+
+BaseLocalIter::BaseLocalIter(const ValTypeVector& locals,
+                             size_t argsLength,
+                             bool debugEnabled)
+  : locals_(locals),
+    argsLength_(argsLength),
+    argsRange_(locals.begin(), argsLength),
+    argsIter_(argsRange_),
+    index_(0),
+    localSize_(debugEnabled ? DebugFrame::offsetOfFrame() : 0),
+    reservedSize_(localSize_),
+    done_(false)
+{
+    MOZ_ASSERT(argsLength <= locals.length());
+
+    settle();
+}
+
+// Reserve `nbytes` in the local area, naturally aligning 8- and 16-byte
+// slots, and return the frame offset of the new slot.
+int32_t
+BaseLocalIter::pushLocal(size_t nbytes)
+{
+    if (nbytes == 8)
+        localSize_ = AlignBytes(localSize_, 8u);
+    else if (nbytes == 16)
+        localSize_ = AlignBytes(localSize_, 16u);
+    localSize_ += nbytes;
+    return localSize_;          // Locals grow down so capture base address
+}
+
+// Compute mirType_ and frameOffset_ for the local at index_, or set done_
+// when index_ is past the last local.  Parameters that arrived in registers
+// get a slot in the local area; parameters that arrived on the stack are
+// addressed at a negative offset into the caller's frame (offset from the
+// arg base plus the Frame header -- presumably the incoming argument area;
+// confirm against Frame layout).
+void
+BaseLocalIter::settle()
+{
+    if (index_ < argsLength_) {
+        MOZ_ASSERT(!argsIter_.done());
+        mirType_ = argsIter_.mirType();
+        switch (mirType_) {
+          case MIRType::Int32:
+            if (argsIter_->argInRegister())
+                frameOffset_ = pushLocal(4);
+            else
+                frameOffset_ = -(argsIter_->offsetFromArgBase() + sizeof(Frame));
+            break;
+          case MIRType::Int64:
+            if (argsIter_->argInRegister())
+                frameOffset_ = pushLocal(8);
+            else
+                frameOffset_ = -(argsIter_->offsetFromArgBase() + sizeof(Frame));
+            break;
+          case MIRType::Double:
+            if (argsIter_->argInRegister())
+                frameOffset_ = pushLocal(8);
+            else
+                frameOffset_ = -(argsIter_->offsetFromArgBase() + sizeof(Frame));
+            break;
+          case MIRType::Float32:
+            if (argsIter_->argInRegister())
+                frameOffset_ = pushLocal(4);
+            else
+                frameOffset_ = -(argsIter_->offsetFromArgBase() + sizeof(Frame));
+            break;
+          default:
+            MOZ_CRASH("Argument type");
+        }
+        return;
+    }
+
+    MOZ_ASSERT(argsIter_.done());
+    // Non-parameter locals always live in the local area.
+    if (index_ < locals_.length()) {
+        switch (locals_[index_]) {
+          case ValType::I32:
+            mirType_ = jit::MIRType::Int32;
+            frameOffset_ = pushLocal(4);
+            break;
+          case ValType::F32:
+            mirType_ = jit::MIRType::Float32;
+            frameOffset_ = pushLocal(4);
+            break;
+          case ValType::F64:
+            mirType_ = jit::MIRType::Double;
+            frameOffset_ = pushLocal(8);
+            break;
+          case ValType::I64:
+            mirType_ = jit::MIRType::Int64;
+            frameOffset_ = pushLocal(8);
+            break;
+          default:
+            MOZ_CRASH("Compiler bug: Unexpected local type");
+        }
+        return;
+    }
+
+    done_ = true;
+}
+
+// Advance to the next local.  The argument sub-iterator is advanced in
+// lockstep while it still has entries; settle() recomputes the cached state.
+void
+BaseLocalIter::operator++(int)
+{
+    MOZ_ASSERT(!done_);
+    index_++;
+    if (!argsIter_.done())
+        argsIter_++;
+    settle();
+}
+
+// The strongly typed register wrappers are especially useful to distinguish
+// float registers from double registers, but they also clearly distinguish
+// 32-bit registers from 64-bit register pairs on 32-bit systems.
+
+// Strongly-typed wrapper for a 32-bit integer register.
+struct RegI32 : public Register
+{
+    // Default-constructs to the invalid-register sentinel.
+    RegI32() : Register(Register::Invalid()) {}
+    explicit RegI32(Register reg) : Register(reg) {}
+};
+
+// Strongly-typed wrapper for a 64-bit integer register (a pair on 32-bit
+// platforms).
+struct RegI64 : public Register64
+{
+    // Default-constructs to the invalid-register sentinel.
+    RegI64() : Register64(Register64::Invalid()) {}
+    explicit RegI64(Register64 reg) : Register64(reg) {}
+};
+
+// Strongly-typed wrapper for a single-precision float register.
+struct RegF32 : public FloatRegister
+{
+    RegF32() : FloatRegister() {}
+    explicit RegF32(FloatRegister reg) : FloatRegister(reg) {}
+};
+
+// Strongly-typed wrapper for a double-precision float register.
+struct RegF64 : public FloatRegister
+{
+    RegF64() : FloatRegister() {}
+    explicit RegF64(FloatRegister reg) : FloatRegister(reg) {}
+};
+
+// Tagged union over the four typed register wrappers.  The tag is written
+// before the union member in each constructor, and each accessor asserts
+// the tag before reading the corresponding member.
+struct AnyReg
+{
+    explicit AnyReg(RegI32 r) { tag = I32; i32_ = r; }
+    explicit AnyReg(RegI64 r) { tag = I64; i64_ = r; }
+    explicit AnyReg(RegF32 r) { tag = F32; f32_ = r; }
+    explicit AnyReg(RegF64 r) { tag = F64; f64_ = r; }
+
+    RegI32 i32() const {
+        MOZ_ASSERT(tag == I32);
+        return i32_;
+    }
+    RegI64 i64() const {
+        MOZ_ASSERT(tag == I64);
+        return i64_;
+    }
+    RegF32 f32() const {
+        MOZ_ASSERT(tag == F32);
+        return f32_;
+    }
+    RegF64 f64() const {
+        MOZ_ASSERT(tag == F64);
+        return f64_;
+    }
+    // Convert to the generic AnyRegister used elsewhere in the JIT.
+    AnyRegister any() const {
+        switch (tag) {
+          case F32: return AnyRegister(f32_);
+          case F64: return AnyRegister(f64_);
+          case I32: return AnyRegister(i32_);
+          case I64:
+#ifdef JS_PUNBOX64
+            return AnyRegister(i64_.reg);
+#else
+            // The compiler is written so that this is never needed: any() is
+            // called on arbitrary registers for asm.js but asm.js does not have
+            // 64-bit ints.  For wasm, any() is called on arbitrary registers
+            // only on 64-bit platforms.
+            MOZ_CRASH("AnyReg::any() on 32-bit platform");
+#endif
+          default:
+            MOZ_CRASH();
+        }
+        // Work around GCC 5 analysis/warning bug.
+        MOZ_CRASH("AnyReg::any(): impossible case");
+    }
+
+    union {
+        RegI32 i32_;
+        RegI64 i64_;
+        RegF32 f32_;
+        RegF64 f64_;
+    };
+    enum { I32, I64, F32, F64 } tag;
+};
+
+// Minimal interface through which the register allocator can ask the
+// compiler to free up registers by flushing the value stack.
+class BaseCompilerInterface
+{
+  public:
+    // Spill all spillable registers.
+    //
+    // TODO / OPTIMIZE (Bug 1316802): It's possible to do better here by
+    // spilling only enough registers to satisfy current needs.
+    virtual void sync() = 0;
+};
+
+// Register allocator.
+
+// Simple take/free register allocator over the platform's allocatable GPR
+// and FPU sets.  When a need*() request cannot be satisfied it asks the
+// compiler (via BaseCompilerInterface::sync) to spill, then allocates.
+class BaseRegAlloc
+{
+    // Notes on float register allocation.
+    //
+    // The general rule in SpiderMonkey is that float registers can alias double
+    // registers, but there are predicates to handle exceptions to that rule:
+    // hasUnaliasedDouble() and hasMultiAlias().  The way aliasing actually
+    // works is platform dependent and exposed through the aliased(n, &r)
+    // predicate, etc.
+    //
+    //  - hasUnaliasedDouble(): on ARM VFPv3-D32 there are double registers that
+    //    cannot be treated as float.
+    //  - hasMultiAlias(): on ARM and MIPS a double register aliases two float
+    //    registers.
+    //
+    // On some platforms (x86, x64, ARM64) but not all (ARM)
+    // ScratchFloat32Register is the same as ScratchDoubleRegister.
+    //
+    // It's a basic invariant of the AllocatableRegisterSet that it deals
+    // properly with aliasing of registers: if s0 or s1 are allocated then d0 is
+    // not allocatable; if s0 and s1 are freed individually then d0 becomes
+    // allocatable.
+
+    BaseCompilerInterface&        bc;
+    AllocatableGeneralRegisterSet availGPR;
+    AllocatableFloatRegisterSet   availFPU;
+#ifdef DEBUG
+    AllocatableGeneralRegisterSet allGPR;       // The registers available to the compiler
+    AllocatableFloatRegisterSet   allFPU;       //   after removing ScratchReg, HeapReg, etc
+    bool                          scratchTaken;
+#endif
+#ifdef JS_CODEGEN_X86
+    AllocatableGeneralRegisterSet singleByteRegs;
+#endif
+
+    bool hasGPR() {
+        return !availGPR.empty();
+    }
+
+    // True if a full 64-bit integer value can be allocated: one GPR on
+    // 64-bit platforms, two GPRs on 32-bit platforms.
+    bool hasGPR64() {
+#ifdef JS_PUNBOX64
+        return !availGPR.empty();
+#else
+        if (availGPR.empty())
+            return false;
+        // Probe for a second free GPR by temporarily taking one.
+        Register r = allocGPR();
+        bool available = !availGPR.empty();
+        freeGPR(r);
+        return available;
+#endif
+    }
+
+    template<MIRType t>
+    bool hasFPU() {
+        return availFPU.hasAny<RegTypeOf<t>::value>();
+    }
+
+    bool isAvailableGPR(Register r) {
+        return availGPR.has(r);
+    }
+
+    bool isAvailableFPU(FloatRegister r) {
+        return availFPU.has(r);
+    }
+
+    // Take a specific GPR; it must be available.
+    void allocGPR(Register r) {
+        MOZ_ASSERT(isAvailableGPR(r));
+        availGPR.take(r);
+    }
+
+    Register allocGPR() {
+        MOZ_ASSERT(hasGPR());
+        return availGPR.takeAny();
+    }
+
+    void allocInt64(Register64 r) {
+#ifdef JS_PUNBOX64
+        allocGPR(r.reg);
+#else
+        allocGPR(r.low);
+        allocGPR(r.high);
+#endif
+    }
+
+    Register64 allocInt64() {
+        MOZ_ASSERT(hasGPR64());
+#ifdef JS_PUNBOX64
+        return Register64(availGPR.takeAny());
+#else
+        // On 32-bit platforms a 64-bit value occupies two GPRs.
+        Register high = availGPR.takeAny();
+        Register low = availGPR.takeAny();
+        return Register64(high, low);
+#endif
+    }
+
+#ifdef JS_CODEGEN_ARM
+    // r12 is normally the ScratchRegister and r13 is always the stack pointer,
+    // so the highest possible pair has r10 as the even-numbered register.
+
+    static const uint32_t pairLimit = 10;
+
+    // True if an (even, odd) consecutive register pair is free.
+    bool hasGPRPair() {
+        for (uint32_t i = 0; i <= pairLimit; i += 2) {
+            if (isAvailableGPR(Register::FromCode(i)) && isAvailableGPR(Register::FromCode(i + 1)))
+                return true;
+        }
+        return false;
+    }
+
+    // Allocate the lowest free (even, odd) register pair.
+    void allocGPRPair(Register* low, Register* high) {
+        MOZ_ASSERT(hasGPRPair());
+        for (uint32_t i = 0; i <= pairLimit; i += 2) {
+            if (isAvailableGPR(Register::FromCode(i)) &&
+                isAvailableGPR(Register::FromCode(i + 1)))
+            {
+                *low = Register::FromCode(i);
+                *high = Register::FromCode(i + 1);
+                allocGPR(*low);
+                allocGPR(*high);
+                return;
+            }
+        }
+        MOZ_CRASH("No pair");
+    }
+#endif
+
+    void allocFPU(FloatRegister r) {
+        MOZ_ASSERT(isAvailableFPU(r));
+        availFPU.take(r);
+    }
+
+    template<MIRType t>
+    FloatRegister allocFPU() {
+        return availFPU.takeAny<RegTypeOf<t>::value>();
+    }
+
+    void freeGPR(Register r) {
+        availGPR.add(r);
+    }
+
+    void freeInt64(Register64 r) {
+#ifdef JS_PUNBOX64
+        freeGPR(r.reg);
+#else
+        freeGPR(r.low);
+        freeGPR(r.high);
+#endif
+    }
+
+    void freeFPU(FloatRegister r) {
+        availFPU.add(r);
+    }
+
+  public:
+    // Start from the full register sets, then remove the wasm-reserved
+    // registers and our dedicated per-platform scratch register.
+    explicit BaseRegAlloc(BaseCompilerInterface& bc)
+      : bc(bc)
+      , availGPR(GeneralRegisterSet::All())
+      , availFPU(FloatRegisterSet::All())
+#ifdef DEBUG
+      , scratchTaken(false)
+#endif
+#ifdef JS_CODEGEN_X86
+      , singleByteRegs(GeneralRegisterSet(Registers::SingleByteRegs))
+#endif
+    {
+        RegisterAllocator::takeWasmRegisters(availGPR);
+
+#if defined(JS_CODEGEN_ARM)
+        availGPR.take(ScratchRegARM);
+#elif defined(JS_CODEGEN_X86)
+        availGPR.take(ScratchRegX86);
+#endif
+
+#ifdef DEBUG
+        // Snapshot the initial sets so LeakCheck can verify that everything
+        // is eventually returned.
+        allGPR = availGPR;
+        allFPU = availFPU;
+#endif
+    }
+
+#ifdef DEBUG
+    bool scratchRegisterTaken() const {
+        return scratchTaken;
+    }
+
+    void setScratchRegisterTaken(bool state) {
+        scratchTaken = state;
+    }
+#endif
+
+#ifdef JS_CODEGEN_X86
+    // True if r can be addressed as a single byte (needed by some ops).
+    bool isSingleByteI32(Register r) {
+        return singleByteRegs.has(r);
+    }
+#endif
+
+    bool isAvailableI32(RegI32 r) {
+        return isAvailableGPR(r);
+    }
+
+    bool isAvailableI64(RegI64 r) {
+#ifdef JS_PUNBOX64
+        return isAvailableGPR(r.reg);
+#else
+        return isAvailableGPR(r.low) && isAvailableGPR(r.high);
+#endif
+    }
+
+    bool isAvailableF32(RegF32 r) {
+        return isAvailableFPU(r);
+    }
+
+    bool isAvailableF64(RegF64 r) {
+        return isAvailableFPU(r);
+    }
+
+    // TODO / OPTIMIZE (Bug 1316802): Do not sync everything on allocation
+    // failure, only as much as we need.
+
+    MOZ_MUST_USE RegI32 needI32() {
+        if (!hasGPR())
+            bc.sync();
+        return RegI32(allocGPR());
+    }
+
+    // Allocate a specific register, spilling first if it is in use.
+    void needI32(RegI32 specific) {
+        if (!isAvailableI32(specific))
+            bc.sync();
+        allocGPR(specific);
+    }
+
+    MOZ_MUST_USE RegI64 needI64() {
+        if (!hasGPR64())
+            bc.sync();
+        return RegI64(allocInt64());
+    }
+
+    void needI64(RegI64 specific) {
+        if (!isAvailableI64(specific))
+            bc.sync();
+        allocInt64(specific);
+    }
+
+    MOZ_MUST_USE RegF32 needF32() {
+        if (!hasFPU<MIRType::Float32>())
+            bc.sync();
+        return RegF32(allocFPU<MIRType::Float32>());
+    }
+
+    void needF32(RegF32 specific) {
+        if (!isAvailableF32(specific))
+            bc.sync();
+        allocFPU(specific);
+    }
+
+    MOZ_MUST_USE RegF64 needF64() {
+        if (!hasFPU<MIRType::Double>())
+            bc.sync();
+        return RegF64(allocFPU<MIRType::Double>());
+    }
+
+    void needF64(RegF64 specific) {
+        if (!isAvailableF64(specific))
+            bc.sync();
+        allocFPU(specific);
+    }
+
+    void freeI32(RegI32 r) {
+        freeGPR(r);
+    }
+
+    void freeI64(RegI64 r) {
+        freeInt64(r);
+    }
+
+    void freeF64(RegF64 r) {
+        freeFPU(r);
+    }
+
+    void freeF32(RegF32 r) {
+        freeFPU(r);
+    }
+
+#ifdef JS_CODEGEN_ARM
+    // Allocate an (even, odd) register pair as a 64-bit value; spill first
+    // if no pair is free.
+    MOZ_MUST_USE RegI64 needI64Pair() {
+        if (!hasGPRPair())
+            bc.sync();
+        Register low, high;
+        allocGPRPair(&low, &high);
+        return RegI64(Register64(high, low));
+    }
+#endif
+
+#ifdef DEBUG
+    friend class LeakCheck;
+
+    // Debug-only scope that checks, at destruction, that every register the
+    // allocator started with is either still available or was reported via
+    // an addKnown*() call -- i.e. no register has leaked.
+    class MOZ_RAII LeakCheck
+    {
+      private:
+        const BaseRegAlloc&           ra;
+        AllocatableGeneralRegisterSet knownGPR;
+        AllocatableFloatRegisterSet   knownFPU;
+
+      public:
+        explicit LeakCheck(const BaseRegAlloc& ra) : ra(ra) {
+            knownGPR = ra.availGPR;
+            knownFPU = ra.availFPU;
+        }
+
+        ~LeakCheck() {
+            MOZ_ASSERT(knownGPR.bits() == ra.allGPR.bits());
+            MOZ_ASSERT(knownFPU.bits() == ra.allFPU.bits());
+        }
+
+        void addKnownI32(RegI32 r) {
+            knownGPR.add(r);
+        }
+
+        void addKnownI64(RegI64 r) {
+# ifdef JS_PUNBOX64
+            knownGPR.add(r.reg);
+# else
+            knownGPR.add(r.high);
+            knownGPR.add(r.low);
+# endif
+        }
+
+        void addKnownF32(RegF32 r) {
+            knownFPU.add(r);
+        }
+
+        void addKnownF64(RegF64 r) {
+            knownFPU.add(r);
+        }
+    };
+#endif
+};
+
+// ScratchRegister abstractions.  We define our own, deferring to the platform's
+// when possible.
+
+#if defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_NONE)
+// Placeholder scratch scopes for platforms without baseline-compiler
+// support: constructing one is harmless, but using the register crashes.
+class ScratchDoubleScope
+{
+  public:
+    explicit ScratchDoubleScope(MacroAssembler& m) {}
+    operator FloatRegister() const {
+        MOZ_CRASH("BaseCompiler platform hook - ScratchDoubleScope");
+    }
+};
+
+class ScratchFloat32Scope
+{
+  public:
+    explicit ScratchFloat32Scope(MacroAssembler& m) {}
+    operator FloatRegister() const {
+        MOZ_CRASH("BaseCompiler platform hook - ScratchFloat32Scope");
+    }
+};
+
+class ScratchRegisterScope
+{
+  public:
+    explicit ScratchRegisterScope(MacroAssembler& m) {}
+    operator Register() const {
+        MOZ_CRASH("BaseCompiler platform hook - ScratchRegisterScope");
+    }
+};
+#endif
+
+// Typed wrapper over the platform's double scratch-register scope.
+class ScratchF64 : public ScratchDoubleScope
+{
+  public:
+    explicit ScratchF64(MacroAssembler& m) : ScratchDoubleScope(m) {}
+    operator RegF64() const { return RegF64(FloatRegister(*this)); }
+};
+
+// Typed wrapper over the platform's float32 scratch-register scope.
+class ScratchF32 : public ScratchFloat32Scope
+{
+  public:
+    explicit ScratchF32(MacroAssembler& m) : ScratchFloat32Scope(m) {}
+    operator RegF32() const { return RegF32(FloatRegister(*this)); }
+};
+
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+// On x86 we do not have a dedicated masm scratch register; on ARM, we need one
+// in addition to the one defined by masm because masm uses it too often.
+class ScratchI32
+{
+# ifdef DEBUG
+    // In DEBUG builds, assert (via the allocator's flag) that at most one
+    // ScratchI32 is live at a time.
+    BaseRegAlloc& ra;
+  public:
+    explicit ScratchI32(BaseRegAlloc& ra) : ra(ra) {
+        MOZ_ASSERT(!ra.scratchRegisterTaken());
+        ra.setScratchRegisterTaken(true);
+    }
+    ~ScratchI32() {
+        MOZ_ASSERT(ra.scratchRegisterTaken());
+        ra.setScratchRegisterTaken(false);
+    }
+# else
+  public:
+    explicit ScratchI32(BaseRegAlloc&) {}
+# endif
+
+    operator RegI32() const {
+# ifdef JS_CODEGEN_X86
+        return RegI32(ScratchRegX86);
+# else
+        return RegI32(ScratchRegARM);
+# endif
+    }
+};
+#else
+// Elsewhere, defer to the platform's scratch-register scope.
+class ScratchI32 : public ScratchRegisterScope
+{
+  public:
+    explicit ScratchI32(MacroAssembler& m) : ScratchRegisterScope(m) {}
+    operator RegI32() const { return RegI32(Register(*this)); }
+};
+#endif
+
+#if defined(JS_CODEGEN_X86)
+// ScratchEBX is a mnemonic device: For some atomic ops we really need EBX,
+// no other register will do.  And we would normally have to allocate that
+// register using ScratchI32 since normally the scratch register is EBX.
+// But the whole point of ScratchI32 is to hide that relationship.  By using
+// the ScratchEBX alias, we document that at that point we require the
+// scratch register to be EBX.
+// Note: this only works because ScratchRegX86 is defined to be ebx above.
+typedef ScratchI32 ScratchEBX;
+#endif
+
+class BaseCompiler final : public BaseCompilerInterface
+{
+    typedef Vector<NonAssertingLabel, 8, SystemAllocPolicy> LabelVector;
+    typedef Vector<MIRType, 8, SystemAllocPolicy> MIRTypeVector;
+
+    // Describes one local: its MIRType and its zero-based frame offset.
+    // A default-constructed Local is uninitialized (MIRType::None,
+    // UINT32_MAX) and its accessors assert until init() or the two-arg
+    // constructor has run.
+    struct Local
+    {
+        Local() : type_(MIRType::None), offs_(UINT32_MAX) {}
+        Local(MIRType type, uint32_t offs) : type_(type), offs_(offs) {}
+
+        // Note: the parameters deliberately shadow the member names.
+        void init(MIRType type_, uint32_t offs_) {
+            this->type_ = type_;
+            this->offs_ = offs_;
+        }
+
+        MIRType  type_;              // Type of the value, or MIRType::None
+        uint32_t offs_;              // Zero-based frame offset of value, or UINT32_MAX
+
+        MIRType type() const { MOZ_ASSERT(type_ != MIRType::None); return type_; }
+        uint32_t offs() const { MOZ_ASSERT(offs_ != UINT32_MAX); return offs_; }
+    };
+
+    // Bit set used for simple bounds check elimination.  Capping this at 64
+    // locals makes sense; even 32 locals would probably be OK in practice.
+    //
+    // For more information about BCE, see the block comment above
+    // popMemoryAccess(), below.
+
+    typedef uint64_t BCESet;
+
+    // Control node, representing labels and stack heights at join points.
+
+    // Per-control-region state.  Default-constructed fields use sentinel
+    // values (UINT32_MAX); bceSafeOnExit starts as all-ones so that merges
+    // can only narrow it.
+    struct Control
+    {
+        Control()
+            : framePushed(UINT32_MAX),
+              stackSize(UINT32_MAX),
+              bceSafeOnEntry(0),
+              bceSafeOnExit(~BCESet(0)),
+              deadOnArrival(false),
+              deadThenBranch(false)
+        {}
+
+        NonAssertingLabel label;        // The "exit" label
+        NonAssertingLabel otherLabel;   // Used for the "else" branch of if-then-else
+        uint32_t framePushed;           // From masm
+        uint32_t stackSize;             // Value stack height
+        BCESet bceSafeOnEntry;          // Bounds check info flowing into the item
+        BCESet bceSafeOnExit;           // Bounds check info flowing out of the item
+        bool deadOnArrival;             // deadCode_ was set on entry to the region
+        bool deadThenBranch;            // deadCode_ was set on exit from "then"
+    };
+
+    // Policy type plugged into OpIter: no per-value tracking, but Control
+    // records are attached to the iterator's control stack.
+    struct BaseCompilePolicy
+    {
+        // The baseline compiler tracks values on a stack of its own -- it
+        // needs to scan that stack for spilling -- and thus has no need
+        // for the values maintained by the iterator.
+        typedef Nothing Value;
+
+        // The baseline compiler uses the iterator's control stack, attaching
+        // its own control information.
+        typedef Control ControlItem;
+    };
+
+    typedef OpIter<BaseCompilePolicy> BaseOpIter;
+
+    // The baseline compiler will use OOL code more sparingly than
+    // Baldr since our code is not high performance and frills like
+    // code density and branch prediction friendliness will be less
+    // important.
+
+    // Base class for out-of-line code stubs: records entry/rejoin labels and
+    // the frame depth at creation, which bind() restores before the subclass's
+    // generate() emits the stub body.
+    class OutOfLineCode : public TempObject
+    {
+      private:
+        NonAssertingLabel entry_;
+        NonAssertingLabel rejoin_;
+        uint32_t framePushed_;   // UINT32_MAX until setFramePushed() is called
+
+      public:
+        OutOfLineCode() : framePushed_(UINT32_MAX) {}
+
+        Label* entry() { return &entry_; }
+        Label* rejoin() { return &rejoin_; }
+
+        // May be called exactly once.
+        void setFramePushed(uint32_t framePushed) {
+            MOZ_ASSERT(framePushed_ == UINT32_MAX);
+            framePushed_ = framePushed;
+        }
+
+        void bind(MacroAssembler& masm) {
+            MOZ_ASSERT(framePushed_ != UINT32_MAX);
+            masm.bind(&entry_);
+            masm.setFramePushed(framePushed_);
+        }
+
+        // The generate() method must be careful about register use
+        // because it will be invoked when there is a register
+        // assignment in the BaseCompiler that does not correspond
+        // to the available registers when the generated OOL code is
+        // executed.  The register allocator *must not* be called.
+        //
+        // The best strategy is for the creator of the OOL object to
+        // allocate all temps that the OOL code will need.
+        //
+        // Input, output, and temp registers are embedded in the OOL
+        // object and are known to the code generator.
+        //
+        // Scratch registers are available to use in OOL code.
+        //
+        // All other registers must be explicitly saved and restored
+        // by the OOL code before being used.
+
+        virtual void generate(MacroAssembler& masm) = 0;
+    };
+
+    enum class LatentOp {
+        None,
+        Compare,
+        Eqz
+    };
+
+    // Flags describing which memory-access checks may be omitted; all
+    // default to false (i.e. perform every check).
+    struct AccessCheck {
+        AccessCheck()
+          : omitBoundsCheck(false),
+            omitAlignmentCheck(false),
+            onlyPointerAlignment(false)
+        {}
+
+        // If `omitAlignmentCheck` is true then we need check neither the
+        // pointer nor the offset.  Otherwise, if `onlyPointerAlignment` is true
+        // then we need check only the pointer.  Otherwise, check the sum of
+        // pointer and offset.
+
+        bool omitBoundsCheck;
+        bool omitAlignmentCheck;
+        bool onlyPointerAlignment;
+    };
+
+    const ModuleEnvironment&    env_;
+    BaseOpIter                  iter_;
+    const FuncCompileInput&     func_;
+    size_t                      lastReadCallSite_;
+    TempAllocator&              alloc_;
+    const ValTypeVector&        locals_;         // Types of parameters and locals
+    int32_t                     localSize_;      // Size of local area in bytes (stable after beginFunction)
+    int32_t                     varLow_;         // Low byte offset of local area for true locals (not parameters)
+    int32_t                     varHigh_;        // High byte offset + 1 of local area for true locals
+    int32_t                     maxFramePushed_; // Max value of masm.framePushed() observed
+    bool                        deadCode_;       // Flag indicating we should decode & discard the opcode
+    bool                        debugEnabled_;
+    BCESet                      bceSafe_;        // Locals that have been bounds checked and not updated since
+    ValTypeVector               SigD_;
+    ValTypeVector               SigF_;
+    MIRTypeVector               SigP_;
+    MIRTypeVector               SigPI_;
+    MIRTypeVector               SigPII_;
+    MIRTypeVector               SigPIIL_;
+    MIRTypeVector               SigPILL_;
+    NonAssertingLabel           returnLabel_;
+    NonAssertingLabel           stackOverflowLabel_;
+    CodeOffset                  stackAddOffset_;
+    CompileMode                 mode_;
+
+    LatentOp                    latentOp_;       // Latent operation for branch (seen next)
+    ValType                     latentType_;     // Operand type, if latentOp_ is true
+    Assembler::Condition        latentIntCmp_;   // Comparison operator, if latentOp_ == Compare, int types
+    Assembler::DoubleCondition  latentDoubleCmp_;// Comparison operator, if latentOp_ == Compare, float types
+
+    FuncOffsets                 offsets_;
+    MacroAssembler&             masm;            // No '_' suffix - too tedious...
+    BaseRegAlloc                ra;              // Ditto
+
+    Vector<Local, 8, SystemAllocPolicy> localInfo_;
+    Vector<OutOfLineCode*, 8, SystemAllocPolicy> outOfLine_;
+
+    // On specific platforms we sometimes need to use specific registers.
+
+#ifdef JS_CODEGEN_X64
+    RegI64 specific_rax;
+    RegI64 specific_rcx;
+    RegI64 specific_rdx;
+#endif
+
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+    RegI32 specific_eax;
+    RegI32 specific_ecx;
+    RegI32 specific_edx;
+    RegI32 specific_edi;
+    RegI32 specific_esi;
+#endif
+
+#if defined(JS_CODEGEN_X86)
+    RegI64 specific_ecx_ebx;
+    RegI64 specific_edx_eax;
+#endif
+
+#if !defined(JS_PUNBOX64)
+    RegI64 abiReturnRegI64;
+#endif
+
+    // The join registers are used to carry values out of blocks.
+    // joinRegI32 and joinRegI64 must overlap: emitBrIf and
+    // emitBrTable assume that.
+
+    RegI32 joinRegI32;
+    RegI64 joinRegI64;
+    RegF32 joinRegF32;
+    RegF64 joinRegF64;
+
+    // There are more members scattered throughout.
+
+  public:
+    BaseCompiler(const ModuleEnvironment& env,
+                 Decoder& decoder,
+                 const FuncCompileInput& input,
+                 const ValTypeVector& locals,
+                 bool debugEnabled,
+                 TempAllocator* alloc,
+                 MacroAssembler* masm,
+                 CompileMode mode);
+
+    MOZ_MUST_USE bool init();
+
+    FuncOffsets finish();
+
+    MOZ_MUST_USE bool emitFunction();
+    void emitInitStackLocals();
+
+    const SigWithId& sig() const { return *env_.funcSigs[func_.index]; }
+
+    // Used by some of the ScratchRegister implementations.
+    operator MacroAssembler&() const { return masm; }
+    operator BaseRegAlloc&() { return ra; }
+
+  private:
+
+    ////////////////////////////////////////////////////////////
+    //
+    // Out of line code management.
+
+    // Register an OOL stub, stamping it with the current frame depth.
+    // Returns nullptr on OOM (either a null `ool` or a failed append).
+    MOZ_MUST_USE OutOfLineCode* addOutOfLineCode(OutOfLineCode* ool) {
+        if (!ool || !outOfLine_.append(ool))
+            return nullptr;
+        ool->setFramePushed(masm.framePushed());
+        return ool;
+    }
+
+    // Emit all registered OOL stubs in registration order.  Returns false
+    // if the assembler ran out of memory at any point.
+    MOZ_MUST_USE bool generateOutOfLineCode() {
+        for (uint32_t i = 0; i < outOfLine_.length(); i++) {
+            OutOfLineCode* ool = outOfLine_[i];
+            ool->bind(masm);
+            ool->generate(masm);
+        }
+
+        return !masm.oom();
+    }
+
+    ////////////////////////////////////////////////////////////
+    //
+    // The stack frame.
+
+    // SP-relative load and store.
+
+    // Translate a frame-relative local offset into an SP-relative offset
+    // at the current masm.framePushed() depth.
+    int32_t localOffsetToSPOffset(int32_t offset) {
+        return masm.framePushed() - offset;
+    }
+
+    // Typed SP-relative frame stores, one per value representation.
+    void storeToFrameI32(Register r, int32_t offset) {
+        masm.store32(r, Address(StackPointer, localOffsetToSPOffset(offset)));
+    }
+
+    void storeToFrameI64(Register64 r, int32_t offset) {
+        masm.store64(r, Address(StackPointer, localOffsetToSPOffset(offset)));
+    }
+
+    void storeToFramePtr(Register r, int32_t offset) {
+        masm.storePtr(r, Address(StackPointer, localOffsetToSPOffset(offset)));
+    }
+
+    void storeToFrameF64(FloatRegister r, int32_t offset) {
+        masm.storeDouble(r, Address(StackPointer, localOffsetToSPOffset(offset)));
+    }
+
+    void storeToFrameF32(FloatRegister r, int32_t offset) {
+        masm.storeFloat32(r, Address(StackPointer, localOffsetToSPOffset(offset)));
+    }
+
+    // Typed SP-relative frame loads, mirroring the stores above.
+    void loadFromFrameI32(Register r, int32_t offset) {
+        masm.load32(Address(StackPointer, localOffsetToSPOffset(offset)), r);
+    }
+
+    void loadFromFrameI64(Register64 r, int32_t offset) {
+        masm.load64(Address(StackPointer, localOffsetToSPOffset(offset)), r);
+    }
+
+    void loadFromFramePtr(Register r, int32_t offset) {
+        masm.loadPtr(Address(StackPointer, localOffsetToSPOffset(offset)), r);
+    }
+
+    void loadFromFrameF64(FloatRegister r, int32_t offset) {
+        masm.loadDouble(Address(StackPointer, localOffsetToSPOffset(offset)), r);
+    }
+
+    void loadFromFrameF32(FloatRegister r, int32_t offset) {
+        masm.loadFloat32(Address(StackPointer, localOffsetToSPOffset(offset)), r);
+    }
+
+    // Frame offset of a local slot, asserting the expected type.
+    int32_t frameOffsetFromSlot(uint32_t slot, MIRType type) {
+        MOZ_ASSERT(localInfo_[slot].type() == type);
+        return localInfo_[slot].offs();
+    }
+
+    ////////////////////////////////////////////////////////////
+    //
+    // Value stack and high-level register allocation.
+    //
+    // The value stack facilitates some on-the-fly register allocation
+    // and immediate-constant use.  It tracks constants, latent
+    // references to locals, register contents, and values on the CPU
+    // stack.
+    //
+    // The stack can be flushed to memory using sync().  This is handy
+    // to avoid problems with control flow and messy register usage
+    // patterns.
+
+    // One entry on the compiler's value stack: a tagged union of spilled
+    // stack values ("Mem"), latent local references ("Local"), live
+    // registers ("Register"), and immediate constants ("Const").  kind_
+    // must be set (via the set* methods) before the matching union member
+    // is read; every accessor asserts the tag.
+    struct Stk
+    {
+        enum Kind
+        {
+            // The Mem opcodes are all clustered at the beginning to
+            // allow for a quick test within sync().
+            MemI32,               // 32-bit integer stack value ("offs")
+            MemI64,               // 64-bit integer stack value ("offs")
+            MemF32,               // 32-bit floating stack value ("offs")
+            MemF64,               // 64-bit floating stack value ("offs")
+
+            // The Local opcodes follow the Mem opcodes for a similar
+            // quick test within hasLocal().
+            LocalI32,             // Local int32 var ("slot")
+            LocalI64,             // Local int64 var ("slot")
+            LocalF32,             // Local float32 var ("slot")
+            LocalF64,             // Local double var ("slot")
+
+            RegisterI32,          // 32-bit integer register ("i32reg")
+            RegisterI64,          // 64-bit integer register ("i64reg")
+            RegisterF32,          // 32-bit floating register ("f32reg")
+            RegisterF64,          // 64-bit floating register ("f64reg")
+
+            ConstI32,             // 32-bit integer constant ("i32val")
+            ConstI64,             // 64-bit integer constant ("i64val")
+            ConstF32,             // 32-bit floating constant ("f32val")
+            ConstF64,             // 64-bit floating constant ("f64val")
+
+            None                  // Uninitialized or void
+        };
+
+        Kind kind_;
+
+        // Sentinels for the range tests in isMem()/slot() below; these rely
+        // on the enum ordering documented above.
+        static const Kind MemLast = MemF64;
+        static const Kind LocalLast = LocalF64;
+
+        union {
+            RegI32   i32reg_;
+            RegI64   i64reg_;
+            RegF32   f32reg_;
+            RegF64   f64reg_;
+            int32_t  i32val_;
+            int64_t  i64val_;
+            float    f32val_;
+            double   f64val_;
+            uint32_t slot_;
+            uint32_t offs_;
+        };
+
+        Stk() { kind_ = None; }
+
+        Kind kind() const { return kind_; }
+        bool isMem() const { return kind_ <= MemLast; }
+
+        RegI32   i32reg() const { MOZ_ASSERT(kind_ == RegisterI32); return i32reg_; }
+        RegI64   i64reg() const { MOZ_ASSERT(kind_ == RegisterI64); return i64reg_; }
+        RegF32   f32reg() const { MOZ_ASSERT(kind_ == RegisterF32); return f32reg_; }
+        RegF64   f64reg() const { MOZ_ASSERT(kind_ == RegisterF64); return f64reg_; }
+        int32_t  i32val() const { MOZ_ASSERT(kind_ == ConstI32); return i32val_; }
+        int64_t  i64val() const { MOZ_ASSERT(kind_ == ConstI64); return i64val_; }
+        // For these two, use an out-param instead of simply returning, to
+        // use the normal stack and not the x87 FP stack (which has effect on
+        // NaNs with the signaling bit set).
+        void     f32val(float* out) const { MOZ_ASSERT(kind_ == ConstF32); *out = f32val_; }
+        void     f64val(double* out) const { MOZ_ASSERT(kind_ == ConstF64); *out = f64val_; }
+        uint32_t slot() const { MOZ_ASSERT(kind_ > MemLast && kind_ <= LocalLast); return slot_; }
+        uint32_t offs() const { MOZ_ASSERT(isMem()); return offs_; }
+
+        void setI32Reg(RegI32 r) { kind_ = RegisterI32; i32reg_ = r; }
+        void setI64Reg(RegI64 r) { kind_ = RegisterI64; i64reg_ = r; }
+        void setF32Reg(RegF32 r) { kind_ = RegisterF32; f32reg_ = r; }
+        void setF64Reg(RegF64 r) { kind_ = RegisterF64; f64reg_ = r; }
+        void setI32Val(int32_t v) { kind_ = ConstI32; i32val_ = v; }
+        void setI64Val(int64_t v) { kind_ = ConstI64; i64val_ = v; }
+        void setF32Val(float v) { kind_ = ConstF32; f32val_ = v; }
+        void setF64Val(double v) { kind_ = ConstF64; f64val_ = v; }
+        void setSlot(Kind k, uint32_t v) { MOZ_ASSERT(k > MemLast && k <= LocalLast); kind_ = k; slot_ = v; }
+        void setOffs(Kind k, uint32_t v) { MOZ_ASSERT(k <= MemLast); kind_ = k; offs_ = v; }
+    };
+
+    // The value stack: operands live here virtually (as constants, local
+    // references, registers, or spilled stack memory) until consumed.
+    Vector<Stk, 8, SystemAllocPolicy> stk_;
+
+    // Append a default (Kind == None) entry and return it for the caller to
+    // initialize.  Infallible: assumes vector capacity was reserved up
+    // front — TODO confirm where that reservation happens (not in this chunk).
+    Stk& push() {
+        stk_.infallibleEmplaceBack(Stk());
+        return stk_.back();
+    }
+
+    // Sentinel "no register" values, used e.g. by maybeFreeI32/maybeFreeI64
+    // to mean "nothing to free".
+    RegI32 invalidI32() {
+        return RegI32(Register::Invalid());
+    }
+
+    RegI64 invalidI64() {
+        return RegI64(Register64::Invalid());
+    }
+
+    RegF64 invalidF64() {
+        return RegF64(InvalidFloatReg);
+    }
+
+    // View the low 32 bits of r as an I32 register.  No allocator
+    // bookkeeping is done here; callers manage ownership.
+    RegI32 fromI64(RegI64 r) {
+        return RegI32(lowPart(r));
+    }
+
+    // Widen an allocated I32 into an I64.  On 64-bit targets the same
+    // machine register is reinterpreted; on 32-bit targets a fresh
+    // register is allocated for the high half.
+    RegI64 widenI32(RegI32 r) {
+        MOZ_ASSERT(!isAvailableI32(r));
+#ifdef JS_PUNBOX64
+        return RegI64(Register64(r));
+#else
+        RegI32 high = needI32();
+        return RegI64(Register64(high, r));
+#endif
+    }
+
+    // Narrow an I64 to an I32, freeing the high half on 32-bit targets.
+    // The returned I32 retains ownership of the (low) register.
+    RegI32 narrowI64(RegI64 r) {
+#if defined(JS_PUNBOX64)
+        return RegI32(r.reg);
+#else
+        freeI32(RegI32(r.high));
+        return RegI32(r.low);
+#endif
+    }
+
+    // Platform-independent accessor for the register holding the low
+    // 32 bits of an I64.
+    Register lowPart(RegI64 r) {
+#ifdef JS_PUNBOX64
+        return r.reg;
+#else
+        return r.low;
+#endif
+    }
+
+    // The register holding the high 32 bits of an I64, or Invalid() on
+    // 64-bit targets where there is no separate high register.
+    Register maybeHighPart(RegI64 r) {
+#ifdef JS_PUNBOX64
+        return Register::Invalid();
+#else
+        return r.high;
+#endif
+    }
+
+    // Zero the high half of r on 32-bit targets; no-op on 64-bit.
+    void maybeClearHighPart(RegI64 r) {
+#if !defined(JS_PUNBOX64)
+        masm.move32(Imm32(0), r.high);
+#endif
+    }
+
+    // Thin wrappers over the register allocator `ra`: availability
+    // queries, anonymous and specific-register allocation, and freeing.
+    bool isAvailableI32(RegI32 r) { return ra.isAvailableI32(r); }
+    bool isAvailableI64(RegI64 r) { return ra.isAvailableI64(r); }
+    bool isAvailableF32(RegF32 r) { return ra.isAvailableF32(r); }
+    bool isAvailableF64(RegF64 r) { return ra.isAvailableF64(r); }
+
+    MOZ_MUST_USE RegI32 needI32() { return ra.needI32(); }
+    MOZ_MUST_USE RegI64 needI64() { return ra.needI64(); }
+    MOZ_MUST_USE RegF32 needF32() { return ra.needF32(); }
+    MOZ_MUST_USE RegF64 needF64() { return ra.needF64(); }
+
+    void needI32(RegI32 specific) { ra.needI32(specific); }
+    void needI64(RegI64 specific) { ra.needI64(specific); }
+    void needF32(RegF32 specific) { ra.needF32(specific); }
+    void needF64(RegF64 specific) { ra.needF64(specific); }
+
+#if defined(JS_CODEGEN_ARM)
+    // ARM: allocate an (even,odd) register pair for an I64.
+    MOZ_MUST_USE RegI64 needI64Pair() { return ra.needI64Pair(); }
+#endif
+
+    void freeI32(RegI32 r) { ra.freeI32(r); }
+    void freeI64(RegI64 r) { ra.freeI64(r); }
+    void freeF32(RegF32 r) { ra.freeF32(r); }
+    void freeF64(RegF64 r) { ra.freeF64(r); }
+
+    // Free r except for the I32 register `except`, which stays allocated.
+    // On 64-bit targets the I64 and `except` alias the same register, so
+    // there is nothing to free; on 32-bit we free both halves and then
+    // re-reserve `except`.
+    void freeI64Except(RegI64 r, RegI32 except) {
+#ifdef JS_PUNBOX64
+        MOZ_ASSERT(r.reg == except);
+#else
+        MOZ_ASSERT(r.high == except || r.low == except);
+        freeI64(r);
+        needI32(except);
+#endif
+    }
+
+    // Free r unless it is the invalid sentinel.
+    void maybeFreeI32(RegI32 r) {
+        if (r != invalidI32())
+            freeI32(r);
+    }
+
+    void maybeFreeI64(RegI64 r) {
+        if (r != invalidI64())
+            freeI64(r);
+    }
+
+    // Reserve a specific register that the caller knows is free, so the
+    // allocator cannot trigger a sync()/spill.
+    void needI32NoSync(RegI32 r) {
+        MOZ_ASSERT(isAvailableI32(r));
+        needI32(r);
+    }
+
+    // TODO / OPTIMIZE: need2xI32() can be optimized along with needI32()
+    // to avoid sync(). (Bug 1316802)
+
+    // Reserve two specific registers at once.
+    void need2xI32(RegI32 r0, RegI32 r1) {
+        needI32(r0);
+        needI32(r1);
+    }
+
+    void need2xI64(RegI64 r0, RegI64 r1) {
+        needI64(r0);
+        needI64(r1);
+    }
+
+    // Register-to-register moves.  Each is a no-op when source and
+    // destination are the same register.
+    void moveI32(RegI32 src, RegI32 dest) {
+        if (src == dest)
+            return;
+        masm.move32(src, dest);
+    }
+
+    void moveI64(RegI64 src, RegI64 dest) {
+        if (src == dest)
+            return;
+        masm.move64(src, dest);
+    }
+
+    void moveF64(RegF64 src, RegF64 dest) {
+        if (src == dest)
+            return;
+        masm.moveDouble(src, dest);
+    }
+
+    void moveF32(RegF32 src, RegF32 dest) {
+        if (src == dest)
+            return;
+        masm.moveFloat32(src, dest);
+    }
+
+    // Load the 64-bit constant v into r.
+    void setI64(int64_t v, RegI64 r) {
+        masm.move64(Imm64(v), r);
+    }
+
+    // Materialize an I32 stack element into register r, by source kind.
+    // The uint32_t cast zero-extends the constant into the word-sized
+    // immediate.
+    void loadConstI32(Register r, Stk& src) {
+        masm.mov(ImmWord(uint32_t(src.i32val())), r);
+    }
+
+    void loadConstI32(Register r, int32_t v) {
+        masm.mov(ImmWord(uint32_t(v)), r);
+    }
+
+    // Reload a spilled element from its recorded frame offset.
+    void loadMemI32(Register r, Stk& src) {
+        loadFromFrameI32(r, src.offs());
+    }
+
+    // Read a local variable from its frame slot.
+    void loadLocalI32(Register r, Stk& src) {
+        loadFromFrameI32(r, frameOffsetFromSlot(src.slot(), MIRType::Int32));
+    }
+
+    void loadRegisterI32(Register r, Stk& src) {
+        if (src.i32reg() != r)
+            masm.move32(src.i32reg(), r);
+    }
+
+    // Materialize an I64 stack element into register (pair) r, by kind.
+    void loadConstI64(Register64 r, Stk &src) {
+        masm.move64(Imm64(src.i64val()), r);
+    }
+
+    void loadMemI64(Register64 r, Stk& src) {
+        loadFromFrameI64(r, src.offs());
+    }
+
+    void loadLocalI64(Register64 r, Stk& src) {
+        loadFromFrameI64(r, frameOffsetFromSlot(src.slot(), MIRType::Int64));
+    }
+
+    void loadRegisterI64(Register64 r, Stk& src) {
+        if (src.i64reg() != r)
+            masm.move64(src.i64reg(), r);
+    }
+
+    // Materialize F64/F32 stack elements.  Constants go through the
+    // out-param accessors (see Stk::f64val/f32val) to avoid the x87 FP
+    // stack mangling signaling NaNs.
+    void loadConstF64(FloatRegister r, Stk &src) {
+        double d;
+        src.f64val(&d);
+        masm.loadConstantDouble(d, r);
+    }
+
+    void loadMemF64(FloatRegister r, Stk& src) {
+        loadFromFrameF64(r, src.offs());
+    }
+
+    void loadLocalF64(FloatRegister r, Stk& src) {
+        loadFromFrameF64(r, frameOffsetFromSlot(src.slot(), MIRType::Double));
+    }
+
+    void loadRegisterF64(FloatRegister r, Stk& src) {
+        if (src.f64reg() != r)
+            masm.moveDouble(src.f64reg(), r);
+    }
+
+    void loadConstF32(FloatRegister r, Stk &src) {
+        float f;
+        src.f32val(&f);
+        masm.loadConstantFloat32(f, r);
+    }
+
+    void loadMemF32(FloatRegister r, Stk& src) {
+        loadFromFrameF32(r, src.offs());
+    }
+
+    void loadLocalF32(FloatRegister r, Stk& src) {
+        loadFromFrameF32(r, frameOffsetFromSlot(src.slot(), MIRType::Float32));
+    }
+
+    void loadRegisterF32(FloatRegister r, Stk& src) {
+        if (src.f32reg() != r)
+            masm.moveFloat32(src.f32reg(), r);
+    }
+
+    // Materialize any I32 stack element into r, dispatching on its kind.
+    // Crashes on a non-I32 element: that indicates a compiler bug.
+    void loadI32(Register r, Stk& src) {
+        switch (src.kind()) {
+          case Stk::ConstI32:
+            loadConstI32(r, src);
+            break;
+          case Stk::MemI32:
+            loadMemI32(r, src);
+            break;
+          case Stk::LocalI32:
+            loadLocalI32(r, src);
+            break;
+          case Stk::RegisterI32:
+            loadRegisterI32(r, src);
+            break;
+          case Stk::None:
+          default:
+            MOZ_CRASH("Compiler bug: Expected I32 on stack");
+        }
+    }
+
+    // As loadI32, for I64 elements.
+    void loadI64(Register64 r, Stk& src) {
+        switch (src.kind()) {
+          case Stk::ConstI64:
+            loadConstI64(r, src);
+            break;
+          case Stk::MemI64:
+            loadMemI64(r, src);
+            break;
+          case Stk::LocalI64:
+            loadLocalI64(r, src);
+            break;
+          case Stk::RegisterI64:
+            loadRegisterI64(r, src);
+            break;
+          case Stk::None:
+          default:
+            MOZ_CRASH("Compiler bug: Expected I64 on stack");
+        }
+    }
+
+#if !defined(JS_PUNBOX64)
+    // 32-bit targets only: load just the low or high half of an I64 stack
+    // element.  Memory/local offsets are adjusted by the half's offset
+    // within the 8-byte slot (INT64LOW_OFFSET / INT64HIGH_OFFSET).
+    void loadI64Low(Register r, Stk& src) {
+        switch (src.kind()) {
+          case Stk::ConstI64:
+            masm.move32(Imm64(src.i64val()).low(), r);
+            break;
+          case Stk::MemI64:
+            loadFromFrameI32(r, src.offs() - INT64LOW_OFFSET);
+            break;
+          case Stk::LocalI64:
+            loadFromFrameI32(r, frameOffsetFromSlot(src.slot(), MIRType::Int64) - INT64LOW_OFFSET);
+            break;
+          case Stk::RegisterI64:
+            if (src.i64reg().low != r)
+                masm.move32(src.i64reg().low, r);
+            break;
+          case Stk::None:
+          default:
+            MOZ_CRASH("Compiler bug: Expected I64 on stack");
+        }
+    }
+
+    void loadI64High(Register r, Stk& src) {
+        switch (src.kind()) {
+          case Stk::ConstI64:
+            masm.move32(Imm64(src.i64val()).hi(), r);
+            break;
+          case Stk::MemI64:
+            loadFromFrameI32(r, src.offs() - INT64HIGH_OFFSET);
+            break;
+          case Stk::LocalI64:
+            loadFromFrameI32(r, frameOffsetFromSlot(src.slot(), MIRType::Int64) - INT64HIGH_OFFSET);
+            break;
+          case Stk::RegisterI64:
+            if (src.i64reg().high != r)
+                masm.move32(src.i64reg().high, r);
+            break;
+          case Stk::None:
+          default:
+            MOZ_CRASH("Compiler bug: Expected I64 on stack");
+        }
+    }
+#endif
+
+    // As loadI32, for F64 elements.
+    void loadF64(FloatRegister r, Stk& src) {
+        switch (src.kind()) {
+          case Stk::ConstF64:
+            loadConstF64(r, src);
+            break;
+          case Stk::MemF64:
+            loadMemF64(r, src);
+            break;
+          case Stk::LocalF64:
+            loadLocalF64(r, src);
+            break;
+          case Stk::RegisterF64:
+            loadRegisterF64(r, src);
+            break;
+          case Stk::None:
+          default:
+            MOZ_CRASH("Compiler bug: expected F64 on stack");
+        }
+    }
+
+    // As loadI32, for F32 elements.
+    void loadF32(FloatRegister r, Stk& src) {
+        switch (src.kind()) {
+          case Stk::ConstF32:
+            loadConstF32(r, src);
+            break;
+          case Stk::MemF32:
+            loadMemF32(r, src);
+            break;
+          case Stk::LocalF32:
+            loadLocalF32(r, src);
+            break;
+          case Stk::RegisterF32:
+            loadRegisterF32(r, src);
+            break;
+          case Stk::None:
+          default:
+            MOZ_CRASH("Compiler bug: expected F32 on stack");
+        }
+    }
+
+    // Flush all local and register value stack elements to memory.
+    //
+    // TODO / OPTIMIZE: As this is fairly expensive and causes worse
+    // code to be emitted subsequently, it is useful to avoid calling
+    // it.  (Bug 1316802)
+    //
+    // Some optimization has been done already.  Remaining
+    // opportunities:
+    //
+    //  - It would be interesting to see if we can specialize it
+    //    before calls with particularly simple signatures, or where
+    //    we can do parallel assignment of register arguments, or
+    //    similar.  See notes in emitCall().
+    //
+    //  - Operations that need specific registers: multiply, quotient,
+    //    remainder, will tend to sync because the registers we need
+    //    will tend to be allocated.  We may be able to avoid that by
+    //    prioritizing registers differently (takeLast instead of
+    //    takeFirst) but we may also be able to allocate an unused
+    //    register on demand to free up one we need, thus avoiding the
+    //    sync.  That type of fix would go into needI32().
+
+    void sync() final {
+        size_t start = 0;
+        size_t lim = stk_.length();
+
+        // Scan down from the top for the deepest element that is already
+        // in memory; everything at or below it is already synced, so the
+        // spill loop below can start just above it.
+        for (size_t i = lim; i > 0; i--) {
+            // Memory opcodes are first in the enum, single check against MemLast is fine.
+            if (stk_[i - 1].kind() <= Stk::MemLast) {
+                start = i;
+                break;
+            }
+        }
+
+        // Spill each local/register element bottom-up: push its value onto
+        // the machine stack, free any register it held, and retag it as a
+        // memory element at the recorded framePushed offset.  Constants
+        // (and already-memory elements) fall through the default case
+        // untouched.
+        for (size_t i = start; i < lim; i++) {
+            Stk& v = stk_[i];
+            switch (v.kind()) {
+              case Stk::LocalI32: {
+                ScratchI32 scratch(*this);
+                loadLocalI32(scratch, v);
+                masm.Push(scratch);
+                v.setOffs(Stk::MemI32, masm.framePushed());
+                break;
+              }
+              case Stk::RegisterI32: {
+                masm.Push(v.i32reg());
+                freeI32(v.i32reg());
+                v.setOffs(Stk::MemI32, masm.framePushed());
+                break;
+              }
+              case Stk::LocalI64: {
+                ScratchI32 scratch(*this);
+#ifdef JS_PUNBOX64
+                loadI64(Register64(scratch), v);
+                masm.Push(scratch);
+#else
+                // 32-bit: push high half first so the value is laid out
+                // with the low half at the lower address.
+                int32_t offset = frameOffsetFromSlot(v.slot(), MIRType::Int64);
+                loadFromFrameI32(scratch, offset - INT64HIGH_OFFSET);
+                masm.Push(scratch);
+                loadFromFrameI32(scratch, offset - INT64LOW_OFFSET);
+                masm.Push(scratch);
+#endif
+                v.setOffs(Stk::MemI64, masm.framePushed());
+                break;
+              }
+              case Stk::RegisterI64: {
+#ifdef JS_PUNBOX64
+                masm.Push(v.i64reg().reg);
+                freeI64(v.i64reg());
+#else
+                masm.Push(v.i64reg().high);
+                masm.Push(v.i64reg().low);
+                freeI64(v.i64reg());
+#endif
+                v.setOffs(Stk::MemI64, masm.framePushed());
+                break;
+              }
+              case Stk::LocalF64: {
+                ScratchF64 scratch(*this);
+                loadF64(scratch, v);
+                masm.Push(scratch);
+                v.setOffs(Stk::MemF64, masm.framePushed());
+                break;
+              }
+              case Stk::RegisterF64: {
+                masm.Push(v.f64reg());
+                freeF64(v.f64reg());
+                v.setOffs(Stk::MemF64, masm.framePushed());
+                break;
+              }
+              case Stk::LocalF32: {
+                ScratchF32 scratch(*this);
+                loadF32(scratch, v);
+                masm.Push(scratch);
+                v.setOffs(Stk::MemF32, masm.framePushed());
+                break;
+              }
+              case Stk::RegisterF32: {
+                masm.Push(v.f32reg());
+                freeF32(v.f32reg());
+                v.setOffs(Stk::MemF32, masm.framePushed());
+                break;
+              }
+              default: {
+                break;
+              }
+            }
+        }
+
+        // Track the high-water mark of machine stack usage.
+        maxFramePushed_ = Max(maxFramePushed_, int32_t(masm.framePushed()));
+    }
+
+    // This is an optimization used to avoid calling sync() for
+    // setLocal(): if the local does not exist unresolved on the stack
+    // then we can skip the sync.
+
+    // Scan top-down.  Stops (returning false) at the first memory element,
+    // since everything below it is already synced and cannot hold an
+    // unresolved reference to the local.
+    bool hasLocal(uint32_t slot) {
+        for (size_t i = stk_.length(); i > 0; i--) {
+            // Memory opcodes are first in the enum, single check against MemLast is fine.
+            Stk::Kind kind = stk_[i-1].kind();
+            if (kind <= Stk::MemLast)
+                return false;
+
+            // Local opcodes follow memory opcodes in the enum, single check against
+            // LocalLast is sufficient.
+            if (kind <= Stk::LocalLast && stk_[i-1].slot() == slot)
+                return true;
+        }
+        return false;
+    }
+
+    // Sync only if `slot` is referenced unresolved on the value stack.
+    void syncLocal(uint32_t slot) {
+        if (hasLocal(slot))
+            sync();            // TODO / OPTIMIZE: Improve this?  (Bug 1316817)
+    }
+
+    // Push an already-allocated register onto the value stack.  The
+    // register must not be available in the allocator (i.e. the stack
+    // entry takes over an existing allocation).
+
+    void pushI32(RegI32 r) {
+        MOZ_ASSERT(!isAvailableI32(r));
+        push().setI32Reg(r);
+    }
+
+    void pushI64(RegI64 r) {
+        MOZ_ASSERT(!isAvailableI64(r));
+        push().setI64Reg(r);
+    }
+
+    void pushF64(RegF64 r) {
+        MOZ_ASSERT(!isAvailableF64(r));
+        push().setF64Reg(r);
+    }
+
+    void pushF32(RegF32 r) {
+        MOZ_ASSERT(!isAvailableF32(r));
+        push().setF32Reg(r);
+    }
+
+    // Push a constant onto the value stack; no register or memory is
+    // consumed until the constant is materialized.
+
+    void pushI32(int32_t v) {
+        push().setI32Val(v);
+    }
+
+    void pushI64(int64_t v) {
+        push().setI64Val(v);
+    }
+
+    void pushF64(double v) {
+        push().setF64Val(v);
+    }
+
+    void pushF32(float v) {
+        push().setF32Val(v);
+    }
+
+    // Push a reference to a local slot onto the value stack.  The slot
+    // is not read here; it is read when the entry is consumed, or when a
+    // side effect on the slot forces its value to be saved (see sync()).
+
+    void pushLocalI32(uint32_t slot) {
+        push().setSlot(Stk::LocalI32, slot);
+    }
+
+    void pushLocalI64(uint32_t slot) {
+        push().setSlot(Stk::LocalI64, slot);
+    }
+
+    void pushLocalF64(uint32_t slot) {
+        push().setSlot(Stk::LocalF64, slot);
+    }
+
+    void pushLocalF32(uint32_t slot) {
+        push().setSlot(Stk::LocalF32, slot);
+    }
+
+    // PRIVATE.  Call only from other popI32() variants.
+    // v must be the stack top.
+
+    // Materialize stack-top element v into r.  Does not pop; callers do.
+    void popI32(Stk& v, RegI32 r) {
+        switch (v.kind()) {
+          case Stk::ConstI32:
+            loadConstI32(r, v);
+            break;
+          case Stk::LocalI32:
+            loadLocalI32(r, v);
+            break;
+          case Stk::MemI32:
+            masm.Pop(r);
+            break;
+          case Stk::RegisterI32:
+            loadRegisterI32(r, v);
+            break;
+          case Stk::None:
+          default:
+            MOZ_CRASH("Compiler bug: expected int on stack");
+        }
+    }
+
+    // Pop the top I32 into some register, reusing the element's own
+    // register when it already has one (no move, no new allocation).
+    MOZ_MUST_USE RegI32 popI32() {
+        Stk& v = stk_.back();
+        RegI32 r;
+        if (v.kind() == Stk::RegisterI32)
+            r = v.i32reg();
+        else
+            popI32(v, (r = needI32()));
+        stk_.popBack();
+        return r;
+    }
+
+    // Pop the top I32 into `specific`.  If the value lives in some other
+    // register, move it over and free the old register.
+    RegI32 popI32(RegI32 specific) {
+        Stk& v = stk_.back();
+
+        if (!(v.kind() == Stk::RegisterI32 && v.i32reg() == specific)) {
+            needI32(specific);
+            popI32(v, specific);
+            if (v.kind() == Stk::RegisterI32)
+                freeI32(v.i32reg());
+        }
+
+        stk_.popBack();
+        return specific;
+    }
+
+    // PRIVATE.  Call only from other popI64() variants.
+    // v must be the stack top.
+
+    // Materialize stack-top element v into r.  Does not pop; callers do.
+    void popI64(Stk& v, RegI64 r) {
+        switch (v.kind()) {
+          case Stk::ConstI64:
+            loadConstI64(r, v);
+            break;
+          case Stk::LocalI64:
+            loadLocalI64(r, v);
+            break;
+          case Stk::MemI64:
+#ifdef JS_PUNBOX64
+            masm.Pop(r.reg);
+#else
+            // Low half was pushed last in sync(), so it pops first.
+            masm.Pop(r.low);
+            masm.Pop(r.high);
+#endif
+            break;
+          case Stk::RegisterI64:
+            loadRegisterI64(r, v);
+            break;
+          case Stk::None:
+          default:
+            MOZ_CRASH("Compiler bug: expected long on stack");
+        }
+    }
+
+    // Pop the top I64, reusing its register (pair) when it has one.
+    MOZ_MUST_USE RegI64 popI64() {
+        Stk& v = stk_.back();
+        RegI64 r;
+        if (v.kind() == Stk::RegisterI64)
+            r = v.i64reg();
+        else
+            popI64(v, (r = needI64()));
+        stk_.popBack();
+        return r;
+    }
+
+    // Note, the stack top can be in one half of "specific" on 32-bit
+    // systems.  We can optimize, but for simplicity, if the register
+    // does not match exactly, then just force the stack top to memory
+    // and then read it back in.
+
+    RegI64 popI64(RegI64 specific) {
+        Stk& v = stk_.back();
+
+        if (!(v.kind() == Stk::RegisterI64 && v.i64reg() == specific)) {
+            needI64(specific);
+            popI64(v, specific);
+            if (v.kind() == Stk::RegisterI64)
+                freeI64(v.i64reg());
+        }
+
+        stk_.popBack();
+        return specific;
+    }
+
+    // PRIVATE.  Call only from other popF64() variants.
+    // v must be the stack top.
+
+    // Materialize stack-top element v into r.  Does not pop; callers do.
+    void popF64(Stk& v, RegF64 r) {
+        switch (v.kind()) {
+          case Stk::ConstF64:
+            loadConstF64(r, v);
+            break;
+          case Stk::LocalF64:
+            loadLocalF64(r, v);
+            break;
+          case Stk::MemF64:
+            masm.Pop(r);
+            break;
+          case Stk::RegisterF64:
+            loadRegisterF64(r, v);
+            break;
+          case Stk::None:
+          default:
+            MOZ_CRASH("Compiler bug: expected double on stack");
+        }
+    }
+
+    // Pop the top F64, reusing its register when it has one.
+    MOZ_MUST_USE RegF64 popF64() {
+        Stk& v = stk_.back();
+        RegF64 r;
+        if (v.kind() == Stk::RegisterF64)
+            r = v.f64reg();
+        else
+            popF64(v, (r = needF64()));
+        stk_.popBack();
+        return r;
+    }
+
+    // Pop the top F64 into `specific`, freeing any other register the
+    // value occupied.
+    RegF64 popF64(RegF64 specific) {
+        Stk& v = stk_.back();
+
+        if (!(v.kind() == Stk::RegisterF64 && v.f64reg() == specific)) {
+            needF64(specific);
+            popF64(v, specific);
+            if (v.kind() == Stk::RegisterF64)
+                freeF64(v.f64reg());
+        }
+
+        stk_.popBack();
+        return specific;
+    }
+
+    // PRIVATE.  Call only from other popF32() variants.
+    // v must be the stack top.
+
+    // Materialize stack-top element v into r.  Does not pop; callers do.
+    void popF32(Stk& v, RegF32 r) {
+        switch (v.kind()) {
+          case Stk::ConstF32:
+            loadConstF32(r, v);
+            break;
+          case Stk::LocalF32:
+            loadLocalF32(r, v);
+            break;
+          case Stk::MemF32:
+            masm.Pop(r);
+            break;
+          case Stk::RegisterF32:
+            loadRegisterF32(r, v);
+            break;
+          case Stk::None:
+          default:
+            MOZ_CRASH("Compiler bug: expected float on stack");
+        }
+    }
+
+    // Pop the top F32, reusing its register when it has one.
+    MOZ_MUST_USE RegF32 popF32() {
+        Stk& v = stk_.back();
+        RegF32 r;
+        if (v.kind() == Stk::RegisterF32)
+            r = v.f32reg();
+        else
+            popF32(v, (r = needF32()));
+        stk_.popBack();
+        return r;
+    }
+
+    // Pop the top F32 into `specific`, freeing any other register the
+    // value occupied.
+    RegF32 popF32(RegF32 specific) {
+        Stk& v = stk_.back();
+
+        if (!(v.kind() == Stk::RegisterF32 && v.f32reg() == specific)) {
+            needF32(specific);
+            popF32(v, specific);
+            if (v.kind() == Stk::RegisterF32)
+                freeF32(v.f32reg());
+        }
+
+        stk_.popBack();
+        return specific;
+    }
+
+    // Pattern-matching helpers: if the stack top is a constant (or local),
+    // report it through the out-param and return true.  The pop* forms
+    // also remove the element; the peek* forms leave it in place.
+
+    MOZ_MUST_USE bool popConstI32(int32_t* c) {
+        Stk& v = stk_.back();
+        if (v.kind() != Stk::ConstI32)
+            return false;
+        *c = v.i32val();
+        stk_.popBack();
+        return true;
+    }
+
+    MOZ_MUST_USE bool popConstI64(int64_t* c) {
+        Stk& v = stk_.back();
+        if (v.kind() != Stk::ConstI64)
+            return false;
+        *c = v.i64val();
+        stk_.popBack();
+        return true;
+    }
+
+    MOZ_MUST_USE bool peekConstI32(int32_t* c) {
+        Stk& v = stk_.back();
+        if (v.kind() != Stk::ConstI32)
+            return false;
+        *c = v.i32val();
+        return true;
+    }
+
+    MOZ_MUST_USE bool peekConstI64(int64_t* c) {
+        Stk& v = stk_.back();
+        if (v.kind() != Stk::ConstI64)
+            return false;
+        *c = v.i64val();
+        return true;
+    }
+
+    // Pop a constant only if it is a power of two greater than `cutoff`;
+    // on success *power receives log2 of the value.  NOTE(review): *c is
+    // written even on the failing power-of-two path (without popping) —
+    // presumably callers ignore the outputs when false is returned; verify
+    // against call sites.
+    MOZ_MUST_USE bool popConstPositivePowerOfTwoI32(int32_t* c,
+                                                    uint_fast8_t* power,
+                                                    int32_t cutoff)
+    {
+        Stk& v = stk_.back();
+        if (v.kind() != Stk::ConstI32)
+            return false;
+        *c = v.i32val();
+        if (*c <= cutoff || !IsPowerOfTwo(static_cast<uint32_t>(*c)))
+            return false;
+        *power = FloorLog2(*c);
+        stk_.popBack();
+        return true;
+    }
+
+    MOZ_MUST_USE bool popConstPositivePowerOfTwoI64(int64_t* c,
+                                                    uint_fast8_t* power,
+                                                    int64_t cutoff)
+    {
+        Stk& v = stk_.back();
+        if (v.kind() != Stk::ConstI64)
+            return false;
+        *c = v.i64val();
+        if (*c <= cutoff || !IsPowerOfTwo(static_cast<uint64_t>(*c)))
+            return false;
+        *power = FloorLog2(*c);
+        stk_.popBack();
+        return true;
+    }
+
+    // Peek: true if the stack top is a reference to an I32 local, with the
+    // slot number in *local.
+    MOZ_MUST_USE bool peekLocalI32(uint32_t* local) {
+        Stk& v = stk_.back();
+        if (v.kind() != Stk::LocalI32)
+            return false;
+        *local = v.slot();
+        return true;
+    }
+
+    // TODO / OPTIMIZE (Bug 1316818): At the moment we use ReturnReg
+    // for JoinReg.  It is possible other choices would lead to better
+    // register allocation, as ReturnReg is often first in the
+    // register set and will be heavily wanted by the register
+    // allocator that uses takeFirst().
+    //
+    // Obvious options:
+    //  - pick a register at the back of the register set
+    //  - pick a random register per block (different blocks have
+    //    different join regs)
+    //
+    // On the other hand, we sync() before every block and only the
+    // JoinReg is live out of the block.  But on the way out, we
+    // currently pop the JoinReg before freeing regs to be discarded,
+    // so there is a real risk of some pointless shuffling there.  If
+    // we instead integrate the popping of the join reg into the
+    // popping of the stack we can just use the JoinReg as it will
+    // become available in that process.
+
+    // Pop the block result into the designated join register for `type`,
+    // or Nothing() for void.  Debug builds assert the stack top has a
+    // kind matching the expected type.
+    MOZ_MUST_USE Maybe<AnyReg> popJoinRegUnlessVoid(ExprType type) {
+        switch (type) {
+          case ExprType::Void: {
+            return Nothing();
+          }
+          case ExprType::I32: {
+            DebugOnly<Stk::Kind> k(stk_.back().kind());
+            MOZ_ASSERT(k == Stk::RegisterI32 || k == Stk::ConstI32 || k == Stk::MemI32 ||
+                       k == Stk::LocalI32);
+            return Some(AnyReg(popI32(joinRegI32)));
+          }
+          case ExprType::I64: {
+            DebugOnly<Stk::Kind> k(stk_.back().kind());
+            MOZ_ASSERT(k == Stk::RegisterI64 || k == Stk::ConstI64 || k == Stk::MemI64 ||
+                       k == Stk::LocalI64);
+            return Some(AnyReg(popI64(joinRegI64)));
+          }
+          case ExprType::F64: {
+            DebugOnly<Stk::Kind> k(stk_.back().kind());
+            MOZ_ASSERT(k == Stk::RegisterF64 || k == Stk::ConstF64 || k == Stk::MemF64 ||
+                       k == Stk::LocalF64);
+            return Some(AnyReg(popF64(joinRegF64)));
+          }
+          case ExprType::F32: {
+            DebugOnly<Stk::Kind> k(stk_.back().kind());
+            MOZ_ASSERT(k == Stk::RegisterF32 || k == Stk::ConstF32 || k == Stk::MemF32 ||
+                       k == Stk::LocalF32);
+            return Some(AnyReg(popF32(joinRegF32)));
+          }
+          default: {
+            MOZ_CRASH("Compiler bug: unexpected expression type");
+          }
+        }
+    }
+
+    // If we ever start not sync-ing on entry to Block (but instead try to sync
+    // lazily) then this may start asserting because it does not spill the
+    // joinreg if the joinreg is already allocated.  Note, it *can't* spill the
+    // joinreg in the contexts it's being used, so some other solution will need
+    // to be found.
+
+    // Reserve (without popping anything) the join register for `type`,
+    // which must currently be free.
+    MOZ_MUST_USE Maybe<AnyReg> captureJoinRegUnlessVoid(ExprType type) {
+        switch (type) {
+          case ExprType::I32:
+            MOZ_ASSERT(isAvailableI32(joinRegI32));
+            needI32(joinRegI32);
+            return Some(AnyReg(joinRegI32));
+          case ExprType::I64:
+            MOZ_ASSERT(isAvailableI64(joinRegI64));
+            needI64(joinRegI64);
+            return Some(AnyReg(joinRegI64));
+          case ExprType::F32:
+            MOZ_ASSERT(isAvailableF32(joinRegF32));
+            needF32(joinRegF32);
+            return Some(AnyReg(joinRegF32));
+          case ExprType::F64:
+            MOZ_ASSERT(isAvailableF64(joinRegF64));
+            needF64(joinRegF64);
+            return Some(AnyReg(joinRegF64));
+          case ExprType::Void:
+            return Nothing();
+          default:
+            MOZ_CRASH("Compiler bug: unexpected type");
+        }
+    }
+
+    // Push a captured join register back onto the value stack (no-op for
+    // a void result).
+    void pushJoinRegUnlessVoid(const Maybe<AnyReg>& r) {
+        if (!r)
+            return;
+        switch (r->tag) {
+          case AnyReg::I32:
+            pushI32(r->i32());
+            break;
+          case AnyReg::I64:
+            pushI64(r->i64());
+            break;
+          case AnyReg::F64:
+            pushF64(r->f64());
+            break;
+          case AnyReg::F32:
+            pushF32(r->f32());
+            break;
+        }
+    }
+
+    // Release a captured join register without pushing it (no-op for a
+    // void result).
+    void freeJoinRegUnlessVoid(const Maybe<AnyReg>& r) {
+        if (!r)
+            return;
+        switch (r->tag) {
+          case AnyReg::I32:
+            freeI32(r->i32());
+            break;
+          case AnyReg::I64:
+            freeI64(r->i64());
+            break;
+          case AnyReg::F64:
+            freeF64(r->f64());
+            break;
+          case AnyReg::F32:
+            freeF32(r->f32());
+            break;
+        }
+    }
+
+    // Reserve / release join registers by result type.  The *I variants
+    // handle only integer types; the general variants cover floats too.
+    void maybeReserveJoinRegI(ExprType type) {
+        if (type == ExprType::I32)
+            needI32(joinRegI32);
+        else if (type == ExprType::I64)
+            needI64(joinRegI64);
+    }
+
+    void maybeUnreserveJoinRegI(ExprType type) {
+        if (type == ExprType::I32)
+            freeI32(joinRegI32);
+        else if (type == ExprType::I64)
+            freeI64(joinRegI64);
+    }
+
+    void maybeReserveJoinReg(ExprType type) {
+        switch (type) {
+          case ExprType::I32:
+            needI32(joinRegI32);
+            break;
+          case ExprType::I64:
+            needI64(joinRegI64);
+            break;
+          case ExprType::F32:
+            needF32(joinRegF32);
+            break;
+          case ExprType::F64:
+            needF64(joinRegF64);
+            break;
+          default:
+            break;
+        }
+    }
+
+    void maybeUnreserveJoinReg(ExprType type) {
+        switch (type) {
+          case ExprType::I32:
+            freeI32(joinRegI32);
+            break;
+          case ExprType::I64:
+            freeI64(joinRegI64);
+            break;
+          case ExprType::F32:
+            freeF32(joinRegF32);
+            break;
+          case ExprType::F64:
+            freeF64(joinRegF64);
+            break;
+          default:
+            break;
+        }
+    }
+
+    // Return the amount of execution stack consumed by the top numval
+    // values on the value stack.
+
+    // Only Mem* elements consume machine stack; constants, locals, and
+    // register elements contribute zero.
+    size_t stackConsumed(size_t numval) {
+        size_t size = 0;
+        MOZ_ASSERT(numval <= stk_.length());
+        for (uint32_t i = stk_.length() - 1; numval > 0; numval--, i--) {
+            // The size computations come from the implementation of Push() in
+            // MacroAssembler-x86-shared.cpp and MacroAssembler-arm-shared.cpp,
+            // and from VFPRegister::size() in Architecture-arm.h.
+            //
+            // On ARM unlike on x86 we push a single for float.
+
+            Stk& v = stk_[i];
+            switch (v.kind()) {
+              case Stk::MemI32:
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+                size += sizeof(intptr_t);
+#else
+                MOZ_CRASH("BaseCompiler platform hook: stackConsumed I32");
+#endif
+                break;
+              case Stk::MemI64:
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+                size += sizeof(int64_t);
+#else
+                MOZ_CRASH("BaseCompiler platform hook: stackConsumed I64");
+#endif
+                break;
+              case Stk::MemF64:
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+                size += sizeof(double);
+#else
+                MOZ_CRASH("BaseCompiler platform hook: stackConsumed F64");
+#endif
+                break;
+              case Stk::MemF32:
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+                size += sizeof(double);
+#elif defined(JS_CODEGEN_ARM)
+                size += sizeof(float);
+#else
+                MOZ_CRASH("BaseCompiler platform hook: stackConsumed F32");
+#endif
+                break;
+              default:
+                break;
+            }
+        }
+        return size;
+    }
+
+    // Pop the value stack down to `stackSize` entries, freeing any registers
+    // held by the discarded entries.  The machine (execution) stack is not
+    // adjusted here; callers do that separately.
+    void popValueStackTo(uint32_t stackSize) {
+        for (uint32_t i = stk_.length(); i > stackSize; i--) {
+            Stk& v = stk_[i-1];
+            switch (v.kind()) {
+              case Stk::RegisterI32:
+                freeI32(v.i32reg());
+                break;
+              case Stk::RegisterI64:
+                freeI64(v.i64reg());
+                break;
+              case Stk::RegisterF64:
+                freeF64(v.f64reg());
+                break;
+              case Stk::RegisterF32:
+                freeF32(v.f32reg());
+                break;
+              default:
+                // Memory, constant, and local entries hold no registers.
+                break;
+            }
+        }
+        stk_.shrinkTo(stackSize);
+    }
+
+    // Drop `items` entries off the top of the value stack, freeing the
+    // registers they hold.
+    void popValueStackBy(uint32_t items) {
+        uint32_t newSize = stk_.length() - items;
+        popValueStackTo(newSize);
+    }
+
+    // Before branching to an outer control label, pop the execution
+    // stack to the level expected by that region, but do not free the
+    // stack as that will happen as compilation leaves the block.
+
+    void popStackBeforeBranch(uint32_t framePushed) {
+        uint32_t frameHere = masm.framePushed();
+        // The machine stack grows down, so popping is an add to the pointer.
+        // Note this does not update masm.framePushed(); per the comment above,
+        // the frame is freed when compilation leaves the block.
+        if (frameHere > framePushed)
+            masm.addPtr(ImmWord(frameHere - framePushed), StackPointer);
+    }
+
+    // True iff popStackBeforeBranch(framePushed) would emit a stack
+    // adjustment at the current frame depth.
+    bool willPopStackBeforeBranch(uint32_t framePushed) {
+        return masm.framePushed() > framePushed;
+    }
+
+    // Before exiting a nested control region, pop the execution stack
+    // to the level expected by the nesting region, and free the
+    // stack.
+
+    void popStackOnBlockExit(uint32_t framePushed) {
+        uint32_t frameHere = masm.framePushed();
+        if (frameHere > framePushed) {
+            // NOTE(review): presumably adjustStack() only fixes up the
+            // framePushed bookkeeping for dead code while freeStack() also
+            // emits the pointer adjustment — confirm against MacroAssembler.
+            if (deadCode_)
+                masm.adjustStack(frameHere - framePushed);
+            else
+                masm.freeStack(frameHere - framePushed);
+        }
+    }
+
+    // If the top value-stack entry lives in memory, free its execution-stack
+    // space.  The Stk entry itself is not popped here.
+    void popStackIfMemory() {
+        if (peek(0).isMem())
+            masm.freeStack(stackConsumed(1));
+    }
+
+    // Peek at the stack, for calls.
+
+    // Return the value-stack entry `relativeDepth` below the top;
+    // depth 0 is the topmost entry.
+    Stk& peek(uint32_t relativeDepth) {
+        size_t index = stk_.length() - 1 - relativeDepth;
+        return stk_[index];
+    }
+
+#ifdef DEBUG
+    // Check that we're not leaking registers by comparing the
+    // state of the stack + available registers with the set of
+    // all available registers.
+
+    // Call this between opcodes.
+    void performRegisterLeakCheck() {
+        BaseRegAlloc::LeakCheck check(ra);
+        // Register every register held by a value-stack entry as "known";
+        // per the comment above, LeakCheck then compares known + available
+        // registers against the set of all allocatable registers.
+        for (size_t i = 0 ; i < stk_.length() ; i++) {
+            Stk& item = stk_[i];
+            switch (item.kind_) {
+              case Stk::RegisterI32:
+                check.addKnownI32(item.i32reg());
+                break;
+              case Stk::RegisterI64:
+                check.addKnownI64(item.i64reg());
+                break;
+              case Stk::RegisterF32:
+                check.addKnownF32(item.f32reg());
+                break;
+              case Stk::RegisterF64:
+                check.addKnownF64(item.f64reg());
+                break;
+              default:
+                break;
+            }
+        }
+    }
+#endif
+
+    ////////////////////////////////////////////////////////////
+    //
+    // Control stack
+
+    // Record the state at entry to a control region (block/loop/if) so that
+    // branches and exits can restore the frame and value-stack levels.
+    void initControl(Control& item)
+    {
+        // Make sure the constructor was run properly
+        MOZ_ASSERT(item.framePushed == UINT32_MAX && item.stackSize == UINT32_MAX);
+
+        item.framePushed = masm.framePushed();
+        item.stackSize = stk_.length();
+        item.deadOnArrival = deadCode_;
+        item.bceSafeOnEntry = bceSafe_;
+    }
+
+    // Accessors for the control stack maintained by the opcode iterator.
+    // The no-argument form returns the innermost item.
+    Control& controlItem() {
+        return iter_.controlItem();
+    }
+
+    // Item `relativeDepth` levels out from the innermost control item.
+    Control& controlItem(uint32_t relativeDepth) {
+        return iter_.controlItem(relativeDepth);
+    }
+
+    // The function-level (outermost) control item.
+    Control& controlOutermost() {
+        return iter_.controlOutermost();
+    }
+
+    ////////////////////////////////////////////////////////////
+    //
+    // Labels
+
+    // Emit a patchable nop at the current bytecode offset; it can later be
+    // patched into a call to the debug trap (breakpoints, frame events).
+    void insertBreakablePoint(CallSiteDesc::Kind kind) {
+        // The debug trap exit requires WasmTlsReg be loaded. However, since we
+        // are emitting millions of these breakable points inline, we push this
+        // loading of TLS into the FarJumpIsland created by linkCallSites.
+        masm.nopPatchableToCall(CallSiteDesc(iter_.lastOpcodeOffset(), kind));
+    }
+
+    //////////////////////////////////////////////////////////////////////
+    //
+    // Function prologue and epilogue.
+
+    // Emit the function prologue: frame setup, DebugFrame initialization (if
+    // debugging), a patchable stack check, spilling of register arguments to
+    // their frame slots, and initialization of stack locals.
+    void beginFunction() {
+        JitSpew(JitSpew_Codegen, "# Emitting wasm baseline code");
+
+        SigIdDesc sigId = env_.funcSigs[func_.index]->id;
+        // The Tier-1 prologue additionally takes the compile mode and the
+        // function index.
+        if (mode_ == CompileMode::Tier1)
+            GenerateFunctionPrologue(masm, localSize_, sigId, &offsets_, mode_, func_.index);
+        else
+            GenerateFunctionPrologue(masm, localSize_, sigId, &offsets_);
+
+        MOZ_ASSERT(masm.framePushed() == uint32_t(localSize_));
+
+        maxFramePushed_ = localSize_;
+
+        if (debugEnabled_) {
+            // Initialize funcIndex and flag fields of DebugFrame.
+            size_t debugFrame = masm.framePushed() - DebugFrame::offsetOfFrame();
+            masm.store32(Imm32(func_.index),
+                         Address(masm.getStackPointer(), debugFrame + DebugFrame::offsetOfFuncIndex()));
+            masm.storePtr(ImmWord(0),
+                          Address(masm.getStackPointer(), debugFrame + DebugFrame::offsetOfFlagsWord()));
+        }
+
+        // We won't know until after we've generated code how big the frame will
+        // be (we may need arbitrary spill slots and outgoing param slots) so
+        // emit a patchable add that is patched in endFunction().
+        //
+        // ScratchReg may be used by branchPtr(), so use ABINonArgReg0/1 for
+        // temporaries.
+
+        stackAddOffset_ = masm.add32ToPtrWithPatch(StackPointer, ABINonArgReg0);
+        masm.wasmEmitStackCheck(ABINonArgReg0, ABINonArgReg1, &stackOverflowLabel_);
+
+        // Copy arguments from registers to stack.
+
+        const ValTypeVector& args = sig().args();
+
+        for (ABIArgIter<const ValTypeVector> i(args); !i.done(); i++) {
+            Local& l = localInfo_[i.index()];
+            switch (i.mirType()) {
+              case MIRType::Int32:
+                if (i->argInRegister())
+                    storeToFrameI32(i->gpr(), l.offs());
+                break;
+              case MIRType::Int64:
+                if (i->argInRegister())
+                    storeToFrameI64(i->gpr64(), l.offs());
+                break;
+              case MIRType::Double:
+                if (i->argInRegister())
+                    storeToFrameF64(i->fpu(), l.offs());
+                break;
+              case MIRType::Float32:
+                if (i->argInRegister())
+                    storeToFrameF32(i->fpu(), l.offs());
+                break;
+              default:
+                MOZ_CRASH("Function argument type");
+            }
+        }
+
+        if (varLow_ < varHigh_)
+            emitInitStackLocals();
+
+        if (debugEnabled_)
+            insertBreakablePoint(CallSiteDesc::EnterFrame);
+    }
+
+    // Spill the ABI return register for the function's return type into
+    // DebugFrame::results, so the debug trap can observe/modify the value
+    // (see the comment at the call sites in endFunction()).
+    void saveResult() {
+        MOZ_ASSERT(debugEnabled_);
+        size_t debugFrameOffset = masm.framePushed() - DebugFrame::offsetOfFrame();
+        Address resultsAddress(StackPointer, debugFrameOffset + DebugFrame::offsetOfResults());
+        switch (sig().ret()) {
+          case ExprType::Void:
+            break;
+          case ExprType::I32:
+            masm.store32(RegI32(ReturnReg), resultsAddress);
+            break;
+
+          case ExprType::I64:
+            masm.store64(RegI64(ReturnReg64), resultsAddress);
+            break;
+          case ExprType::F64:
+            masm.storeDouble(RegF64(ReturnDoubleReg), resultsAddress);
+            break;
+          case ExprType::F32:
+            masm.storeFloat32(RegF32(ReturnFloat32Reg), resultsAddress);
+            break;
+          default:
+            MOZ_CRASH("Function return type");
+        }
+    }
+
+    // Inverse of saveResult(): reload the return register from
+    // DebugFrame::results after the debug trap has run.
+    void restoreResult() {
+        MOZ_ASSERT(debugEnabled_);
+        size_t debugFrameOffset = masm.framePushed() - DebugFrame::offsetOfFrame();
+        Address resultsAddress(StackPointer, debugFrameOffset + DebugFrame::offsetOfResults());
+        switch (sig().ret()) {
+          case ExprType::Void:
+            break;
+          case ExprType::I32:
+            masm.load32(resultsAddress, RegI32(ReturnReg));
+            break;
+          case ExprType::I64:
+            masm.load64(resultsAddress, RegI64(ReturnReg64));
+            break;
+          case ExprType::F64:
+            masm.loadDouble(resultsAddress, RegF64(ReturnDoubleReg));
+            break;
+          case ExprType::F32:
+            masm.loadFloat32(resultsAddress, RegF32(ReturnFloat32Reg));
+            break;
+          default:
+            MOZ_CRASH("Function return type");
+        }
+    }
+
+    // Finish the function: patch the prologue's frame-size add, emit the
+    // stack-overflow tail, the return path (with debug breakpoints when
+    // enabled), the epilogue, and all out-of-line code.  Returns false on
+    // OOM or an implausibly large frame.
+    bool endFunction() {
+        // Always branch to stackOverflowLabel_ or returnLabel_.
+        masm.breakpoint();
+
+        // Patch the add in the prologue so that it checks against the correct
+        // frame size. Flush the constant pool in case it needs to be patched.
+        MOZ_ASSERT(maxFramePushed_ >= localSize_);
+        masm.flush();
+
+        // Precondition for patching.
+        if (masm.oom())
+            return false;
+        masm.patchAdd32ToPtr(stackAddOffset_, Imm32(-int32_t(maxFramePushed_ - localSize_)));
+
+        // Since we just overflowed the stack, to be on the safe side, pop the
+        // stack so that, when the trap exit stub executes, it is a safe
+        // distance away from the end of the native stack. If debugEnabled_ is
+        // set, we pop all locals space except allocated for DebugFrame to
+        // maintain the invariant that, when debugEnabled_, all wasm::Frames
+        // are valid wasm::DebugFrames which is observable by WasmHandleThrow.
+        masm.bind(&stackOverflowLabel_);
+        int32_t debugFrameReserved = debugEnabled_ ? DebugFrame::offsetOfFrame() : 0;
+        MOZ_ASSERT(localSize_ >= debugFrameReserved);
+        if (localSize_ > debugFrameReserved)
+            masm.addToStackPtr(Imm32(localSize_ - debugFrameReserved));
+        BytecodeOffset prologueTrapOffset(func_.lineOrBytecode);
+        masm.jump(TrapDesc(prologueTrapOffset, Trap::StackOverflow, debugFrameReserved));
+
+        masm.bind(&returnLabel_);
+
+        if (debugEnabled_) {
+            // Store and reload the return value from DebugFrame::return so that
+            // it can be clobbered, and/or modified by the debug trap.
+            saveResult();
+            insertBreakablePoint(CallSiteDesc::Breakpoint);
+            insertBreakablePoint(CallSiteDesc::LeaveFrame);
+            restoreResult();
+        }
+
+        GenerateFunctionEpilogue(masm, localSize_, &offsets_);
+
+#if defined(JS_ION_PERF)
+        // FIXME - profiling code missing.  No bug for this.
+
+        // Note the end of the inline code and start of the OOL code.
+        //gen->perfSpewer().noteEndInlineCode(masm);
+#endif
+
+        if (!generateOutOfLineCode())
+            return false;
+
+        masm.wasmEmitTrapOutOfLineCode();
+
+        offsets_.end = masm.currentOffset();
+
+        // A frame greater than 256KB is implausible, probably an attack,
+        // so fail the compilation.
+
+        if (maxFramePushed_ > 256 * 1024)
+            return false;
+
+        return !masm.oom();
+    }
+
+    //////////////////////////////////////////////////////////////////////
+    //
+    // Calls.
+
+    // State for one in-progress call: ABI argument iteration plus the stack
+    // bookkeeping and calling-convention flags set up by beginCall().
+    struct FunctionCall
+    {
+        explicit FunctionCall(uint32_t lineOrBytecode)
+          : lineOrBytecode(lineOrBytecode),
+            reloadMachineStateAfter(false),
+            usesSystemAbi(false),
+#ifdef JS_CODEGEN_ARM
+            hardFP(true),
+#endif
+            frameAlignAdjustment(0),
+            stackArgAreaSize(0)
+        {}
+
+        uint32_t lineOrBytecode;
+        ABIArgGenerator abi;
+        // True if TLS and pinned registers must be reloaded after the call
+        // returns (see endCall()).
+        bool reloadMachineStateAfter;
+        bool usesSystemAbi;
+#ifdef JS_CODEGEN_ARM
+        // Whether FP arguments/results use the hard-float (VFP) convention.
+        bool hardFP;
+#endif
+        // Padding added below the outgoing args to keep JitStackAlignment.
+        size_t frameAlignAdjustment;
+        // Bytes reserved for stack-passed arguments (set by startCallArgs).
+        size_t stackArgAreaSize;
+    };
+
+    // Initialize `call` for the given ABI: decide whether machine state must
+    // be reloaded afterward, select the ARM FP convention for system-ABI
+    // call-outs, and compute the frame alignment adjustment.
+    void beginCall(FunctionCall& call, UseABI useABI, InterModule interModule)
+    {
+        call.reloadMachineStateAfter = interModule == InterModule::True || useABI == UseABI::System;
+        call.usesSystemAbi = useABI == UseABI::System;
+
+        if (call.usesSystemAbi) {
+            // Call-outs need to use the appropriate system ABI.
+#if defined(JS_CODEGEN_ARM)
+# if defined(JS_SIMULATOR_ARM)
+            call.hardFP = UseHardFpABI();
+# elif defined(JS_CODEGEN_ARM_HARDFP)
+            call.hardFP = true;
+# else
+            call.hardFP = false;
+# endif
+            call.abi.setUseHardFp(call.hardFP);
+#endif
+        }
+
+        // Padding needed so that (frame + outgoing area) respects
+        // JitStackAlignment.
+        call.frameAlignAdjustment = ComputeByteAlignment(masm.framePushed() + sizeof(Frame),
+                                                         JitStackAlignment);
+    }
+
+    // Tear down after a call: free the outgoing-argument area, the alignment
+    // padding, and `stackSpace` extra bytes, then reload pinned registers if
+    // beginCall() determined that is needed.
+    void endCall(FunctionCall& call, size_t stackSpace)
+    {
+        size_t adjustment = call.stackArgAreaSize + call.frameAlignAdjustment;
+        masm.freeStack(stackSpace + adjustment);
+
+        if (call.reloadMachineStateAfter) {
+            // On x86 there are no pinned registers, so don't waste time
+            // reloading the Tls.
+#ifndef JS_CODEGEN_X86
+            masm.loadWasmTlsRegFromFrame();
+            masm.loadWasmPinnedRegsFromTls();
+#endif
+        }
+    }
+
+    // TODO / OPTIMIZE (Bug 1316821): This is expensive; let's roll the iterator
+    // walking into the walking done for passArg.  See comments in passArg.
+
+    // Note, stackArgAreaSize() must process all the arguments to get the
+    // alignment right; the signature must therefore be the complete call
+    // signature.
+
+    // Run the ABI iterator over every argument; once exhausted it knows the
+    // total outgoing stack consumption, which is rounded up to 16 bytes.
+    template<class T>
+    size_t stackArgAreaSize(const T& args) {
+        ABIArgIter<const T> iter(args);
+        for (; !iter.done(); iter++)
+            continue;
+        return AlignBytes(iter.stackBytesConsumedSoFar(), 16u);
+    }
+
+    // Record the outgoing-argument area size on the call and reserve it,
+    // together with the alignment padding computed by beginCall(), in a
+    // single stack adjustment.
+    void startCallArgs(FunctionCall& call, size_t stackArgAreaSize)
+    {
+        call.stackArgAreaSize = stackArgAreaSize;
+        size_t total = stackArgAreaSize + call.frameAlignAdjustment;
+        if (total != 0)
+            masm.reserveStack(total);
+    }
+
+    // Consume the next pointer-typed ABI slot from the call's argument
+    // iterator and return its location.
+    const ABIArg reservePointerArgument(FunctionCall& call) {
+        return call.abi.next(MIRType::Pointer);
+    }
+
+    // TODO / OPTIMIZE (Bug 1316821): Note passArg is used only in one place.
+    // (Or it was, until Luke wandered through, but that can be fixed again.)
+    // I'm not saying we should manually inline it, but we could hoist the
+    // dispatch into the caller and have type-specific implementations of
+    // passArg: passArgI32(), etc.  Then those might be inlined, at least in PGO
+    // builds.
+    //
+    // The bulk of the work here (60%) is in the next() call, though.
+    //
+    // Notably, since next() is so expensive, stackArgAreaSize() becomes
+    // expensive too.
+    //
+    // Somehow there could be a trick here where the sequence of
+    // argument types (read from the input stream) leads to a cached
+    // entry for stackArgAreaSize() and for how to pass arguments...
+    //
+    // But at least we could reduce the cost of stackArgAreaSize() by
+    // first reading the argument types into a (reusable) vector, then
+    // we have the outgoing size at low cost, and then we can pass
+    // args based on the info we read.
+
+    // Pass one argument: load `arg` from the value stack into the ABI
+    // location (register, register pair, or outgoing stack slot) assigned by
+    // the call's ABIArgGenerator for `type`.
+    void passArg(FunctionCall& call, ValType type, Stk& arg) {
+        switch (type) {
+          case ValType::I32: {
+            ABIArg argLoc = call.abi.next(MIRType::Int32);
+            if (argLoc.kind() == ABIArg::Stack) {
+                ScratchI32 scratch(*this);
+                loadI32(scratch, arg);
+                masm.store32(scratch, Address(StackPointer, argLoc.offsetFromArgBase()));
+            } else {
+                loadI32(argLoc.gpr(), arg);
+            }
+            break;
+          }
+          case ValType::I64: {
+            ABIArg argLoc = call.abi.next(MIRType::Int64);
+            if (argLoc.kind() == ABIArg::Stack) {
+                ScratchI32 scratch(*this);
+#if defined(JS_CODEGEN_X64)
+                loadI64(Register64(scratch), arg);
+                masm.movq(scratch, Operand(StackPointer, argLoc.offsetFromArgBase()));
+#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+                // 32-bit targets store the i64 as two 32-bit halves.
+                loadI64Low(scratch, arg);
+                masm.store32(scratch, LowWord(Address(StackPointer, argLoc.offsetFromArgBase())));
+                loadI64High(scratch, arg);
+                masm.store32(scratch, HighWord(Address(StackPointer, argLoc.offsetFromArgBase())));
+#else
+                MOZ_CRASH("BaseCompiler platform hook: passArg I64");
+#endif
+            } else {
+                loadI64(argLoc.gpr64(), arg);
+            }
+            break;
+          }
+          case ValType::F64: {
+            ABIArg argLoc = call.abi.next(MIRType::Double);
+            switch (argLoc.kind()) {
+              case ABIArg::Stack: {
+                ScratchF64 scratch(*this);
+                loadF64(scratch, arg);
+                masm.storeDouble(scratch, Address(StackPointer, argLoc.offsetFromArgBase()));
+                break;
+              }
+#if defined(JS_CODEGEN_REGISTER_PAIR)
+              case ABIArg::GPR_PAIR: {
+# ifdef JS_CODEGEN_ARM
+                // ARM soft-FP: a double is passed in a pair of GPRs.
+                ScratchF64 scratch(*this);
+                loadF64(scratch, arg);
+                masm.ma_vxfer(scratch, argLoc.evenGpr(), argLoc.oddGpr());
+                break;
+# else
+                MOZ_CRASH("BaseCompiler platform hook: passArg F64 pair");
+# endif
+              }
+#endif
+              case ABIArg::FPU: {
+                loadF64(argLoc.fpu(), arg);
+                break;
+              }
+              case ABIArg::GPR: {
+                MOZ_CRASH("Unexpected parameter passing discipline");
+              }
+              case ABIArg::Uninitialized:
+                MOZ_CRASH("Uninitialized ABIArg kind");
+            }
+            break;
+          }
+          case ValType::F32: {
+            ABIArg argLoc = call.abi.next(MIRType::Float32);
+            switch (argLoc.kind()) {
+              case ABIArg::Stack: {
+                ScratchF32 scratch(*this);
+                loadF32(scratch, arg);
+                masm.storeFloat32(scratch, Address(StackPointer, argLoc.offsetFromArgBase()));
+                break;
+              }
+              case ABIArg::GPR: {
+                // Soft-FP conventions pass a float in a single GPR.
+                ScratchF32 scratch(*this);
+                loadF32(scratch, arg);
+                masm.moveFloat32ToGPR(scratch, argLoc.gpr());
+                break;
+              }
+              case ABIArg::FPU: {
+                loadF32(argLoc.fpu(), arg);
+                break;
+              }
+#if defined(JS_CODEGEN_REGISTER_PAIR)
+              case ABIArg::GPR_PAIR: {
+                MOZ_CRASH("Unexpected parameter passing discipline");
+              }
+#endif
+              case ABIArg::Uninitialized:
+                MOZ_CRASH("Uninitialized ABIArg kind");
+            }
+            break;
+          }
+          default:
+            MOZ_CRASH("Function argument type");
+        }
+    }
+
+    // Direct call to a function defined in this module, by function index.
+    void callDefinition(uint32_t funcIndex, const FunctionCall& call)
+    {
+        CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Func);
+        masm.call(desc, funcIndex);
+    }
+
+    // Call to a runtime builtin identified by a symbolic address.
+    void callSymbolic(SymbolicAddress callee, const FunctionCall& call) {
+        CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Symbolic);
+        masm.call(desc, callee);
+    }
+
+    // Precondition: sync()
+
+    // Indirect call through the module's (single) table: load the table
+    // index into the dedicated register and emit a bounds-checked,
+    // signature-checked table call.
+    void callIndirect(uint32_t sigIndex, Stk& indexVal, const FunctionCall& call)
+    {
+        const SigWithId& sig = env_.sigs[sigIndex];
+        MOZ_ASSERT(sig.id.kind() != SigIdDesc::Kind::None);
+
+        // Only a single table is supported at this point.
+        MOZ_ASSERT(env_.tables.length() == 1);
+        const TableDesc& table = env_.tables[0];
+
+        loadI32(WasmTableCallIndexReg, indexVal);
+
+        CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Dynamic);
+        CalleeDesc callee = CalleeDesc::wasmTable(table, sig.id);
+        masm.wasmCallIndirect(desc, callee, NeedsBoundsCheck(true));
+    }
+
+    // Precondition: sync()
+
+    // Call an imported function through its import-table entry at
+    // `globalDataOffset`.
+    void callImport(unsigned globalDataOffset, const FunctionCall& call)
+    {
+        CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Dynamic);
+        CalleeDesc callee = CalleeDesc::import(globalDataOffset);
+        masm.wasmCallImport(desc, callee);
+    }
+
+    // Thin alias for callSymbolic(), kept for readability at call sites.
+    void builtinCall(SymbolicAddress builtin, const FunctionCall& call)
+    {
+        callSymbolic(builtin, call);
+    }
+
+    // Call a builtin that takes the instance as its first argument
+    // (previously reserved via reservePointerArgument()).
+    void builtinInstanceMethodCall(SymbolicAddress builtin, const ABIArg& instanceArg,
+                                   const FunctionCall& call)
+    {
+        // Builtin method calls assume the TLS register has been set.
+        masm.loadWasmTlsRegFromFrame();
+
+        CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Symbolic);
+        masm.wasmCallBuiltinInstanceMethod(desc, instanceArg, builtin);
+    }
+
+    //////////////////////////////////////////////////////////////////////
+    //
+    // Sundry low-level code generators.
+
+    // No code is emitted: interrupts are delivered via signal handlers, so
+    // this only asserts that the handlers are installed.
+    void addInterruptCheck()
+    {
+        // Always use signals for interrupts with Asm.JS/Wasm
+        MOZ_RELEASE_ASSERT(HaveSignalHandlers());
+    }
+
+    // Emit an inline jump table at `theTable`: one code pointer per label,
+    // for use by tableSwitch().
+    void jumpTable(LabelVector& labels, Label* theTable) {
+        // Flush constant pools to ensure that the table is never interrupted by
+        // constant pool entries.
+        masm.flush();
+
+        masm.bind(theTable);
+
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+        for (uint32_t i = 0; i < labels.length(); i++) {
+            CodeLabel cl;
+            masm.writeCodePointer(cl.patchAt());
+            cl.target()->bind(labels[i].offset());
+            masm.addCodeLabel(cl);
+        }
+#else
+        MOZ_CRASH("BaseCompiler platform hook: jumpTable");
+#endif
+    }
+
+    // Emit the dispatch code for a br_table: an indirect jump through the
+    // jump table at `theTable`, indexed by `switchValue` (assumed in range).
+    void tableSwitch(Label* theTable, RegI32 switchValue, Label* dispatchCode) {
+        masm.bind(dispatchCode);
+
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+        ScratchI32 scratch(*this);
+        CodeLabel tableCl;
+
+        masm.mov(tableCl.patchAt(), scratch);
+
+        tableCl.target()->bind(theTable->offset());
+        masm.addCodeLabel(tableCl);
+
+        masm.jmp(Operand(scratch, switchValue, ScalePointer));
+#elif defined(JS_CODEGEN_ARM)
+        // Flush constant pools: offset must reflect the distance from the MOV
+        // to the start of the table; as the address of the MOV is given by the
+        // label, nothing must come between the bind() and the ma_mov().
+        masm.flush();
+
+        ScratchI32 scratch(*this);
+
+        // Compute the offset from the ma_mov instruction to the jump table.
+        Label here;
+        masm.bind(&here);
+        uint32_t offset = here.offset() - theTable->offset();
+
+        // Read PC+8
+        masm.ma_mov(pc, scratch);
+
+        // ARM scratch register is required by ma_sub.
+        ScratchRegisterScope arm_scratch(*this);
+
+        // Compute the absolute table base pointer into `scratch`, offset by 8
+        // to account for the fact that ma_mov read PC+8.
+        masm.ma_sub(Imm32(offset + 8), scratch, arm_scratch);
+
+        // Jump indirect via table element.
+        masm.ma_ldr(DTRAddr(scratch, DtrRegImmShift(switchValue, LSL, 2)), pc, Offset,
+                    Assembler::Always);
+#else
+        MOZ_CRASH("BaseCompiler platform hook: tableSwitch");
+#endif
+    }
+
+    // After a call, take ownership of the ABI return register so it becomes
+    // the result value.  For F32/F64 on ARM soft-FP call-outs, the result
+    // arrives in GPR(s) and must be transferred into the FP register.
+
+    RegI32 captureReturnedI32() {
+        RegI32 rv = RegI32(ReturnReg);
+        MOZ_ASSERT(isAvailableI32(rv));
+        needI32(rv);
+        return rv;
+    }
+
+    RegI64 captureReturnedI64() {
+        RegI64 rv = RegI64(ReturnReg64);
+        MOZ_ASSERT(isAvailableI64(rv));
+        needI64(rv);
+        return rv;
+    }
+
+    RegF32 captureReturnedF32(const FunctionCall& call) {
+        RegF32 rv = RegF32(ReturnFloat32Reg);
+        MOZ_ASSERT(isAvailableF32(rv));
+        needF32(rv);
+#if defined(JS_CODEGEN_ARM)
+        if (call.usesSystemAbi && !call.hardFP)
+            masm.ma_vxfer(r0, rv);
+#endif
+        return rv;
+    }
+
+    RegF64 captureReturnedF64(const FunctionCall& call) {
+        RegF64 rv = RegF64(ReturnDoubleReg);
+        MOZ_ASSERT(isAvailableF64(rv));
+        needF64(rv);
+#if defined(JS_CODEGEN_ARM)
+        if (call.usesSystemAbi && !call.hardFP)
+            masm.ma_vxfer(r0, r1, rv);
+#endif
+        return rv;
+    }
+
+    // Branch to the function epilogue, first popping the execution stack to
+    // the function's entry level if requested.
+    void returnCleanup(bool popStack) {
+        if (popStack)
+            popStackBeforeBranch(controlOutermost().framePushed);
+        masm.jump(&returnLabel_);
+    }
+
+    // Pop divisor (*r1) then dividend (*r0), placing them in the registers
+    // the platform's div/mul instructions require.
+
+    void pop2xI32ForIntMulDiv(RegI32* r0, RegI32* r1) {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+        // srcDest must be eax, and edx will be clobbered.
+        need2xI32(specific_eax, specific_edx);
+        *r1 = popI32();
+        *r0 = popI32ToSpecific(specific_eax);
+        freeI32(specific_edx);
+#else
+        // No register constraints on other platforms.
+        pop2xI32(r0, r1);
+#endif
+    }
+
+    void pop2xI64ForIntDiv(RegI64* r0, RegI64* r1) {
+#ifdef JS_CODEGEN_X64
+        // srcDest must be rax, and rdx will be clobbered.
+        need2xI64(specific_rax, specific_rdx);
+        *r1 = popI64();
+        *r0 = popI64ToSpecific(specific_rax);
+        freeI64(specific_rdx);
+#else
+        pop2xI64(r0, r1);
+#endif
+    }
+
+    // Trap with IntegerDivideByZero when the divisor `rhs` is zero.
+    // srcDest/done are unused here; the signature parallels the overflow
+    // checks below.
+    void checkDivideByZeroI32(RegI32 rhs, RegI32 srcDest, Label* done) {
+        masm.branchTest32(Assembler::Zero, rhs, rhs, trap(Trap::IntegerDivideByZero));
+    }
+
+    void checkDivideByZeroI64(RegI64 r) {
+        ScratchI32 scratch(*this);
+        masm.branchTest64(Assembler::Zero, r, r, scratch, trap(Trap::IntegerDivideByZero));
+    }
+
+    // Handle the INT_MIN / -1 signed-division corner case.  With
+    // zeroOnOverflow (remainder): the result is 0 and we jump to `done`;
+    // without it (quotient): trap with IntegerOverflow.
+    void checkDivideSignedOverflowI32(RegI32 rhs, RegI32 srcDest, Label* done, bool zeroOnOverflow) {
+        Label notMin;
+        masm.branch32(Assembler::NotEqual, srcDest, Imm32(INT32_MIN), &notMin);
+        if (zeroOnOverflow) {
+            masm.branch32(Assembler::NotEqual, rhs, Imm32(-1), &notMin);
+            masm.move32(Imm32(0), srcDest);
+            masm.jump(done);
+        } else {
+            masm.branch32(Assembler::Equal, rhs, Imm32(-1), trap(Trap::IntegerOverflow));
+        }
+        masm.bind(&notMin);
+    }
+
+    // 64-bit analogue of checkDivideSignedOverflowI32.
+    void checkDivideSignedOverflowI64(RegI64 rhs, RegI64 srcDest, Label* done, bool zeroOnOverflow) {
+        Label notmin;
+        masm.branch64(Assembler::NotEqual, srcDest, Imm64(INT64_MIN), &notmin);
+        masm.branch64(Assembler::NotEqual, rhs, Imm64(-1), &notmin);
+        if (zeroOnOverflow) {
+            masm.xor64(srcDest, srcDest);
+            masm.jump(done);
+        } else {
+            masm.jump(trap(Trap::IntegerOverflow));
+        }
+        masm.bind(&notmin);
+    }
+
+#ifndef RABALDR_INT_DIV_I64_CALLOUT
+    // In-register 64-bit division (only compiled when the platform does not
+    // use a callout — see the RABALDR_INT_DIV_I64_CALLOUT guard).  The
+    // divide-by-zero and INT64_MIN/-1 checks are skipped when the divisor is
+    // a constant (`isConst`) that cannot trigger them.
+    void quotientI64(RegI64 rhs, RegI64 srcDest, IsUnsigned isUnsigned,
+                     bool isConst, int64_t c)
+    {
+        Label done;
+
+        if (!isConst || c == 0)
+            checkDivideByZeroI64(rhs);
+
+        if (!isUnsigned && (!isConst || c == -1))
+            checkDivideSignedOverflowI64(rhs, srcDest, &done, ZeroOnOverflow(false));
+
+# if defined(JS_CODEGEN_X64)
+        // The caller must set up the following situation.
+        MOZ_ASSERT(srcDest.reg == rax);
+        MOZ_ASSERT(isAvailableI64(specific_rdx));
+        if (isUnsigned) {
+            masm.xorq(rdx, rdx);
+            masm.udivq(rhs.reg);
+        } else {
+            masm.cqo();
+            masm.idivq(rhs.reg);
+        }
+# else
+        MOZ_CRASH("BaseCompiler platform hook: quotientI64");
+# endif
+        masm.bind(&done);
+    }
+
+    // As quotientI64, but leaves the remainder in srcDest.
+    void remainderI64(RegI64 rhs, RegI64 srcDest, IsUnsigned isUnsigned,
+                      bool isConst, int64_t c)
+    {
+        Label done;
+
+        if (!isConst || c == 0)
+            checkDivideByZeroI64(rhs);
+
+        if (!isUnsigned && (!isConst || c == -1))
+            checkDivideSignedOverflowI64(rhs, srcDest, &done, ZeroOnOverflow(true));
+
+# if defined(JS_CODEGEN_X64)
+        // The caller must set up the following situation.
+        MOZ_ASSERT(srcDest.reg == rax);
+        MOZ_ASSERT(isAvailableI64(specific_rdx));
+
+        if (isUnsigned) {
+            masm.xorq(rdx, rdx);
+            masm.udivq(rhs.reg);
+        } else {
+            masm.cqo();
+            masm.idivq(rhs.reg);
+        }
+        // The remainder is produced in rdx; move it into the result register.
+        masm.movq(rdx, rax);
+# else
+        MOZ_CRASH("BaseCompiler platform hook: remainderI64");
+# endif
+        masm.bind(&done);
+    }
+#endif // RABALDR_INT_DIV_I64_CALLOUT
+
+    // Pop shift/rotate count (*r1) then value (*r0).  On x86/x64 the count
+    // must be in ecx (cl) for the shift instructions.
+
+    void pop2xI32ForShiftOrRotate(RegI32* r0, RegI32* r1) {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+        *r1 = popI32(specific_ecx);
+        *r0 = popI32();
+#else
+        pop2xI32(r0, r1);
+#endif
+    }
+
+    void pop2xI64ForShiftOrRotate(RegI64* r0, RegI64* r1) {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+        // The 64-bit count also lands in ecx, widened to a RegI64.
+        needI32(specific_ecx);
+        *r1 = widenI32(specific_ecx);
+        *r1 = popI64ToSpecific(*r1);
+        *r0 = popI64();
+#else
+        pop2xI64(r0, r1);
+#endif
+    }
+
+    // Whether a 64-bit rotate needs a scratch register on this platform
+    // (only 32-bit x86 does).
+    bool rotate64NeedsTemp() const {
+        bool needTemp = false;
+#if defined(JS_CODEGEN_X86)
+        needTemp = true;
+#endif
+        return needTemp;
+    }
+
+    // On ARM, explicitly mask the shift count to [0,31]; other platforms
+    // emit nothing here — presumably their shift instructions mask the count
+    // implicitly (TODO confirm).
+    void maskShiftCount32(RegI32 r) {
+#if defined(JS_CODEGEN_ARM)
+        masm.and32(Imm32(31), r);
+#endif
+    }
+
+    // Whether popcount needs a scratch register: on x86/x64 only when the
+    // hardware POPCNT instruction is unavailable; on ARM always.
+
+    bool popcnt32NeedsTemp() const {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+        return !AssemblerX86Shared::HasPOPCNT();
+#elif defined(JS_CODEGEN_ARM)
+        return true;
+#else
+        MOZ_CRASH("BaseCompiler platform hook: popcnt32NeedsTemp");
+#endif
+    }
+
+    bool popcnt64NeedsTemp() const {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+        return !AssemblerX86Shared::HasPOPCNT();
+#elif defined(JS_CODEGEN_ARM)
+        return true;
+#else
+        MOZ_CRASH("BaseCompiler platform hook: popcnt64NeedsTemp");
+#endif
+    }
+
+    // Pop an i32 and return it widened to an i64 register, constrained on
+    // x86 to the edx:eax pair used for sign extension.
+    RegI64 popI32ForSignExtendI64() {
+#if defined(JS_CODEGEN_X86)
+        need2xI32(specific_edx, specific_eax);
+        RegI32 r0 = popI32ToSpecific(specific_eax);
+        RegI64 x0 = RegI64(Register64(specific_edx, specific_eax));
+        (void)r0;               // x0 is the widening of r0
+#else
+        RegI32 r0 = popI32();
+        RegI64 x0 = widenI32(r0);
+#endif
+        return x0;
+    }
+
+    // Pop an i64, constrained on x86 to the edx:eax pair.
+    RegI64 popI64ForSignExtendI64() {
+#if defined(JS_CODEGEN_X86)
+        need2xI32(specific_edx, specific_eax);
+        // Low on top, high underneath
+        return popI64ToSpecific(RegI64(Register64(specific_edx, specific_eax)));
+#else
+        return popI64();
+#endif
+    }
+
+    // Out-of-line path taken when the inline float/double -> i32 truncation
+    // fails; dispatches to the platform's out-of-line masm helper and then
+    // rejoins the inline code.
+    class OutOfLineTruncateF32OrF64ToI32 : public OutOfLineCode
+    {
+        AnyReg src;                // F32 or F64 source register
+        RegI32 dest;
+        bool isUnsigned;           // unsigned vs signed truncation
+        BytecodeOffset off;        // bytecode offset for trap reporting
+
+      public:
+        OutOfLineTruncateF32OrF64ToI32(AnyReg src, RegI32 dest, bool isUnsigned, BytecodeOffset off)
+          : src(src),
+            dest(dest),
+            isUnsigned(isUnsigned),
+            off(off)
+        {}
+
+        virtual void generate(MacroAssembler& masm) {
+            bool isFloat = src.tag == AnyReg::F32;
+            FloatRegister fsrc = isFloat ? static_cast<FloatRegister>(src.f32())
+                                         : static_cast<FloatRegister>(src.f64());
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+            if (isFloat)
+                masm.outOfLineWasmTruncateFloat32ToInt32(fsrc, isUnsigned, off, rejoin());
+            else
+                masm.outOfLineWasmTruncateDoubleToInt32(fsrc, isUnsigned, off, rejoin());
+#elif defined(JS_CODEGEN_ARM)
+            masm.outOfLineWasmTruncateToIntCheck(fsrc,
+                                                 isFloat ? MIRType::Float32 : MIRType::Double,
+                                                 MIRType::Int32, isUnsigned, rejoin(), off);
+#else
+            (void)isUnsigned;
+            (void)off;
+            (void)isFloat;
+            (void)fsrc;
+            MOZ_CRASH("BaseCompiler platform hook: OutOfLineTruncateF32OrF64ToI32 wasm");
+#endif
+        }
+    };
+
+    // Truncate the f32 in |src| to an i32 in |dest|, trapping on NaN and
+    // out-of-range inputs via the out-of-line path.  |isUnsigned| selects
+    // unsigned truncation.  Returns false on OOM.
+    MOZ_MUST_USE bool truncateF32ToI32(RegF32 src, RegI32 dest, bool isUnsigned) {
+        BytecodeOffset off = bytecodeOffset();
+        // Initialize to nullptr so the porting path below cannot leave |ool|
+        // uninitialized (MOZ_CRASH does not return, but compilers may still
+        // warn about the bind() below).
+        OutOfLineCode* ool = nullptr;
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM)
+        ool = new(alloc_) OutOfLineTruncateF32OrF64ToI32(AnyReg(src), dest, isUnsigned, off);
+        ool = addOutOfLineCode(ool);
+        if (!ool)
+            return false;
+        if (isUnsigned)
+            masm.wasmTruncateFloat32ToUInt32(src, dest, ool->entry());
+        else
+            masm.wasmTruncateFloat32ToInt32(src, dest, ool->entry());
+#else
+        (void)off;
+        MOZ_CRASH("BaseCompiler platform hook: truncateF32ToI32 wasm");
+#endif
+        masm.bind(ool->rejoin());
+        return true;
+    }
+
+    // Truncate the f64 in |src| to an i32 in |dest|, trapping on NaN and
+    // out-of-range inputs via the out-of-line path.  |isUnsigned| selects
+    // unsigned truncation.  Returns false on OOM.
+    MOZ_MUST_USE bool truncateF64ToI32(RegF64 src, RegI32 dest, bool isUnsigned) {
+        BytecodeOffset off = bytecodeOffset();
+        // Initialize to nullptr so the porting path below cannot leave |ool|
+        // uninitialized (MOZ_CRASH does not return, but compilers may still
+        // warn about the bind() below).
+        OutOfLineCode* ool = nullptr;
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM)
+        ool = new(alloc_) OutOfLineTruncateF32OrF64ToI32(AnyReg(src), dest, isUnsigned, off);
+        ool = addOutOfLineCode(ool);
+        if (!ool)
+            return false;
+        if (isUnsigned)
+            masm.wasmTruncateDoubleToUInt32(src, dest, ool->entry());
+        else
+            masm.wasmTruncateDoubleToInt32(src, dest, ool->entry());
+#else
+        (void)off;
+        MOZ_CRASH("BaseCompiler platform hook: truncateF64ToI32 wasm");
+#endif
+        masm.bind(ool->rejoin());
+        return true;
+    }
+
+    // This does not generate a value; if the truncation failed then it traps.
+
+    // Out-of-line *check* for f32/f64 -> i64 truncation.  Produces no value:
+    // it either rejoins the inline conversion or traps at bytecode offset
+    // |off| on NaN / out-of-range inputs.
+    class OutOfLineTruncateCheckF32OrF64ToI64 : public OutOfLineCode
+    {
+        AnyReg src;         // Source value; tag is F32 or F64
+        bool isUnsigned;    // Unsigned (vs signed) truncation
+        BytecodeOffset off; // Bytecode offset for trap reporting
+
+      public:
+        OutOfLineTruncateCheckF32OrF64ToI64(AnyReg src, bool isUnsigned, BytecodeOffset off)
+          : src(src),
+            isUnsigned(isUnsigned),
+            off(off)
+        {}
+
+        virtual void generate(MacroAssembler& masm) {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+            if (src.tag == AnyReg::F32)
+                masm.outOfLineWasmTruncateFloat32ToInt64(src.f32(), isUnsigned, off, rejoin());
+            else if (src.tag == AnyReg::F64)
+                masm.outOfLineWasmTruncateDoubleToInt64(src.f64(), isUnsigned, off, rejoin());
+            else
+                MOZ_CRASH("unexpected type");
+#elif defined(JS_CODEGEN_ARM)
+            if (src.tag == AnyReg::F32)
+                masm.outOfLineWasmTruncateToIntCheck(src.f32(), MIRType::Float32,
+                                                     MIRType::Int64, isUnsigned, rejoin(), off);
+            else if (src.tag == AnyReg::F64)
+                masm.outOfLineWasmTruncateToIntCheck(src.f64(), MIRType::Double, MIRType::Int64,
+                                                     isUnsigned, rejoin(), off);
+            else
+                MOZ_CRASH("unexpected type");
+#else
+            (void)src;
+            (void)isUnsigned;
+            (void)off;
+            MOZ_CRASH("BaseCompiler platform hook: OutOfLineTruncateCheckF32OrF64ToI64");
+#endif
+        }
+    };
+
+#ifndef RABALDR_FLOAT_TO_I64_CALLOUT
+    // Truncate the f32 in |src| to an i64 in |dest|, trapping on failure via
+    // the out-of-line check.  |temp| is passed through to the masm helper.
+    // Returns false on OOM.  Note the masm helper receives the rejoin label
+    // directly; no bind is needed here.
+    MOZ_MUST_USE bool truncateF32ToI64(RegF32 src, RegI64 dest, bool isUnsigned, RegF64 temp) {
+# if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+        OutOfLineCode* ool =
+            addOutOfLineCode(new (alloc_) OutOfLineTruncateCheckF32OrF64ToI64(AnyReg(src),
+                                                                              isUnsigned,
+                                                                              bytecodeOffset()));
+        if (!ool)
+            return false;
+        if (isUnsigned)
+            masm.wasmTruncateFloat32ToUInt64(src, dest, ool->entry(),
+                                             ool->rejoin(), temp);
+        else
+            masm.wasmTruncateFloat32ToInt64(src, dest, ool->entry(),
+                                            ool->rejoin(), temp);
+# else
+        MOZ_CRASH("BaseCompiler platform hook: truncateF32ToI64");
+# endif
+        return true;
+    }
+
+    // Truncate the f64 in |src| to an i64 in |dest|, trapping on failure via
+    // the out-of-line check.  |temp| is passed through to the masm helper.
+    // Returns false on OOM.  The masm helper receives the rejoin label
+    // directly; no bind is needed here.
+    MOZ_MUST_USE bool truncateF64ToI64(RegF64 src, RegI64 dest, bool isUnsigned, RegF64 temp) {
+# if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+        OutOfLineCode* ool =
+            addOutOfLineCode(new (alloc_) OutOfLineTruncateCheckF32OrF64ToI64(AnyReg(src),
+                                                                              isUnsigned,
+                                                                              bytecodeOffset()));
+        if (!ool)
+            return false;
+        if (isUnsigned)
+            masm.wasmTruncateDoubleToUInt64(src, dest, ool->entry(),
+                                            ool->rejoin(), temp);
+        else
+            masm.wasmTruncateDoubleToInt64(src, dest, ool->entry(),
+                                           ool->rejoin(), temp);
+# else
+        MOZ_CRASH("BaseCompiler platform hook: truncateF64ToI64");
+# endif
+        return true;
+    }
+#endif // RABALDR_FLOAT_TO_I64_CALLOUT
+
+#ifndef RABALDR_I64_TO_FLOAT_CALLOUT
+    // Whether convertI64ToF32/convertI64ToF64 needs an i32 temp.  On x86 a
+    // temp is needed only for unsigned conversion, and for F64 only when
+    // SSE3 is available; on other platforms, whenever the source is
+    // unsigned.
+    bool convertI64ToFloatNeedsTemp(ValType to, bool isUnsigned) const {
+# if defined(JS_CODEGEN_X86)
+        return isUnsigned &&
+               ((to == ValType::F64 && AssemblerX86Shared::HasSSE3()) ||
+               to == ValType::F32);
+# else
+        return isUnsigned;
+# endif
+    }
+
+    // Convert the i64 in |src| to an f32 in |dest|.  |temp| is used only by
+    // the unsigned path (see convertI64ToFloatNeedsTemp).
+    void convertI64ToF32(RegI64 src, bool isUnsigned, RegF32 dest, RegI32 temp) {
+# if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+        if (isUnsigned)
+            masm.convertUInt64ToFloat32(src, dest, temp);
+        else
+            masm.convertInt64ToFloat32(src, dest);
+# else
+        MOZ_CRASH("BaseCompiler platform hook: convertI64ToF32");
+# endif
+    }
+
+    // Convert the i64 in |src| to an f64 in |dest|.  |temp| is used only by
+    // the unsigned path (see convertI64ToFloatNeedsTemp).
+    void convertI64ToF64(RegI64 src, bool isUnsigned, RegF64 dest, RegI32 temp) {
+# if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+        if (isUnsigned)
+            masm.convertUInt64ToDouble(src, dest, temp);
+        else
+            masm.convertInt64ToDouble(src, dest);
+# else
+        MOZ_CRASH("BaseCompiler platform hook: convertI64ToF64");
+# endif
+    }
+#endif // RABALDR_I64_TO_FLOAT_CALLOUT
+
+    // Materialize the result of a 64-bit comparison: set |dest| to 1 if
+    // |lhs cond rhs| holds, otherwise to 0.
+    void cmp64Set(Assembler::Condition cond, RegI64 lhs, RegI64 rhs, RegI32 dest) {
+#if defined(JS_CODEGEN_X64)
+        masm.cmpq(rhs.reg, lhs.reg);
+        masm.emitSet(cond, dest);
+#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+        // TODO / OPTIMIZE (Bug 1316822): This is pretty branchy, we should be
+        // able to do better.
+        Label done, condTrue;
+        masm.branch64(cond, lhs, rhs, &condTrue);
+        masm.move32(Imm32(0), dest);
+        masm.jump(&done);
+        masm.bind(&condTrue);
+        masm.move32(Imm32(1), dest);
+        masm.bind(&done);
+#else
+        MOZ_CRASH("BaseCompiler platform hook: cmp64Set");
+#endif
+    }
+
+    // Set |dest| to 1 if the i64 in |src| is zero, otherwise 0.  On 32-bit
+    // platforms the halves are OR'ed together first; note this appears to
+    // clobber src.low (verify against the masm or32 operand convention).
+    void eqz64(RegI64 src, RegI32 dest) {
+#if defined(JS_CODEGEN_X64)
+        masm.cmpq(Imm32(0), src.reg);
+        masm.emitSet(Assembler::Equal, dest);
+#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+        masm.or32(src.high, src.low);
+        masm.cmp32(src.low, Imm32(0));
+        masm.emitSet(Assembler::Equal, dest);
+#else
+        MOZ_CRASH("BaseCompiler platform hook: eqz64");
+#endif
+    }
+
+    // Emit an unconditional jump to the Unreachable trap.  The debug-build
+    // breakpoint is a backstop: control should never fall through the jump.
+    void unreachableTrap()
+    {
+        masm.jump(trap(Trap::Unreachable));
+#ifdef DEBUG
+        masm.breakpoint();
+#endif
+    }
+
+
+    // Whether the target has a hardware rounding instruction for |mode|
+    // (x86/x64 only; queried via the assembler).
+    MOZ_MUST_USE bool
+    supportsRoundInstruction(RoundingMode mode)
+    {
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+        return Assembler::HasRoundInstruction(mode);
+#else
+        return false;
+#endif
+    }
+
+    // Round the f32 in |f0| in place according to |roundingMode|.  Only
+    // valid when supportsRoundInstruction(roundingMode) is true.
+    void
+    roundF32(RoundingMode roundingMode, RegF32 f0)
+    {
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+        masm.vroundss(Assembler::ToX86RoundingMode(roundingMode), f0, f0, f0);
+#else
+        MOZ_CRASH("NYI");
+#endif
+    }
+
+    // Round the f64 in |f0| in place according to |roundingMode|.  Only
+    // valid when supportsRoundInstruction(roundingMode) is true.
+    void
+    roundF64(RoundingMode roundingMode, RegF64 f0)
+    {
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+        masm.vroundsd(Assembler::ToX86RoundingMode(roundingMode), f0, f0, f0);
+#else
+        MOZ_CRASH("NYI");
+#endif
+    }
+
+    //////////////////////////////////////////////////////////////////////
+    //
+    // Global variable access.
+
+    // Translate an offset within the global data area into an offset from
+    // the TLS pointer.
+    uint32_t globalToTlsOffset(uint32_t globalOffset) {
+        uint32_t globalAreaStart = offsetof(TlsData, globalArea);
+        return globalAreaStart + globalOffset;
+    }
+
+    // Load the i32 global at |globalDataOffset| into |r|.
+    void loadGlobalVarI32(unsigned globalDataOffset, RegI32 r)
+    {
+        ScratchI32 tls(*this);
+        masm.loadWasmTlsRegFromFrame(tls);
+        Address src(tls, globalToTlsOffset(globalDataOffset));
+        masm.load32(src, r);
+    }
+
+    // Load the i64 global at |globalDataOffset| into |r|.
+    void loadGlobalVarI64(unsigned globalDataOffset, RegI64 r)
+    {
+        ScratchI32 tls(*this);
+        masm.loadWasmTlsRegFromFrame(tls);
+        Address src(tls, globalToTlsOffset(globalDataOffset));
+        masm.load64(src, r);
+    }
+
+    // Load the f32 global at |globalDataOffset| into |r|.
+    void loadGlobalVarF32(unsigned globalDataOffset, RegF32 r)
+    {
+        ScratchI32 tls(*this);
+        masm.loadWasmTlsRegFromFrame(tls);
+        Address src(tls, globalToTlsOffset(globalDataOffset));
+        masm.loadFloat32(src, r);
+    }
+
+    // Load the f64 global at |globalDataOffset| into |r|.
+    void loadGlobalVarF64(unsigned globalDataOffset, RegF64 r)
+    {
+        ScratchI32 tls(*this);
+        masm.loadWasmTlsRegFromFrame(tls);
+        Address src(tls, globalToTlsOffset(globalDataOffset));
+        masm.loadDouble(src, r);
+    }
+
+    // Store the i32 in |r| into the global at |globalDataOffset|.
+    void storeGlobalVarI32(unsigned globalDataOffset, RegI32 r)
+    {
+        ScratchI32 tls(*this);
+        masm.loadWasmTlsRegFromFrame(tls);
+        Address dst(tls, globalToTlsOffset(globalDataOffset));
+        masm.store32(r, dst);
+    }
+
+    // Store the i64 in |r| into the global at |globalDataOffset|.
+    void storeGlobalVarI64(unsigned globalDataOffset, RegI64 r)
+    {
+        ScratchI32 tls(*this);
+        masm.loadWasmTlsRegFromFrame(tls);
+        Address dst(tls, globalToTlsOffset(globalDataOffset));
+        masm.store64(r, dst);
+    }
+
+    // Store the f32 in |r| into the global at |globalDataOffset|.
+    void storeGlobalVarF32(unsigned globalDataOffset, RegF32 r)
+    {
+        ScratchI32 tls(*this);
+        masm.loadWasmTlsRegFromFrame(tls);
+        Address dst(tls, globalToTlsOffset(globalDataOffset));
+        masm.storeFloat32(r, dst);
+    }
+
+    // Store the f64 in |r| into the global at |globalDataOffset|.
+    void storeGlobalVarF64(unsigned globalDataOffset, RegF64 r)
+    {
+        ScratchI32 tls(*this);
+        masm.loadWasmTlsRegFromFrame(tls);
+        Address dst(tls, globalToTlsOffset(globalDataOffset));
+        masm.storeDouble(r, dst);
+    }
+
+    //////////////////////////////////////////////////////////////////////
+    //
+    // Heap access.
+
+    // Bounds-check-elimination bookkeeping for an access whose pointer comes
+    // from |local|: if the local is already known safe and the offset is
+    // within the guard region, omit the bounds check; in any case mark the
+    // local safe for subsequent accesses.
+    void bceCheckLocal(MemoryAccessDesc* access, AccessCheck* check, uint32_t local) {
+        // Locals beyond the width of the BCE bitset are not tracked.
+        if (local >= sizeof(BCESet)*8)
+            return;
+
+        if ((bceSafe_ & (BCESet(1) << local)) && access->offset() < wasm::OffsetGuardLimit)
+            check->omitBoundsCheck = true;
+
+        // The local becomes safe even if the offset is beyond the guard limit.
+        bceSafe_ |= (BCESet(1) << local);
+    }
+
+    // The local was assigned a new value, so any knowledge that it is
+    // bounds-check safe must be discarded.
+    void bceLocalIsUpdated(uint32_t local) {
+        if (local >= sizeof(BCESet)*8)
+            return;
+
+        bceSafe_ &= ~(BCESet(1) << local);
+    }
+
+    // Emit the pointer checks for a heap access: fold a large (or atomic)
+    // offset into |ptr| with an overflow trap, emit the alignment check for
+    // atomics, and emit the bounds check unless it can be omitted.
+    void prepareMemoryAccess(MemoryAccessDesc* access, AccessCheck* check, RegI32 tls, RegI32 ptr) {
+
+        // Fold offset if necessary for further computations.
+
+        if (access->offset() >= OffsetGuardLimit ||
+            (access->isAtomic() && !check->omitAlignmentCheck && !check->onlyPointerAlignment))
+        {
+            masm.branchAdd32(Assembler::CarrySet, Imm32(access->offset()), ptr,
+                             trap(Trap::OutOfBounds));
+            access->clearOffset();
+            check->onlyPointerAlignment = true;
+        }
+
+        // Alignment check if required.
+
+        if (access->isAtomic() && !check->omitAlignmentCheck) {
+            MOZ_ASSERT(check->onlyPointerAlignment);
+            // We only care about the low pointer bits here.
+            masm.branchTest32(Assembler::NonZero, ptr, Imm32(access->byteSize() - 1),
+                              trap(Trap::UnalignedAccess));
+        }
+
+        // Ensure no tls if we don't need it.
+
+#ifdef WASM_HUGE_MEMORY
+        // We have HeapReg and no bounds checking and need load neither
+        // memoryBase nor boundsCheckLimit from tls.
+        MOZ_ASSERT_IF(check->omitBoundsCheck, tls == invalidI32());
+#endif
+#ifdef JS_CODEGEN_ARM
+        // We have HeapReg on ARM and don't need to load the memoryBase from tls.
+        MOZ_ASSERT_IF(check->omitBoundsCheck, tls == invalidI32());
+#endif
+
+        // Bounds check if required.
+
+#ifndef WASM_HUGE_MEMORY
+        if (!check->omitBoundsCheck) {
+            masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr,
+                                 Address(tls, offsetof(TlsData, boundsCheckLimit)),
+                                 trap(Trap::OutOfBounds));
+        }
+#endif
+    }
+
+    // Allocate the temps an unaligned ARM load requires (one for integers,
+    // two for f32, three for f64 — note the fallthroughs); a no-op on other
+    // platforms and for aligned accesses.
+    void needLoadTemps(const MemoryAccessDesc& access, RegI32* tmp1, RegI32* tmp2, RegI32* tmp3) {
+#if defined(JS_CODEGEN_ARM)
+        if (IsUnaligned(access)) {
+            switch (access.type()) {
+              case Scalar::Float64:
+                *tmp3 = needI32();
+                MOZ_FALLTHROUGH;
+              case Scalar::Float32:
+                *tmp2 = needI32();
+                MOZ_FALLTHROUGH;
+              default:
+                *tmp1 = needI32();
+                break;
+            }
+        }
+#endif
+    }
+
+    // Whether the access needs the tls pointer: on ARM only when a bounds
+    // check will be emitted; on x86 always (the memory base is loaded from
+    // tls); elsewhere never.
+    MOZ_MUST_USE bool needTlsForAccess(const AccessCheck& check) {
+#if defined(JS_CODEGEN_ARM)
+        return !check.omitBoundsCheck;
+#elif defined(JS_CODEGEN_X86)
+        return true;
+#else
+        return false;
+#endif
+    }
+
+    // ptr and dest may be the same iff dest is I32.
+    // This may destroy ptr even if ptr and dest are not the same.
+    //
+    // Emit a complete load: pointer checks via prepareMemoryAccess, then the
+    // platform-specific load.  tmp1..tmp3 are used only for ARM unaligned
+    // accesses (see needLoadTemps).
+    MOZ_MUST_USE bool load(MemoryAccessDesc* access, AccessCheck* check, RegI32 tls, RegI32 ptr,
+                           AnyReg dest, RegI32 tmp1, RegI32 tmp2, RegI32 tmp3)
+    {
+        prepareMemoryAccess(access, check, tls, ptr);
+
+#if defined(JS_CODEGEN_X64)
+        Operand srcAddr(HeapReg, ptr, TimesOne, access->offset());
+
+        if (dest.tag == AnyReg::I64)
+            masm.wasmLoadI64(*access, srcAddr, dest.i64());
+        else
+            masm.wasmLoad(*access, srcAddr, dest.any());
+#elif defined(JS_CODEGEN_X86)
+        masm.addPtr(Address(tls, offsetof(TlsData, memoryBase)), ptr);
+        Operand srcAddr(ptr, access->offset());
+
+        if (dest.tag == AnyReg::I64) {
+            MOZ_ASSERT(dest.i64() == abiReturnRegI64);
+            masm.wasmLoadI64(*access, srcAddr, dest.i64());
+        } else {
+            // Byte loads need a single-byte-addressable register; route
+            // through the scratch when dest is not one.
+            bool byteRegConflict = access->byteSize() == 1 && !ra.isSingleByteI32(dest.i32());
+            AnyRegister out = byteRegConflict ? AnyRegister(ScratchRegX86) : dest.any();
+
+            masm.wasmLoad(*access, srcAddr, out);
+
+            if (byteRegConflict)
+                masm.mov(ScratchRegX86, dest.i32());
+        }
+#elif defined(JS_CODEGEN_ARM)
+        if (IsUnaligned(*access)) {
+            switch (dest.tag) {
+              case AnyReg::I64:
+                masm.wasmUnalignedLoadI64(*access, HeapReg, ptr, ptr, dest.i64(), tmp1);
+                break;
+              case AnyReg::F32:
+                masm.wasmUnalignedLoadFP(*access, HeapReg, ptr, ptr, dest.f32(), tmp1, tmp2,
+                                         Register::Invalid());
+                break;
+              case AnyReg::F64:
+                masm.wasmUnalignedLoadFP(*access, HeapReg, ptr, ptr, dest.f64(), tmp1, tmp2, tmp3);
+                break;
+              default:
+                masm.wasmUnalignedLoad(*access, HeapReg, ptr, ptr, dest.i32(), tmp1);
+                break;
+            }
+        } else {
+            if (dest.tag == AnyReg::I64)
+                masm.wasmLoadI64(*access, HeapReg, ptr, ptr, dest.i64());
+            else
+                masm.wasmLoad(*access, HeapReg, ptr, ptr, dest.any());
+        }
+#else
+        MOZ_CRASH("BaseCompiler platform hook: load");
+#endif
+
+        return true;
+    }
+
+    // Allocate the temp an unaligned non-i32 ARM store requires; a no-op
+    // otherwise.
+    void needStoreTemps(const MemoryAccessDesc& access, ValType srcType, RegI32* tmp) {
+#if defined(JS_CODEGEN_ARM)
+        if (IsUnaligned(access) && srcType != ValType::I32)
+            *tmp = needI32();
+#endif
+    }
+
+    // ptr and src must not be the same register.
+    // This may destroy ptr and src.
+    //
+    // Emit a complete store: pointer checks via prepareMemoryAccess, then
+    // the platform-specific store.  |tmp| is used only for ARM unaligned
+    // non-i32 stores (see needStoreTemps).
+    MOZ_MUST_USE bool store(MemoryAccessDesc* access, AccessCheck* check, RegI32 tls, RegI32 ptr,
+                            AnyReg src, RegI32 tmp)
+    {
+        prepareMemoryAccess(access, check, tls, ptr);
+
+        // Emit the store
+#if defined(JS_CODEGEN_X64)
+        MOZ_ASSERT(tmp == invalidI32());
+        Operand dstAddr(HeapReg, ptr, TimesOne, access->offset());
+
+        masm.wasmStore(*access, src.any(), dstAddr);
+#elif defined(JS_CODEGEN_X86)
+        MOZ_ASSERT(tmp == invalidI32());
+        masm.addPtr(Address(tls, offsetof(TlsData, memoryBase)), ptr);
+        Operand dstAddr(ptr, access->offset());
+
+        if (access->type() == Scalar::Int64) {
+            masm.wasmStoreI64(*access, src.i64(), dstAddr);
+        } else {
+            // Byte stores need a single-byte-addressable source register;
+            // route through the scratch when the source is not one.
+            AnyRegister value;
+            if (src.tag == AnyReg::I64) {
+                if (access->byteSize() == 1 && !ra.isSingleByteI32(src.i64().low)) {
+                    masm.mov(src.i64().low, ScratchRegX86);
+                    value = AnyRegister(ScratchRegX86);
+                } else {
+                    value = AnyRegister(src.i64().low);
+                }
+            } else if (access->byteSize() == 1 && !ra.isSingleByteI32(src.i32())) {
+                masm.mov(src.i32(), ScratchRegX86);
+                value = AnyRegister(ScratchRegX86);
+            } else {
+                value = src.any();
+            }
+
+            masm.wasmStore(*access, value, dstAddr);
+        }
+#elif defined(JS_CODEGEN_ARM)
+        if (IsUnaligned(*access)) {
+            switch (src.tag) {
+              case AnyReg::I64:
+                masm.wasmUnalignedStoreI64(*access, src.i64(), HeapReg, ptr, ptr, tmp);
+                break;
+              case AnyReg::F32:
+                masm.wasmUnalignedStoreFP(*access, src.f32(), HeapReg, ptr, ptr, tmp);
+                break;
+              case AnyReg::F64:
+                masm.wasmUnalignedStoreFP(*access, src.f64(), HeapReg, ptr, ptr, tmp);
+                break;
+              default:
+                MOZ_ASSERT(tmp == invalidI32());
+                masm.wasmUnalignedStore(*access, src.i32(), HeapReg, ptr, ptr);
+                break;
+            }
+        } else {
+            MOZ_ASSERT(tmp == invalidI32());
+            if (access->type() == Scalar::Int64)
+                masm.wasmStoreI64(*access, src.i64(), HeapReg, ptr, ptr);
+            else if (src.tag == AnyReg::I64)
+                masm.wasmStore(*access, AnyRegister(src.i64().low), HeapReg, ptr, ptr);
+            else
+                masm.wasmStore(*access, src.any(), HeapReg, ptr, ptr);
+        }
+#else
+        MOZ_CRASH("BaseCompiler platform hook: store");
+#endif
+
+        return true;
+    }
+
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM)
+
+    // ATOMIC_PTR(name, access, tls, ptr) declares |name| as the effective
+    // address of an atomic access.  HeapReg is available, so tls is unused.
+# define ATOMIC_PTR(name, access, tls, ptr)                             \
+    BaseIndex name(HeapReg, (ptr), TimesOne, (access)->offset())
+
+#elif defined(JS_CODEGEN_X86)
+
+    // x86 has no HeapReg: fold the memory base from tls into ptr first.
+# define ATOMIC_PTR(name, access, tls, ptr)                             \
+    MOZ_ASSERT((tls) != invalidI32());                                  \
+    masm.addPtr(Address((tls), offsetof(TlsData, memoryBase)), (ptr));  \
+    Address name((ptr), (access)->offset())
+
+#else
+
+    // Porting hook.  Declare |name| (not a hard-coded identifier) so every
+    // use site still parses regardless of the name it passes.
+# define ATOMIC_PTR(name, access, tls, ptr)                       \
+    MOZ_CRASH("BaseCompiler platform hook: address computation"); \
+    Address name
+
+#endif
+
+    // Emit a 64-bit atomic exchange: pops the value and the pointer, and
+    // pushes the old memory value iff |wantResult|.  On x86 the operands are
+    // pinned to edx:eax (result) and ecx:ebx (value) — presumably for
+    // cmpxchg8b (TODO confirm against the masm implementation).
+    void xchg64(MemoryAccessDesc* access, ValType type, WantResult wantResult)
+    {
+#if defined(JS_CODEGEN_X86)
+        RegI64 rd = specific_edx_eax;
+        needI64(rd);
+        needI32(specific_ecx);
+        // Claim scratch after the need() calls because they may need it to
+        // sync.
+        ScratchEBX scratch(*this);
+        RegI64 rv = specific_ecx_ebx;
+#elif defined(JS_CODEGEN_ARM)
+        RegI64 rv = needI64Pair();
+        RegI64 rd = needI64Pair();
+#else
+        RegI64 rv, rd;
+        MOZ_CRASH("BaseCompiler porting interface: xchg64");
+#endif
+
+        popI64ToSpecific(rv);
+
+        AccessCheck check;
+        RegI32 rp = popMemoryAccess(access, &check);
+        RegI32 tls = maybeLoadTlsForAccess(check);
+        prepareMemoryAccess(access, &check, tls, rp);
+        ATOMIC_PTR(srcAddr, access, tls, rp);
+
+        masm.atomicExchange64(srcAddr, rv, rd);
+
+        if (wantResult)
+            pushI64(rd);
+        else
+            freeI64(rd);
+
+        maybeFreeI32(tls);
+        freeI32(rp);
+
+#if defined(JS_CODEGEN_X86)
+        freeI32(specific_ecx);
+#elif defined(JS_CODEGEN_ARM)
+        freeI64(rv);
+#else
+        MOZ_CRASH("BaseCompiler porting interface: xchg64");
+#endif
+    }
+
+    // Allocate the temp a 32-bit-or-narrower atomic RMW needs.  On x86/x64
+    // add/sub need no temp; on x86 byte-sized ops get their (byte) temp in
+    // atomicRMW itself; on ARM a temp is always needed.
+    void needAtomicRMWTemps(AtomicOp op, MemoryAccessDesc* access, RegI32* tmp) {
+#if defined(JS_CODEGEN_X86)
+        // Handled specially in atomicRMW
+        if (access->byteSize() == 1)
+            return;
+#endif
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+        if (op != AtomicFetchAddOp && op != AtomicFetchSubOp)
+            *tmp = needI32();
+#elif defined(JS_CODEGEN_ARM)
+        *tmp = needI32();
+#else
+        MOZ_CRASH("BaseCompiler platform hook: atomicRMWTemps");
+#endif
+    }
+
+    // Emit an atomic read-modify-write of size <= 32 bits: applies |op| with
+    // operand |rv| to the addressed location and leaves the old value in
+    // |rd|.  |tmp| comes from needAtomicRMWTemps.
+    void
+    atomicRMW(AtomicOp op, MemoryAccessDesc* access, AccessCheck* check, RegI32 tls, RegI32 ptr,
+              RegI32 rv, RegI32 rd, RegI32 tmp)
+    {
+        prepareMemoryAccess(access, check, tls, ptr);
+        ATOMIC_PTR(srcAddr, access, tls, ptr);
+
+        switch (access->type()) {
+          case Scalar::Uint8: {
+            RegI32 v = rv;
+            RegI32 d = rd;
+#ifdef JS_CODEGEN_X86
+            // The temp, if used, must be a byte register.
+            MOZ_ASSERT(tmp == invalidI32());
+            ScratchEBX scratch(*this);
+            if (op != AtomicFetchAddOp && op != AtomicFetchSubOp)
+                tmp = RegI32(scratch);
+#endif
+            switch (op) {
+              case AtomicFetchAddOp: masm.atomicFetchAdd8ZeroExtend(v, srcAddr, tmp, d); break;
+              case AtomicFetchSubOp: masm.atomicFetchSub8ZeroExtend(v, srcAddr, tmp, d); break;
+              case AtomicFetchAndOp: masm.atomicFetchAnd8ZeroExtend(v, srcAddr, tmp, d); break;
+              case AtomicFetchOrOp:  masm.atomicFetchOr8ZeroExtend(v, srcAddr, tmp, d); break;
+              case AtomicFetchXorOp: masm.atomicFetchXor8ZeroExtend(v, srcAddr, tmp, d); break;
+              default: MOZ_CRASH("No such op");
+            }
+            break;
+          }
+          case Scalar::Uint16: {
+            switch (op) {
+              case AtomicFetchAddOp: masm.atomicFetchAdd16ZeroExtend(rv, srcAddr, tmp, rd); break;
+              case AtomicFetchSubOp: masm.atomicFetchSub16ZeroExtend(rv, srcAddr, tmp, rd); break;
+              case AtomicFetchAndOp: masm.atomicFetchAnd16ZeroExtend(rv, srcAddr, tmp, rd); break;
+              case AtomicFetchOrOp:  masm.atomicFetchOr16ZeroExtend(rv, srcAddr, tmp, rd); break;
+              case AtomicFetchXorOp: masm.atomicFetchXor16ZeroExtend(rv, srcAddr, tmp, rd); break;
+              default: MOZ_CRASH("No such op");
+            }
+            break;
+          }
+          case Scalar::Int32:
+          case Scalar::Uint32: {
+            switch (op) {
+              case AtomicFetchAddOp: masm.atomicFetchAdd32(rv, srcAddr, tmp, rd); break;
+              case AtomicFetchSubOp: masm.atomicFetchSub32(rv, srcAddr, tmp, rd); break;
+              case AtomicFetchAndOp: masm.atomicFetchAnd32(rv, srcAddr, tmp, rd); break;
+              case AtomicFetchOrOp:  masm.atomicFetchOr32(rv, srcAddr, tmp, rd); break;
+              case AtomicFetchXorOp: masm.atomicFetchXor32(rv, srcAddr, tmp, rd); break;
+              default: MOZ_CRASH("No such op");
+            }
+            break;
+          }
+          default: {
+            MOZ_CRASH("Bad type for atomic operation");
+          }
+        }
+    }
+
+    // Allocate the temp a 64-bit atomic RMW needs.  x86 takes a different
+    // path entirely; on x64 only non-add/sub ops need one; on ARM a register
+    // pair is always needed.
+    void needAtomicRMW64Temps(AtomicOp op, RegI64* tmp) {
+#if defined(JS_CODEGEN_X86)
+        MOZ_CRASH("Do not call on x86");
+#elif defined(JS_CODEGEN_X64)
+        if (op != AtomicFetchAddOp && op != AtomicFetchSubOp)
+            *tmp = needI64();
+#elif defined(JS_CODEGEN_ARM)
+        *tmp = needI64Pair();
+#else
+        MOZ_CRASH("BaseCompiler platform hook: atomicRMW64Temps");
+#endif
+    }
+
+    // On x86, T is Address.  On other platforms, it is Register64.
+    // U is BaseIndex or Address.
+    //
+    // Dispatch a 64-bit atomic read-modify-write |op| to the masm helper;
+    // the old value is left in |rd|.
+    template <typename T, typename U>
+    void
+    atomicRMW64(AtomicOp op, T value, const U& srcAddr, Register64 tmp, Register64 rd) {
+        switch (op) {
+          case AtomicFetchAddOp: masm.atomicFetchAdd64(value, srcAddr, tmp, rd); break;
+          case AtomicFetchSubOp: masm.atomicFetchSub64(value, srcAddr, tmp, rd); break;
+          case AtomicFetchAndOp: masm.atomicFetchAnd64(value, srcAddr, tmp, rd); break;
+          case AtomicFetchOrOp:  masm.atomicFetchOr64(value, srcAddr, tmp, rd); break;
+          case AtomicFetchXorOp: masm.atomicFetchXor64(value, srcAddr, tmp, rd); break;
+          default: MOZ_CRASH("No such op");
+        }
+    }
+
+    // Emit an atomic compare-exchange of size <= 32 bits: if the addressed
+    // location equals |rexpect| replace it with |rnew|; the old value is
+    // left in |rd|.
+    void
+    atomicCompareExchange(MemoryAccessDesc* access, AccessCheck* check, RegI32 tls, RegI32 ptr,
+                          RegI32 rexpect, RegI32 rnew, RegI32 rd)
+    {
+        prepareMemoryAccess(access, check, tls, ptr);
+        ATOMIC_PTR(srcAddr, access, tls, ptr);
+
+        switch (access->type()) {
+          case Scalar::Uint8: {
+#if defined(JS_CODEGEN_X86)
+            ScratchEBX scratch(*this);
+            MOZ_ASSERT(rd == specific_eax);
+            if (!ra.isSingleByteI32(rnew)) {
+                // The replacement value must have a byte persona.
+                masm.movl(rnew, scratch);
+                rnew = RegI32(scratch);
+            }
+#endif
+            masm.compareExchange8ZeroExtend(srcAddr, rexpect, rnew, rd);
+            break;
+          }
+          case Scalar::Uint16:
+            masm.compareExchange16ZeroExtend(srcAddr, rexpect, rnew, rd);
+            break;
+          case Scalar::Int32:
+          case Scalar::Uint32:
+            masm.compareExchange32(srcAddr, rexpect, rnew, rd);
+            break;
+          default:
+            MOZ_CRASH("Bad type for atomic operation");
+        }
+    }
+
+    // Emit an atomic exchange of size <= 32 bits: store |rv| into the
+    // addressed location and leave the old value in |rd|.
+    void
+    atomicExchange(MemoryAccessDesc* access, AccessCheck* check, RegI32 tls, RegI32 ptr,
+                   RegI32 rv, RegI32 rd)
+    {
+        prepareMemoryAccess(access, check, tls, ptr);
+        ATOMIC_PTR(srcAddr, access, tls, ptr);
+
+        switch (access->type()) {
+          case Scalar::Uint8: {
+#if defined(JS_CODEGEN_X86)
+            if (!ra.isSingleByteI32(rd)) {
+                ScratchEBX scratch(*this);
+                // The output register must have a byte persona.
+                masm.atomicExchange8ZeroExtend(srcAddr, rv, scratch);
+                masm.movl(scratch, rd);
+            } else {
+                masm.atomicExchange8ZeroExtend(srcAddr, rv, rd);
+            }
+#else
+            masm.atomicExchange8ZeroExtend(srcAddr, rv, rd);
+#endif
+            break;
+          }
+          case Scalar::Uint16:
+            masm.atomicExchange16ZeroExtend(srcAddr, rv, rd);
+            break;
+          case Scalar::Int32:
+          case Scalar::Uint32:
+            masm.atomicExchange32(srcAddr, rv, rd);
+            break;
+          default:
+            MOZ_CRASH("Bad type for atomic operation");
+        }
+    }
+
+    ////////////////////////////////////////////////////////////
+
+    // Generally speaking, ABOVE this point there should be no value
+    // stack manipulation (calls to popI32 etc).
+
+    // Generally speaking, BELOW this point there should be no
+    // platform dependencies.  We make an exception for x86 register
+    // targeting, which is not too hard to keep clean.
+
+    ////////////////////////////////////////////////////////////
+    //
+    // Sundry wrappers.
+
+    // Pop two i32s: top of stack into *r1, the one below it into *r0.
+    void pop2xI32(RegI32* r0, RegI32* r1) {
+        *r1 = popI32();
+        *r0 = popI32();
+    }
+
+    // Pop the top i32 into |specific|, which the caller has already
+    // reserved; it is freed first, presumably so popI32 can re-acquire it.
+    RegI32 popI32ToSpecific(RegI32 specific) {
+        freeI32(specific);
+        return popI32(specific);
+    }
+
+    // Pop two i64s: top of stack into *r1, the one below it into *r0.
+    void pop2xI64(RegI64* r0, RegI64* r1) {
+        *r1 = popI64();
+        *r0 = popI64();
+    }
+
+    // Pop the top i64 into |specific|, which the caller has already
+    // reserved; it is freed first, presumably so popI64 can re-acquire it.
+    RegI64 popI64ToSpecific(RegI64 specific) {
+        freeI64(specific);
+        return popI64(specific);
+    }
+
+#ifdef JS_CODEGEN_ARM
+    // ARM only: pop an i64 into a register pair obtained from needI64Pair.
+    RegI64 popI64Pair() {
+        RegI64 r = needI64Pair();
+        popI64ToSpecific(r);
+        return r;
+    }
+#endif
+
+    // Pop two f32s: top of stack into *r1, the one below it into *r0.
+    void pop2xF32(RegF32* r0, RegF32* r1) {
+        *r1 = popF32();
+        *r0 = popF32();
+    }
+
+    // Pop two f64s: top of stack into *r1, the one below it into *r0.
+    void pop2xF64(RegF64* r0, RegF64* r1) {
+        *r1 = popF64();
+        *r0 = popF64();
+    }
+
+    // Pop an i64 and return its narrowing to an i32 register.
+    RegI32 popI64ToI32() {
+        return narrowI64(popI64());
+    }
+
+    // Pop an i64 into the widening of |specific| and return the narrowed
+    // i32 (i.e. |specific| holds the low bits).
+    RegI32 popI64ToSpecificI32(RegI32 specific) {
+        RegI64 x = widenI32(specific);
+        popI64ToSpecific(x);
+        return narrowI64(x);
+    }
+
+    // Zero-extend the i32 in |r| to an i64 and push it; |r| becomes part of
+    // the widened register.
+    void pushU32AsI64(RegI32 r) {
+        RegI64 x = widenI32(r);
+        masm.move32To64ZeroExtend(r, x);
+        pushI64(x);
+    }
+
+    RegI32 popMemoryAccess(MemoryAccessDesc* access, AccessCheck* check);
+
+    ////////////////////////////////////////////////////////////
+    //
+    // Sundry helpers.
+
+    // Return the recorded source line number for the next call site, or,
+    // when no line numbers were recorded, the offset of the last opcode.
+    uint32_t readCallSiteLineOrBytecode() {
+        if (!func_.callSiteLineNums.empty())
+            return func_.callSiteLineNums[lastReadCallSite_++];
+        return iter_.lastOpcodeOffset();
+    }
+
+    // True once the decoder has consumed the entire function body.
+    bool done() const { return iter_.done(); }
+
+    // The current bytecode offset, per the decoder.
+    BytecodeOffset bytecodeOffset() const { return iter_.bytecodeOffset(); }
+
+    // Describe trap |t| at the current bytecode offset and frame depth.
+    TrapDesc trap(Trap t) const {
+        BytecodeOffset off = bytecodeOffset();
+        return TrapDesc(off, t, masm.framePushed());
+    }
+
+    ////////////////////////////////////////////////////////////
+    //
+    // Machinery for optimized conditional branches.
+    //
+    // To disable this optimization it is enough always to return false from
+    // sniffConditionalControl{Cmp,Eqz}.
+
+    // The arguments of a conditional branch that may be fused with a
+    // preceding comparison: the operands (one union arm per operand type),
+    // the target, the stack height to pop to on the taken edge, the branch
+    // sense, and the type of any value flowing along the edge.
+    struct BranchState {
+        static const int32_t NoPop = ~0;
+
+        union {
+            struct {
+                RegI32 lhs;
+                RegI32 rhs;
+                int32_t imm;
+                bool rhsImm;
+            } i32;
+            struct {
+                RegI64 lhs;
+                RegI64 rhs;
+                int64_t imm;
+                bool rhsImm;
+            } i64;
+            struct {
+                RegF32 lhs;
+                RegF32 rhs;
+            } f32;
+            struct {
+                RegF64 lhs;
+                RegF64 rhs;
+            } f64;
+        };
+
+        Label* const label;        // The target of the branch, never NULL
+        const int32_t framePushed; // Either NoPop, or the value to pop to along the taken edge
+        const bool invertBranch;   // If true, invert the sense of the branch
+        const ExprType resultType; // The result propagated along the edges, or Void
+
+        // invertBranch is declared bool: it initializes a bool member and
+        // defaults to false (it was previously typed uint32_t).
+        explicit BranchState(Label* label, int32_t framePushed = NoPop,
+                             bool invertBranch = false, ExprType resultType = ExprType::Void)
+          : label(label),
+            framePushed(framePushed),
+            invertBranch(invertBranch),
+            resultType(resultType)
+        {}
+    };
+
+    // Record a latent integer comparison to be fused into a later branch.
+    void setLatentCompare(Assembler::Condition compareOp, ValType operandType) {
+        latentType_ = operandType;
+        latentIntCmp_ = compareOp;
+        latentOp_ = LatentOp::Compare;
+    }
+
+    void setLatentCompare(Assembler::DoubleCondition compareOp, ValType operandType) {
+        latentOp_ = LatentOp::Compare;
+        latentType_ = operandType;
+        latentDoubleCmp_ = compareOp;
+    }
+
+    void setLatentEqz(ValType operandType) {
+        latentOp_ = LatentOp::Eqz;
+        latentType_ = operandType;
+    }
+
+    void resetLatentOp() {
+        latentOp_ = LatentOp::None;
+    }
+
+    // branchTo() overloads: emit a conditional branch to `l` comparing `lhs`
+    // against `rhs`, selecting the masm primitive by operand type.  These
+    // exist so jumpConditionalWithJoinReg() can be written generically.
+    void branchTo(Assembler::DoubleCondition c, RegF64 lhs, RegF64 rhs, Label* l) {
+        masm.branchDouble(c, lhs, rhs, l);
+    }
+
+    void branchTo(Assembler::DoubleCondition c, RegF32 lhs, RegF32 rhs, Label* l) {
+        masm.branchFloat(c, lhs, rhs, l);
+    }
+
+    void branchTo(Assembler::Condition c, RegI32 lhs, RegI32 rhs, Label* l) {
+        masm.branch32(c, lhs, rhs, l);
+    }
+
+    // Immediate-rhs variant for 32-bit integer compares.
+    void branchTo(Assembler::Condition c, RegI32 lhs, Imm32 rhs, Label* l) {
+        masm.branch32(c, lhs, rhs, l);
+    }
+
+    void branchTo(Assembler::Condition c, RegI64 lhs, RegI64 rhs, Label* l) {
+        masm.branch64(c, lhs, rhs, l);
+    }
+
+    // Immediate-rhs variant for 64-bit integer compares.
+    void branchTo(Assembler::Condition c, RegI64 lhs, Imm64 rhs, Label* l) {
+        masm.branch64(c, lhs, rhs, l);
+    }
+
+    // Emit a conditional branch that optionally and optimally cleans up the CPU
+    // stack before we branch.
+    //
+    // Cond is either Assembler::Condition or Assembler::DoubleCondition.
+    //
+    // Lhs is Register, Register64, or FloatRegister.
+    //
+    // Rhs is either the same as Lhs, or an immediate expression compatible with
+    // Lhs "when applicable".
+
+    template<typename Cond, typename Lhs, typename Rhs>
+    void jumpConditionalWithJoinReg(BranchState* b, Cond cond, Lhs lhs, Rhs rhs)
+    {
+        // Hold the join value (if any) out of the way while we adjust the
+        // stack and branch.
+        Maybe<AnyReg> r = popJoinRegUnlessVoid(b->resultType);
+
+        if (b->framePushed != BranchState::NoPop && willPopStackBeforeBranch(b->framePushed)) {
+            // Stack must be popped along the taken edge: branch around a
+            // pop-then-jump sequence using the inverted condition.
+            Label notTaken;
+            branchTo(b->invertBranch ? cond : Assembler::InvertCondition(cond), lhs, rhs, &notTaken);
+            popStackBeforeBranch(b->framePushed);
+            masm.jump(b->label);
+            masm.bind(&notTaken);
+        } else {
+            // No pop needed: branch directly to the target.
+            branchTo(b->invertBranch ? Assembler::InvertCondition(cond) : cond, lhs, rhs, b->label);
+        }
+
+        pushJoinRegUnlessVoid(r);
+    }
+
+    // sniffConditionalControl{Cmp,Eqz} may modify the latentWhatever_ state in
+    // the BaseCompiler so that a subsequent conditional branch can be compiled
+    // optimally.  emitBranchSetup() and emitBranchPerform() will consume that
+    // state.  If the latter methods are not called because deadCode_ is true
+    // then the compiler MUST instead call resetLatentOp() to reset the state.
+
+    template<typename Cond> bool sniffConditionalControlCmp(Cond compareOp, ValType operandType);
+    bool sniffConditionalControlEqz(ValType operandType);
+    void emitBranchSetup(BranchState* b);
+    void emitBranchPerform(BranchState* b);
+
+    //////////////////////////////////////////////////////////////////////
+
+    MOZ_MUST_USE bool emitBody();
+    MOZ_MUST_USE bool emitBlock();
+    MOZ_MUST_USE bool emitLoop();
+    MOZ_MUST_USE bool emitIf();
+    MOZ_MUST_USE bool emitElse();
+    MOZ_MUST_USE bool emitEnd();
+    MOZ_MUST_USE bool emitBr();
+    MOZ_MUST_USE bool emitBrIf();
+    MOZ_MUST_USE bool emitBrTable();
+    MOZ_MUST_USE bool emitDrop();
+    MOZ_MUST_USE bool emitReturn();
+    MOZ_MUST_USE bool emitCallArgs(const ValTypeVector& args, FunctionCall& baselineCall);
+    MOZ_MUST_USE bool emitCall();
+    MOZ_MUST_USE bool emitCallIndirect();
+    MOZ_MUST_USE bool emitUnaryMathBuiltinCall(SymbolicAddress callee, ValType operandType);
+    MOZ_MUST_USE bool emitGetLocal();
+    MOZ_MUST_USE bool emitSetLocal();
+    MOZ_MUST_USE bool emitTeeLocal();
+    MOZ_MUST_USE bool emitGetGlobal();
+    MOZ_MUST_USE bool emitSetGlobal();
+    MOZ_MUST_USE RegI32 maybeLoadTlsForAccess(const AccessCheck& check);
+    MOZ_MUST_USE bool emitLoad(ValType type, Scalar::Type viewType);
+    MOZ_MUST_USE bool loadCommon(MemoryAccessDesc* access, ValType type);
+    MOZ_MUST_USE bool emitStore(ValType resultType, Scalar::Type viewType);
+    MOZ_MUST_USE bool storeCommon(MemoryAccessDesc* access, ValType resultType);
+    MOZ_MUST_USE bool emitSelect();
+
+    // Mark these templates as inline to work around a compiler crash in
+    // gcc 4.8.5 when compiling for linux64-opt.
+
+    template<bool isSetLocal> MOZ_MUST_USE inline bool emitSetOrTeeLocal(uint32_t slot);
+
+    void endBlock(ExprType type);
+    void endLoop(ExprType type);
+    void endIfThen();
+    void endIfThenElse(ExprType type);
+
+    void doReturn(ExprType returnType, bool popStack);
+    void pushReturned(const FunctionCall& call, ExprType type);
+
+    void emitCompareI32(Assembler::Condition compareOp, ValType compareType);
+    void emitCompareI64(Assembler::Condition compareOp, ValType compareType);
+    void emitCompareF32(Assembler::DoubleCondition compareOp, ValType compareType);
+    void emitCompareF64(Assembler::DoubleCondition compareOp, ValType compareType);
+
+    void emitAddI32();
+    void emitAddI64();
+    void emitAddF64();
+    void emitAddF32();
+    void emitSubtractI32();
+    void emitSubtractI64();
+    void emitSubtractF32();
+    void emitSubtractF64();
+    void emitMultiplyI32();
+    void emitMultiplyI64();
+    void emitMultiplyF32();
+    void emitMultiplyF64();
+    void emitQuotientI32();
+    void emitQuotientU32();
+    void emitRemainderI32();
+    void emitRemainderU32();
+#ifdef RABALDR_INT_DIV_I64_CALLOUT
+    void emitDivOrModI64BuiltinCall(SymbolicAddress callee, ValType operandType);
+#else
+    void emitQuotientI64();
+    void emitQuotientU64();
+    void emitRemainderI64();
+    void emitRemainderU64();
+#endif
+    void emitDivideF32();
+    void emitDivideF64();
+    void emitMinF32();
+    void emitMaxF32();
+    void emitMinF64();
+    void emitMaxF64();
+    void emitCopysignF32();
+    void emitCopysignF64();
+    void emitOrI32();
+    void emitOrI64();
+    void emitAndI32();
+    void emitAndI64();
+    void emitXorI32();
+    void emitXorI64();
+    void emitShlI32();
+    void emitShlI64();
+    void emitShrI32();
+    void emitShrI64();
+    void emitShrU32();
+    void emitShrU64();
+    void emitRotrI32();
+    void emitRotrI64();
+    void emitRotlI32();
+    void emitRotlI64();
+    void emitEqzI32();
+    void emitEqzI64();
+    void emitClzI32();
+    void emitClzI64();
+    void emitCtzI32();
+    void emitCtzI64();
+    void emitPopcntI32();
+    void emitPopcntI64();
+    void emitAbsF32();
+    void emitAbsF64();
+    void emitNegateF32();
+    void emitNegateF64();
+    void emitSqrtF32();
+    void emitSqrtF64();
+    template<bool isUnsigned> MOZ_MUST_USE bool emitTruncateF32ToI32();
+    template<bool isUnsigned> MOZ_MUST_USE bool emitTruncateF64ToI32();
+#ifdef RABALDR_FLOAT_TO_I64_CALLOUT
+    MOZ_MUST_USE bool emitConvertFloatingToInt64Callout(SymbolicAddress callee, ValType operandType,
+                                                        ValType resultType);
+#else
+    template<bool isUnsigned> MOZ_MUST_USE bool emitTruncateF32ToI64();
+    template<bool isUnsigned> MOZ_MUST_USE bool emitTruncateF64ToI64();
+#endif
+    void emitWrapI64ToI32();
+    void emitExtendI32_8();
+    void emitExtendI32_16();
+    void emitExtendI64_8();
+    void emitExtendI64_16();
+    void emitExtendI64_32();
+    void emitExtendI32ToI64();
+    void emitExtendU32ToI64();
+    void emitReinterpretF32AsI32();
+    void emitReinterpretF64AsI64();
+    void emitConvertF64ToF32();
+    void emitConvertI32ToF32();
+    void emitConvertU32ToF32();
+    void emitConvertF32ToF64();
+    void emitConvertI32ToF64();
+    void emitConvertU32ToF64();
+#ifdef RABALDR_I64_TO_FLOAT_CALLOUT
+    MOZ_MUST_USE bool emitConvertInt64ToFloatingCallout(SymbolicAddress callee, ValType operandType,
+                                                        ValType resultType);
+#else
+    void emitConvertI64ToF32();
+    void emitConvertU64ToF32();
+    void emitConvertI64ToF64();
+    void emitConvertU64ToF64();
+#endif
+    void emitReinterpretI32AsF32();
+    void emitReinterpretI64AsF64();
+    void emitRound(RoundingMode roundingMode, ValType operandType);
+    void emitInstanceCall(uint32_t lineOrBytecode, const MIRTypeVector& sig,
+                          ExprType retType, SymbolicAddress builtin);
+    MOZ_MUST_USE bool emitGrowMemory();
+    MOZ_MUST_USE bool emitCurrentMemory();
+
+    MOZ_MUST_USE bool emitAtomicCmpXchg(ValType type, Scalar::Type viewType);
+    MOZ_MUST_USE bool emitAtomicLoad(ValType type, Scalar::Type viewType);
+    MOZ_MUST_USE bool emitAtomicRMW(ValType type, Scalar::Type viewType, AtomicOp op);
+    MOZ_MUST_USE bool emitAtomicStore(ValType type, Scalar::Type viewType);
+    MOZ_MUST_USE bool emitWait(ValType type, uint32_t byteSize);
+    MOZ_MUST_USE bool emitWake();
+    MOZ_MUST_USE bool emitAtomicXchg(ValType type, Scalar::Type viewType);
+};
+
+// 32-bit integer add: use the immediate form when the top of stack is a
+// constant, else add two popped registers; the result lands in r0/r.
+void
+BaseCompiler::emitAddI32()
+{
+    int32_t c;
+    if (popConstI32(&c)) {
+        RegI32 r = popI32();
+        masm.add32(Imm32(c), r);
+        pushI32(r);
+    } else {
+        RegI32 r0, r1;
+        pop2xI32(&r0, &r1);
+        masm.add32(r1, r0);
+        freeI32(r1);
+        pushI32(r0);
+    }
+}
+
+// 64-bit integer add, with the same constant shortcut.
+void
+BaseCompiler::emitAddI64()
+{
+    int64_t c;
+    if (popConstI64(&c)) {
+        RegI64 r = popI64();
+        masm.add64(Imm64(c), r);
+        pushI64(r);
+    } else {
+        RegI64 r0, r1;
+        pop2xI64(&r0, &r1);
+        masm.add64(r1, r0);
+        freeI64(r1);
+        pushI64(r0);
+    }
+}
+
+// Double-precision add.
+void
+BaseCompiler::emitAddF64()
+{
+    RegF64 r0, r1;
+    pop2xF64(&r0, &r1);
+    masm.addDouble(r1, r0);
+    freeF64(r1);
+    pushF64(r0);
+}
+
+// Single-precision add.
+void
+BaseCompiler::emitAddF32()
+{
+    RegF32 r0, r1;
+    pop2xF32(&r0, &r1);
+    masm.addFloat32(r1, r0);
+    freeF32(r1);
+    pushF32(r0);
+}
+
+// 32-bit integer subtract (lhs in r0, rhs in r1/immediate).
+void
+BaseCompiler::emitSubtractI32()
+{
+    int32_t c;
+    if (popConstI32(&c)) {
+        RegI32 r = popI32();
+        masm.sub32(Imm32(c), r);
+        pushI32(r);
+    } else {
+        RegI32 r0, r1;
+        pop2xI32(&r0, &r1);
+        masm.sub32(r1, r0);
+        freeI32(r1);
+        pushI32(r0);
+    }
+}
+
+// 64-bit integer subtract.
+void
+BaseCompiler::emitSubtractI64()
+{
+    int64_t c;
+    if (popConstI64(&c)) {
+        RegI64 r = popI64();
+        masm.sub64(Imm64(c), r);
+        pushI64(r);
+    } else {
+        RegI64 r0, r1;
+        pop2xI64(&r0, &r1);
+        masm.sub64(r1, r0);
+        freeI64(r1);
+        pushI64(r0);
+    }
+}
+
+// Single-precision subtract.
+void
+BaseCompiler::emitSubtractF32()
+{
+    RegF32 r0, r1;
+    pop2xF32(&r0, &r1);
+    masm.subFloat32(r1, r0);
+    freeF32(r1);
+    pushF32(r0);
+}
+
+// Double-precision subtract.
+void
+BaseCompiler::emitSubtractF64()
+{
+    RegF64 r0, r1;
+    pop2xF64(&r0, &r1);
+    masm.subDouble(r1, r0);
+    freeF64(r1);
+    pushF64(r0);
+}
+
+// 32-bit integer multiply; the *ForIntMulDiv popper honors any platform
+// register constraints for mul/div.
+void
+BaseCompiler::emitMultiplyI32()
+{
+    RegI32 r0, r1;
+    pop2xI32ForIntMulDiv(&r0, &r1);
+    masm.mul32(r1, r0);
+    freeI32(r1);
+    pushI32(r0);
+}
+
+// 64-bit integer multiply; register setup is platform-specific.
+void
+BaseCompiler::emitMultiplyI64()
+{
+    RegI64 r0, r1;
+    RegI32 temp;
+#if defined(JS_CODEGEN_X64)
+    // srcDest must be rax, and rdx will be clobbered.
+    need2xI64(specific_rax, specific_rdx);
+    r1 = popI64();
+    r0 = popI64ToSpecific(specific_rax);
+    freeI64(specific_rdx);
+#elif defined(JS_CODEGEN_X86)
+    // On x86-32 the result pair is edx:eax and a scratch is needed.
+    need2xI32(specific_eax, specific_edx);
+    r1 = popI64();
+    r0 = popI64ToSpecific(RegI64(Register64(specific_edx, specific_eax)));
+    temp = needI32();
+#else
+    pop2xI64(&r0, &r1);
+    temp = needI32();
+#endif
+    masm.mul64(r1, r0, temp);
+    maybeFreeI32(temp);
+    freeI64(r1);
+    pushI64(r0);
+}
+
+// Single-precision multiply.
+void
+BaseCompiler::emitMultiplyF32()
+{
+    RegF32 r0, r1;
+    pop2xF32(&r0, &r1);
+    masm.mulFloat32(r1, r0);
+    freeF32(r1);
+    pushF32(r0);
+}
+
+// Double-precision multiply.
+void
+BaseCompiler::emitMultiplyF64()
+{
+    RegF64 r0, r1;
+    pop2xF64(&r0, &r1);
+    masm.mulDouble(r1, r0);
+    freeF64(r1);
+    pushF64(r0);
+}
+
+// Signed 32-bit division.  A positive power-of-two divisor becomes an
+// arithmetic shift with a pre-add of (c-1) for negative dividends (to round
+// toward zero); division by 1 (power == 0) leaves the dividend untouched.
+// Otherwise emit runtime divide-by-zero and INT_MIN/-1 overflow checks
+// unless a peeked constant divisor rules them out.
+void
+BaseCompiler::emitQuotientI32()
+{
+    int32_t c;
+    uint_fast8_t power;
+    if (popConstPositivePowerOfTwoI32(&c, &power, 0)) {
+        if (power != 0) {
+            RegI32 r = popI32();
+            Label positive;
+            masm.branchTest32(Assembler::NotSigned, r, r, &positive);
+            masm.add32(Imm32(c-1), r);
+            masm.bind(&positive);
+
+            masm.rshift32Arithmetic(Imm32(power & 31), r);
+            pushI32(r);
+        }
+    } else {
+        bool isConst = peekConstI32(&c);
+        RegI32 r0, r1;
+        pop2xI32ForIntMulDiv(&r0, &r1);
+
+        Label done;
+        if (!isConst || c == 0)
+            checkDivideByZeroI32(r1, r0, &done);
+        if (!isConst || c == -1)
+            checkDivideSignedOverflowI32(r1, r0, &done, ZeroOnOverflow(false));
+        masm.quotient32(r1, r0, IsUnsigned(false));
+        masm.bind(&done);
+
+        freeI32(r1);
+        pushI32(r0);
+    }
+}
+
+// Unsigned 32-bit division; power-of-two divisors become logical shifts and
+// only the zero-divisor check is needed.
+void
+BaseCompiler::emitQuotientU32()
+{
+    int32_t c;
+    uint_fast8_t power;
+    if (popConstPositivePowerOfTwoI32(&c, &power, 0)) {
+        if (power != 0) {
+            RegI32 r = popI32();
+            masm.rshift32(Imm32(power & 31), r);
+            pushI32(r);
+        }
+    } else {
+        bool isConst = peekConstI32(&c);
+        RegI32 r0, r1;
+        pop2xI32ForIntMulDiv(&r0, &r1);
+
+        Label done;
+        if (!isConst || c == 0)
+            checkDivideByZeroI32(r1, r0, &done);
+        masm.quotient32(r1, r0, IsUnsigned(true));
+        masm.bind(&done);
+
+        freeI32(r1);
+        pushI32(r0);
+    }
+}
+
+// Signed 32-bit remainder.  For a power-of-two divisor, compute
+// r - ((r rounded toward zero to a multiple of c)) via shift-up/shift-down
+// on a temp; otherwise emit checked hardware remainder (INT_MIN % -1
+// yields zero via ZeroOnOverflow(true)).
+void
+BaseCompiler::emitRemainderI32()
+{
+    int32_t c;
+    uint_fast8_t power;
+    if (popConstPositivePowerOfTwoI32(&c, &power, 1)) {
+        RegI32 r = popI32();
+        RegI32 temp = needI32();
+        moveI32(r, temp);
+
+        Label positive;
+        masm.branchTest32(Assembler::NotSigned, temp, temp, &positive);
+        masm.add32(Imm32(c-1), temp);
+        masm.bind(&positive);
+
+        masm.rshift32Arithmetic(Imm32(power & 31), temp);
+        masm.lshift32(Imm32(power & 31), temp);
+        masm.sub32(temp, r);
+        freeI32(temp);
+
+        pushI32(r);
+    } else {
+        bool isConst = peekConstI32(&c);
+        RegI32 r0, r1;
+        pop2xI32ForIntMulDiv(&r0, &r1);
+
+        Label done;
+        if (!isConst || c == 0)
+            checkDivideByZeroI32(r1, r0, &done);
+        if (!isConst || c == -1)
+            checkDivideSignedOverflowI32(r1, r0, &done, ZeroOnOverflow(true));
+        masm.remainder32(r1, r0, IsUnsigned(false));
+        masm.bind(&done);
+
+        freeI32(r1);
+        pushI32(r0);
+    }
+}
+
+// Unsigned 32-bit remainder; a power-of-two divisor is a simple mask.
+void
+BaseCompiler::emitRemainderU32()
+{
+    int32_t c;
+    uint_fast8_t power;
+    if (popConstPositivePowerOfTwoI32(&c, &power, 1)) {
+        RegI32 r = popI32();
+        masm.and32(Imm32(c-1), r);
+        pushI32(r);
+    } else {
+        bool isConst = peekConstI32(&c);
+        RegI32 r0, r1;
+        pop2xI32ForIntMulDiv(&r0, &r1);
+
+        Label done;
+        if (!isConst || c == 0)
+            checkDivideByZeroI32(r1, r0, &done);
+        masm.remainder32(r1, r0, IsUnsigned(true));
+        masm.bind(&done);
+
+        freeI32(r1);
+        pushI32(r0);
+    }
+}
+
+// 64-bit division/remainder, compiled inline only on platforms that do not
+// route i64 div/mod through a builtin callout (RABALDR_INT_DIV_I64_CALLOUT).
+// The inline bodies further require a 64-bit register model (JS_PUNBOX64).
+#ifndef RABALDR_INT_DIV_I64_CALLOUT
+// Signed 64-bit division; mirrors emitQuotientI32 with 64-bit operations.
+void
+BaseCompiler::emitQuotientI64()
+{
+# ifdef JS_PUNBOX64
+    int64_t c;
+    uint_fast8_t power;
+    if (popConstPositivePowerOfTwoI64(&c, &power, 0)) {
+        if (power != 0) {
+            RegI64 r = popI64();
+            Label positive;
+            masm.branchTest64(Assembler::NotSigned, r, r, Register::Invalid(),
+                              &positive);
+            masm.add64(Imm64(c-1), r);
+            masm.bind(&positive);
+
+            masm.rshift64Arithmetic(Imm32(power & 63), r);
+            pushI64(r);
+        }
+    } else {
+        bool isConst = peekConstI64(&c);
+        RegI64 r0, r1;
+        pop2xI64ForIntDiv(&r0, &r1);
+        quotientI64(r1, r0, IsUnsigned(false), isConst, c);
+        freeI64(r1);
+        pushI64(r0);
+    }
+# else
+    MOZ_CRASH("BaseCompiler platform hook: emitQuotientI64");
+# endif
+}
+
+// Unsigned 64-bit division.
+void
+BaseCompiler::emitQuotientU64()
+{
+# ifdef JS_PUNBOX64
+    int64_t c;
+    uint_fast8_t power;
+    if (popConstPositivePowerOfTwoI64(&c, &power, 0)) {
+        if (power != 0) {
+            RegI64 r = popI64();
+            masm.rshift64(Imm32(power & 63), r);
+            pushI64(r);
+        }
+    } else {
+        bool isConst = peekConstI64(&c);
+        RegI64 r0, r1;
+        pop2xI64ForIntDiv(&r0, &r1);
+        quotientI64(r1, r0, IsUnsigned(true), isConst, c);
+        freeI64(r1);
+        pushI64(r0);
+    }
+# else
+    MOZ_CRASH("BaseCompiler platform hook: emitQuotientU64");
+# endif
+}
+
+// Signed 64-bit remainder; mirrors emitRemainderI32 with 64-bit operations.
+void
+BaseCompiler::emitRemainderI64()
+{
+# ifdef JS_PUNBOX64
+    int64_t c;
+    uint_fast8_t power;
+    if (popConstPositivePowerOfTwoI64(&c, &power, 1)) {
+        RegI64 r = popI64();
+        RegI64 temp = needI64();
+        moveI64(r, temp);
+
+        Label positive;
+        masm.branchTest64(Assembler::NotSigned, temp, temp,
+                          Register::Invalid(), &positive);
+        masm.add64(Imm64(c-1), temp);
+        masm.bind(&positive);
+
+        masm.rshift64Arithmetic(Imm32(power & 63), temp);
+        masm.lshift64(Imm32(power & 63), temp);
+        masm.sub64(temp, r);
+        freeI64(temp);
+
+        pushI64(r);
+    } else {
+        bool isConst = peekConstI64(&c);
+        RegI64 r0, r1;
+        pop2xI64ForIntDiv(&r0, &r1);
+        remainderI64(r1, r0, IsUnsigned(false), isConst, c);
+        freeI64(r1);
+        pushI64(r0);
+    }
+# else
+    MOZ_CRASH("BaseCompiler platform hook: emitRemainderI64");
+# endif
+}
+
+// Unsigned 64-bit remainder; a power-of-two divisor is a mask.
+void
+BaseCompiler::emitRemainderU64()
+{
+# ifdef JS_PUNBOX64
+    int64_t c;
+    uint_fast8_t power;
+    if (popConstPositivePowerOfTwoI64(&c, &power, 1)) {
+        RegI64 r = popI64();
+        masm.and64(Imm64(c-1), r);
+        pushI64(r);
+    } else {
+        bool isConst = peekConstI64(&c);
+        RegI64 r0, r1;
+        pop2xI64ForIntDiv(&r0, &r1);
+        remainderI64(r1, r0, IsUnsigned(true), isConst, c);
+        freeI64(r1);
+        pushI64(r0);
+    }
+# else
+    MOZ_CRASH("BaseCompiler platform hook: emitRemainderU64");
+# endif
+}
+#endif // RABALDR_INT_DIV_I64_CALLOUT
+
+// Single-precision divide.
+void
+BaseCompiler::emitDivideF32()
+{
+    RegF32 r0, r1;
+    pop2xF32(&r0, &r1);
+    masm.divFloat32(r1, r0);
+    freeF32(r1);
+    pushF32(r0);
+}
+
+// Double-precision divide.
+void
+BaseCompiler::emitDivideF64()
+{
+    RegF64 r0, r1;
+    pop2xF64(&r0, &r1);
+    masm.divDouble(r1, r0);
+    freeF64(r1);
+    pushF64(r0);
+}
+
+// f32.min; both operands are passed through `x - 0` first so signaling
+// NaNs are quieted before the min/max (see comment below).
+void
+BaseCompiler::emitMinF32()
+{
+    RegF32 r0, r1;
+    pop2xF32(&r0, &r1);
+    // Convert signaling NaN to quiet NaNs.
+    //
+    // TODO / OPTIMIZE (bug 1316824): Don't do this if one of the operands
+    // is known to be a constant.
+    ScratchF32 zero(*this);
+    masm.loadConstantFloat32(0.f, zero);
+    masm.subFloat32(zero, r0);
+    masm.subFloat32(zero, r1);
+    masm.minFloat32(r1, r0, HandleNaNSpecially(true));
+    freeF32(r1);
+    pushF32(r0);
+}
+
+// f32.max, with the same NaN quieting.
+void
+BaseCompiler::emitMaxF32()
+{
+    RegF32 r0, r1;
+    pop2xF32(&r0, &r1);
+    // Convert signaling NaN to quiet NaNs.
+    //
+    // TODO / OPTIMIZE (bug 1316824): see comment in emitMinF32.
+    ScratchF32 zero(*this);
+    masm.loadConstantFloat32(0.f, zero);
+    masm.subFloat32(zero, r0);
+    masm.subFloat32(zero, r1);
+    masm.maxFloat32(r1, r0, HandleNaNSpecially(true));
+    freeF32(r1);
+    pushF32(r0);
+}
+
+// f64.min, with the same NaN quieting.
+void
+BaseCompiler::emitMinF64()
+{
+    RegF64 r0, r1;
+    pop2xF64(&r0, &r1);
+    // Convert signaling NaN to quiet NaNs.
+    //
+    // TODO / OPTIMIZE (bug 1316824): see comment in emitMinF32.
+    ScratchF64 zero(*this);
+    masm.loadConstantDouble(0, zero);
+    masm.subDouble(zero, r0);
+    masm.subDouble(zero, r1);
+    masm.minDouble(r1, r0, HandleNaNSpecially(true));
+    freeF64(r1);
+    pushF64(r0);
+}
+
+// f64.max, with the same NaN quieting.
+void
+BaseCompiler::emitMaxF64()
+{
+    RegF64 r0, r1;
+    pop2xF64(&r0, &r1);
+    // Convert signaling NaN to quiet NaNs.
+    //
+    // TODO / OPTIMIZE (bug 1316824): see comment in emitMinF32.
+    ScratchF64 zero(*this);
+    masm.loadConstantDouble(0, zero);
+    masm.subDouble(zero, r0);
+    masm.subDouble(zero, r1);
+    masm.maxDouble(r1, r0, HandleNaNSpecially(true));
+    freeF64(r1);
+    pushF64(r0);
+}
+
+// f32.copysign: in integer registers, keep the magnitude bits of r0
+// (mask INT32_MAX) and the sign bit of r1 (mask INT32_MIN), then OR them
+// together and move back to the FP register.
+void
+BaseCompiler::emitCopysignF32()
+{
+    RegF32 r0, r1;
+    pop2xF32(&r0, &r1);
+    RegI32 i0 = needI32();
+    RegI32 i1 = needI32();
+    masm.moveFloat32ToGPR(r0, i0);
+    masm.moveFloat32ToGPR(r1, i1);
+    masm.and32(Imm32(INT32_MAX), i0);
+    masm.and32(Imm32(INT32_MIN), i1);
+    masm.or32(i1, i0);
+    masm.moveGPRToFloat32(i0, r0);
+    freeI32(i0);
+    freeI32(i1);
+    freeF32(r1);
+    pushF32(r0);
+}
+
+// f64.copysign: same bit manipulation with 64-bit masks.
+void
+BaseCompiler::emitCopysignF64()
+{
+    RegF64 r0, r1;
+    pop2xF64(&r0, &r1);
+    RegI64 x0 = needI64();
+    RegI64 x1 = needI64();
+    masm.moveDoubleToGPR64(r0, x0);
+    masm.moveDoubleToGPR64(r1, x1);
+    masm.and64(Imm64(INT64_MAX), x0);
+    masm.and64(Imm64(INT64_MIN), x1);
+    masm.or64(x1, x0);
+    masm.moveGPR64ToDouble(x0, r0);
+    freeI64(x0);
+    freeI64(x1);
+    freeF64(r1);
+    pushF64(r0);
+}
+
+// Bitwise OR/AND/XOR for i32 and i64; each uses the immediate form when
+// the top of stack is a constant.
+void
+BaseCompiler::emitOrI32()
+{
+    int32_t c;
+    if (popConstI32(&c)) {
+        RegI32 r = popI32();
+        masm.or32(Imm32(c), r);
+        pushI32(r);
+    } else {
+        RegI32 r0, r1;
+        pop2xI32(&r0, &r1);
+        masm.or32(r1, r0);
+        freeI32(r1);
+        pushI32(r0);
+    }
+}
+
+void
+BaseCompiler::emitOrI64()
+{
+    int64_t c;
+    if (popConstI64(&c)) {
+        RegI64 r = popI64();
+        masm.or64(Imm64(c), r);
+        pushI64(r);
+    } else {
+        RegI64 r0, r1;
+        pop2xI64(&r0, &r1);
+        masm.or64(r1, r0);
+        freeI64(r1);
+        pushI64(r0);
+    }
+}
+
+void
+BaseCompiler::emitAndI32()
+{
+    int32_t c;
+    if (popConstI32(&c)) {
+        RegI32 r = popI32();
+        masm.and32(Imm32(c), r);
+        pushI32(r);
+    } else {
+        RegI32 r0, r1;
+        pop2xI32(&r0, &r1);
+        masm.and32(r1, r0);
+        freeI32(r1);
+        pushI32(r0);
+    }
+}
+
+void
+BaseCompiler::emitAndI64()
+{
+    int64_t c;
+    if (popConstI64(&c)) {
+        RegI64 r = popI64();
+        masm.and64(Imm64(c), r);
+        pushI64(r);
+    } else {
+        RegI64 r0, r1;
+        pop2xI64(&r0, &r1);
+        masm.and64(r1, r0);
+        freeI64(r1);
+        pushI64(r0);
+    }
+}
+
+void
+BaseCompiler::emitXorI32()
+{
+    int32_t c;
+    if (popConstI32(&c)) {
+        RegI32 r = popI32();
+        masm.xor32(Imm32(c), r);
+        pushI32(r);
+    } else {
+        RegI32 r0, r1;
+        pop2xI32(&r0, &r1);
+        masm.xor32(r1, r0);
+        freeI32(r1);
+        pushI32(r0);
+    }
+}
+
+void
+BaseCompiler::emitXorI64()
+{
+    int64_t c;
+    if (popConstI64(&c)) {
+        RegI64 r = popI64();
+        masm.xor64(Imm64(c), r);
+        pushI64(r);
+    } else {
+        RegI64 r0, r1;
+        pop2xI64(&r0, &r1);
+        masm.xor64(r1, r0);
+        freeI64(r1);
+        pushI64(r0);
+    }
+}
+
+// Shifts.  Constant counts are masked at compile time (& 31 / & 63);
+// variable 32-bit counts are masked via maskShiftCount32() where the
+// hardware does not already do so.  64-bit variable counts use the low
+// register of the count pair.
+void
+BaseCompiler::emitShlI32()
+{
+    int32_t c;
+    if (popConstI32(&c)) {
+        RegI32 r = popI32();
+        masm.lshift32(Imm32(c & 31), r);
+        pushI32(r);
+    } else {
+        RegI32 r0, r1;
+        pop2xI32ForShiftOrRotate(&r0, &r1);
+        maskShiftCount32(r1);
+        masm.lshift32(r1, r0);
+        freeI32(r1);
+        pushI32(r0);
+    }
+}
+
+void
+BaseCompiler::emitShlI64()
+{
+    int64_t c;
+    if (popConstI64(&c)) {
+        RegI64 r = popI64();
+        masm.lshift64(Imm32(c & 63), r);
+        pushI64(r);
+    } else {
+        RegI64 r0, r1;
+        pop2xI64ForShiftOrRotate(&r0, &r1);
+        masm.lshift64(lowPart(r1), r0);
+        freeI64(r1);
+        pushI64(r0);
+    }
+}
+
+// Arithmetic (sign-propagating) right shifts.
+void
+BaseCompiler::emitShrI32()
+{
+    int32_t c;
+    if (popConstI32(&c)) {
+        RegI32 r = popI32();
+        masm.rshift32Arithmetic(Imm32(c & 31), r);
+        pushI32(r);
+    } else {
+        RegI32 r0, r1;
+        pop2xI32ForShiftOrRotate(&r0, &r1);
+        maskShiftCount32(r1);
+        masm.rshift32Arithmetic(r1, r0);
+        freeI32(r1);
+        pushI32(r0);
+    }
+}
+
+void
+BaseCompiler::emitShrI64()
+{
+    int64_t c;
+    if (popConstI64(&c)) {
+        RegI64 r = popI64();
+        masm.rshift64Arithmetic(Imm32(c & 63), r);
+        pushI64(r);
+    } else {
+        RegI64 r0, r1;
+        pop2xI64ForShiftOrRotate(&r0, &r1);
+        masm.rshift64Arithmetic(lowPart(r1), r0);
+        freeI64(r1);
+        pushI64(r0);
+    }
+}
+
+// Logical (zero-filling) right shifts.
+void
+BaseCompiler::emitShrU32()
+{
+    int32_t c;
+    if (popConstI32(&c)) {
+        RegI32 r = popI32();
+        masm.rshift32(Imm32(c & 31), r);
+        pushI32(r);
+    } else {
+        RegI32 r0, r1;
+        pop2xI32ForShiftOrRotate(&r0, &r1);
+        maskShiftCount32(r1);
+        masm.rshift32(r1, r0);
+        freeI32(r1);
+        pushI32(r0);
+    }
+}
+
+void
+BaseCompiler::emitShrU64()
+{
+    int64_t c;
+    if (popConstI64(&c)) {
+        RegI64 r = popI64();
+        masm.rshift64(Imm32(c & 63), r);
+        pushI64(r);
+    } else {
+        RegI64 r0, r1;
+        pop2xI64ForShiftOrRotate(&r0, &r1);
+        masm.rshift64(lowPart(r1), r0);
+        freeI64(r1);
+        pushI64(r0);
+    }
+}
+
+// Rotates.  The 64-bit forms may need a scratch register on some
+// platforms (rotate64NeedsTemp / maybeHighPart).
+void
+BaseCompiler::emitRotrI32()
+{
+    int32_t c;
+    if (popConstI32(&c)) {
+        RegI32 r = popI32();
+        masm.rotateRight(Imm32(c & 31), r, r);
+        pushI32(r);
+    } else {
+        RegI32 r0, r1;
+        pop2xI32ForShiftOrRotate(&r0, &r1);
+        masm.rotateRight(r1, r0, r0);
+        freeI32(r1);
+        pushI32(r0);
+    }
+}
+
+void
+BaseCompiler::emitRotrI64()
+{
+    int64_t c;
+    if (popConstI64(&c)) {
+        RegI64 r = popI64();
+        RegI32 temp;
+        if (rotate64NeedsTemp())
+            temp = needI32();
+        masm.rotateRight64(Imm32(c & 63), r, r, temp);
+        maybeFreeI32(temp);
+        pushI64(r);
+    } else {
+        RegI64 r0, r1;
+        pop2xI64ForShiftOrRotate(&r0, &r1);
+        masm.rotateRight64(lowPart(r1), r0, r0, maybeHighPart(r1));
+        freeI64(r1);
+        pushI64(r0);
+    }
+}
+
+void
+BaseCompiler::emitRotlI32()
+{
+    int32_t c;
+    if (popConstI32(&c)) {
+        RegI32 r = popI32();
+        masm.rotateLeft(Imm32(c & 31), r, r);
+        pushI32(r);
+    } else {
+        RegI32 r0, r1;
+        pop2xI32ForShiftOrRotate(&r0, &r1);
+        masm.rotateLeft(r1, r0, r0);
+        freeI32(r1);
+        pushI32(r0);
+    }
+}
+
+void
+BaseCompiler::emitRotlI64()
+{
+    int64_t c;
+    if (popConstI64(&c)) {
+        RegI64 r = popI64();
+        RegI32 temp;
+        if (rotate64NeedsTemp())
+            temp = needI32();
+        masm.rotateLeft64(Imm32(c & 63), r, r, temp);
+        maybeFreeI32(temp);
+        pushI64(r);
+    } else {
+        RegI64 r0, r1;
+        pop2xI64ForShiftOrRotate(&r0, &r1);
+        masm.rotateLeft64(lowPart(r1), r0, r0, maybeHighPart(r1));
+        freeI64(r1);
+        pushI64(r0);
+    }
+}
+
+// i32.eqz: if the eqz can be fused into a following conditional branch
+// (sniffConditionalControlEqz), emit nothing here; otherwise materialize
+// a 0/1 result.
+void
+BaseCompiler::emitEqzI32()
+{
+    if (sniffConditionalControlEqz(ValType::I32))
+        return;
+
+    RegI32 r0 = popI32();
+    masm.cmp32Set(Assembler::Equal, r0, Imm32(0), r0);
+    pushI32(r0);
+}
+
+// i64.eqz, narrowing the i64 register pair/register to an i32 result.
+void
+BaseCompiler::emitEqzI64()
+{
+    if (sniffConditionalControlEqz(ValType::I64))
+        return;
+
+    RegI64 r0 = popI64();
+    RegI32 i0 = fromI64(r0);
+    eqz64(r0, i0);
+    freeI64Except(r0, i0);
+    pushI32(i0);
+}
+
+// Count leading zeroes, 32-bit.
+void
+BaseCompiler::emitClzI32()
+{
+    RegI32 r0 = popI32();
+    masm.clz32(r0, r0, IsKnownNotZero(false));
+    pushI32(r0);
+}
+
+// Count leading zeroes, 64-bit; the i32 result lands in the low part and
+// the high part (if any) is cleared.
+void
+BaseCompiler::emitClzI64()
+{
+    RegI64 r0 = popI64();
+    masm.clz64(r0, lowPart(r0));
+    maybeClearHighPart(r0);
+    pushI64(r0);
+}
+
+// Count trailing zeroes, 32-bit.
+void
+BaseCompiler::emitCtzI32()
+{
+    RegI32 r0 = popI32();
+    masm.ctz32(r0, r0, IsKnownNotZero(false));
+    pushI32(r0);
+}
+
+// Count trailing zeroes, 64-bit.
+void
+BaseCompiler::emitCtzI64()
+{
+    RegI64 r0 = popI64();
+    masm.ctz64(r0, lowPart(r0));
+    maybeClearHighPart(r0);
+    pushI64(r0);
+}
+
+// Population count, 32-bit; some platforms need a scratch register.
+void
+BaseCompiler::emitPopcntI32()
+{
+    RegI32 r0 = popI32();
+    if (popcnt32NeedsTemp()) {
+        RegI32 tmp = needI32();
+        masm.popcnt32(r0, r0, tmp);
+        freeI32(tmp);
+    } else {
+        masm.popcnt32(r0, r0, invalidI32());
+    }
+    pushI32(r0);
+}
+
+// Population count, 64-bit.
+void
+BaseCompiler::emitPopcntI64()
+{
+    RegI64 r0 = popI64();
+    if (popcnt64NeedsTemp()) {
+        RegI32 tmp = needI32();
+        masm.popcnt64(r0, r0, tmp);
+        freeI32(tmp);
+    } else {
+        masm.popcnt64(r0, r0, invalidI32());
+    }
+    pushI64(r0);
+}
+
+// Floating-point unary operators: abs, negate, sqrt, each in-place on the
+// popped register.
+void
+BaseCompiler::emitAbsF32()
+{
+    RegF32 r0 = popF32();
+    masm.absFloat32(r0, r0);
+    pushF32(r0);
+}
+
+void
+BaseCompiler::emitAbsF64()
+{
+    RegF64 r0 = popF64();
+    masm.absDouble(r0, r0);
+    pushF64(r0);
+}
+
+void
+BaseCompiler::emitNegateF32()
+{
+    RegF32 r0 = popF32();
+    masm.negateFloat(r0);
+    pushF32(r0);
+}
+
+void
+BaseCompiler::emitNegateF64()
+{
+    RegF64 r0 = popF64();
+    masm.negateDouble(r0);
+    pushF64(r0);
+}
+
+void
+BaseCompiler::emitSqrtF32()
+{
+    RegF32 r0 = popF32();
+    masm.sqrtFloat32(r0, r0);
+    pushF32(r0);
+}
+
+void
+BaseCompiler::emitSqrtF64()
+{
+    RegF64 r0 = popF64();
+    masm.sqrtDouble(r0, r0);
+    pushF64(r0);
+}
+
+// Checked float→int truncations.  The truncate* helpers return false to
+// propagate compilation failure; signedness is a template parameter.
+template<bool isUnsigned>
+bool
+BaseCompiler::emitTruncateF32ToI32()
+{
+    RegF32 r0 = popF32();
+    RegI32 i0 = needI32();
+    if (!truncateF32ToI32(r0, i0, isUnsigned))
+        return false;
+    freeF32(r0);
+    pushI32(i0);
+    return true;
+}
+
+template<bool isUnsigned>
+bool
+BaseCompiler::emitTruncateF64ToI32()
+{
+    RegF64 r0 = popF64();
+    RegI32 i0 = needI32();
+    if (!truncateF64ToI32(r0, i0, isUnsigned))
+        return false;
+    freeF64(r0);
+    pushI32(i0);
+    return true;
+}
+
+// The 64-bit forms are compiled inline only when float→i64 is not routed
+// through a builtin callout; the unsigned path needs an F64 scratch.
+#ifndef RABALDR_FLOAT_TO_I64_CALLOUT
+template<bool isUnsigned>
+bool
+BaseCompiler::emitTruncateF32ToI64()
+{
+    RegF32 r0 = popF32();
+    RegI64 x0 = needI64();
+    if (isUnsigned) {
+        RegF64 tmp = needF64();
+        if (!truncateF32ToI64(r0, x0, isUnsigned, tmp))
+            return false;
+        freeF64(tmp);
+    } else {
+        if (!truncateF32ToI64(r0, x0, isUnsigned, invalidF64()))
+            return false;
+    }
+    freeF32(r0);
+    pushI64(x0);
+    return true;
+}
+
+template<bool isUnsigned>
+bool
+BaseCompiler::emitTruncateF64ToI64()
+{
+    RegF64 r0 = popF64();
+    RegI64 x0 = needI64();
+    if (isUnsigned) {
+        RegF64 tmp = needF64();
+        if (!truncateF64ToI64(r0, x0, isUnsigned, tmp))
+            return false;
+        freeF64(tmp);
+    } else {
+        if (!truncateF64ToI64(r0, x0, isUnsigned, invalidF64()))
+            return false;
+    }
+    freeF64(r0);
+    pushI64(x0);
+    return true;
+}
+#endif // RABALDR_FLOAT_TO_I64_CALLOUT
+
+void
+BaseCompiler::emitWrapI64ToI32()
+{
+    RegI64 r0 = popI64();
+    RegI32 i0 = fromI64(r0);
+    masm.move64To32(r0, i0);
+    freeI64Except(r0, i0);
+    pushI32(i0);
+}
+
+void
+BaseCompiler::emitExtendI32_8()
+{
+    RegI32 r = popI32();
+    masm.move8SignExtend(r, r);
+    pushI32(r);
+}
+
+void
+BaseCompiler::emitExtendI32_16()
+{
+    RegI32 r = popI32();
+    masm.move16SignExtend(r, r);
+    pushI32(r);
+}
+
+void
+BaseCompiler::emitExtendI64_8()
+{
+    RegI64 r = popI64ForSignExtendI64();
+    masm.move8To64SignExtend(lowPart(r), r);
+    pushI64(r);
+}
+
+void
+BaseCompiler::emitExtendI64_16()
+{
+    RegI64 r = popI64ForSignExtendI64();
+    masm.move16To64SignExtend(lowPart(r), r);
+    pushI64(r);
+}
+
+void
+BaseCompiler::emitExtendI64_32()
+{
+    RegI64 x0 = popI64ForSignExtendI64();
+    masm.move32To64SignExtend(lowPart(x0), x0);
+    pushI64(x0);
+}
+
+// i64.extend_s/i32: sign-extend an i32 operand into an i64 result.  The
+// operand is popped directly as an i64 pair sized for sign extension.
+void
+BaseCompiler::emitExtendI32ToI64()
+{
+    RegI64 x0 = popI32ForSignExtendI64();
+    masm.move32To64SignExtend(lowPart(x0), x0);
+    pushI64(x0);
+}
+
+// i64.extend_u/i32: zero-extend an i32 into an i64.  widenI32 supplies the
+// i64 register built around the popped i32.
+void
+BaseCompiler::emitExtendU32ToI64()
+{
+    RegI32 r0 = popI32();
+    RegI64 x0 = widenI32(r0);
+    masm.move32To64ZeroExtend(r0, x0);
+    pushI64(x0);
+}
+
+// i32.reinterpret/f32: bitwise copy of an f32 into an i32.
+void
+BaseCompiler::emitReinterpretF32AsI32()
+{
+    RegF32 r0 = popF32();
+    RegI32 i0 = needI32();
+    masm.moveFloat32ToGPR(r0, i0);
+    freeF32(r0);
+    pushI32(i0);
+}
+
+// i64.reinterpret/f64: bitwise copy of an f64 into an i64.
+void
+BaseCompiler::emitReinterpretF64AsI64()
+{
+    RegF64 r0 = popF64();
+    RegI64 x0 = needI64();
+    masm.moveDoubleToGPR64(r0, x0);
+    freeF64(r0);
+    pushI64(x0);
+}
+
+// f32.demote/f64: narrow an f64 value to f32.
+void
+BaseCompiler::emitConvertF64ToF32()
+{
+    RegF64 r0 = popF64();
+    RegF32 f0 = needF32();
+    masm.convertDoubleToFloat32(r0, f0);
+    freeF64(r0);
+    pushF32(f0);
+}
+
+// f32.convert_s/i32: signed i32 -> f32 conversion.
+void
+BaseCompiler::emitConvertI32ToF32()
+{
+    RegI32 r0 = popI32();
+    RegF32 f0 = needF32();
+    masm.convertInt32ToFloat32(r0, f0);
+    freeI32(r0);
+    pushF32(f0);
+}
+
+// f32.convert_u/i32: unsigned i32 -> f32 conversion.
+void
+BaseCompiler::emitConvertU32ToF32()
+{
+    RegI32 r0 = popI32();
+    RegF32 f0 = needF32();
+    masm.convertUInt32ToFloat32(r0, f0);
+    freeI32(r0);
+    pushF32(f0);
+}
+
+#ifndef RABALDR_I64_TO_FLOAT_CALLOUT
+// f32.convert_s/i64 (inline path; the callout variant is compiled out when
+// RABALDR_I64_TO_FLOAT_CALLOUT is defined).  The signed conversion needs no
+// temp register, hence the default RegI32().
+void
+BaseCompiler::emitConvertI64ToF32()
+{
+    RegI64 r0 = popI64();
+    RegF32 f0 = needF32();
+    convertI64ToF32(r0, IsUnsigned(false), f0, RegI32());
+    freeI64(r0);
+    pushF32(f0);
+}
+
+// f32.convert_u/i64 (inline path).  Some platforms need an integer temp for
+// the unsigned conversion; convertI64ToFloatNeedsTemp decides, and the temp
+// is conditionally allocated and freed.
+void
+BaseCompiler::emitConvertU64ToF32()
+{
+    RegI64 r0 = popI64();
+    RegF32 f0 = needF32();
+    RegI32 temp;
+    if (convertI64ToFloatNeedsTemp(ValType::F32, IsUnsigned(true)))
+        temp = needI32();
+    convertI64ToF32(r0, IsUnsigned(true), f0, temp);
+    maybeFreeI32(temp);
+    freeI64(r0);
+    pushF32(f0);
+}
+#endif
+
+// f64.promote/f32: widen an f32 value to f64.
+void
+BaseCompiler::emitConvertF32ToF64()
+{
+    RegF32 r0 = popF32();
+    RegF64 d0 = needF64();
+    masm.convertFloat32ToDouble(r0, d0);
+    freeF32(r0);
+    pushF64(d0);
+}
+
+// f64.convert_s/i32: signed i32 -> f64 conversion.
+void
+BaseCompiler::emitConvertI32ToF64()
+{
+    RegI32 r0 = popI32();
+    RegF64 d0 = needF64();
+    masm.convertInt32ToDouble(r0, d0);
+    freeI32(r0);
+    pushF64(d0);
+}
+
+// f64.convert_u/i32: unsigned i32 -> f64 conversion.
+void
+BaseCompiler::emitConvertU32ToF64()
+{
+    RegI32 r0 = popI32();
+    RegF64 d0 = needF64();
+    masm.convertUInt32ToDouble(r0, d0);
+    freeI32(r0);
+    pushF64(d0);
+}
+
+#ifndef RABALDR_I64_TO_FLOAT_CALLOUT
+// f64.convert_s/i64 (inline path; callout variant used when
+// RABALDR_I64_TO_FLOAT_CALLOUT is defined).  Signed conversion needs no
+// temp register.
+void
+BaseCompiler::emitConvertI64ToF64()
+{
+    RegI64 r0 = popI64();
+    RegF64 d0 = needF64();
+    convertI64ToF64(r0, IsUnsigned(false), d0, RegI32());
+    freeI64(r0);
+    pushF64(d0);
+}
+
+// f64.convert_u/i64 (inline path).  As with the f32 variant, an integer
+// temp is allocated only when the platform conversion requires it.
+void
+BaseCompiler::emitConvertU64ToF64()
+{
+    RegI64 r0 = popI64();
+    RegF64 d0 = needF64();
+    RegI32 temp;
+    if (convertI64ToFloatNeedsTemp(ValType::F64, IsUnsigned(true)))
+        temp = needI32();
+    convertI64ToF64(r0, IsUnsigned(true), d0, temp);
+    maybeFreeI32(temp);
+    freeI64(r0);
+    pushF64(d0);
+}
+#endif // RABALDR_I64_TO_FLOAT_CALLOUT
+
+// f32.reinterpret/i32: bitwise copy of an i32 into an f32.
+void
+BaseCompiler::emitReinterpretI32AsF32()
+{
+    RegI32 r0 = popI32();
+    RegF32 f0 = needF32();
+    masm.moveGPRToFloat32(r0, f0);
+    freeI32(r0);
+    pushF32(f0);
+}
+
+// f64.reinterpret/i64: bitwise copy of an i64 into an f64.
+void
+BaseCompiler::emitReinterpretI64AsF64()
+{
+    RegI64 r0 = popI64();
+    RegF64 d0 = needF64();
+    masm.moveGPR64ToDouble(r0, d0);
+    freeI64(r0);
+    pushF64(d0);
+}
+
+// Peek at the next opcode: if it is a conditional consumer (select, br_if,
+// if) then record the comparison as "latent" state instead of materializing
+// a boolean, so the comparison can later be fused with the branch.  Returns
+// true if the comparison was deferred; false means the caller must emit the
+// boolean result normally.  Cond is the assembler condition type (int vs
+// double comparisons use different enums).
+template<typename Cond>
+bool
+BaseCompiler::sniffConditionalControlCmp(Cond compareOp, ValType operandType)
+{
+    MOZ_ASSERT(latentOp_ == LatentOp::None, "Latent comparison state not properly reset");
+
+#ifdef JS_CODEGEN_X86
+    // On x86, latent i64 binary comparisons use too many registers: the
+    // reserved join register and the lhs and rhs operands require six, but we
+    // only have five.
+    if (operandType == ValType::I64)
+        return false;
+#endif
+
+    OpBytes op;
+    iter_.peekOp(&op);
+    switch (op.b0) {
+      case uint16_t(Op::Select):
+        MOZ_FALLTHROUGH;
+      case uint16_t(Op::BrIf):
+      case uint16_t(Op::If):
+        setLatentCompare(compareOp, operandType);
+        return true;
+      default:
+        return false;
+    }
+}
+
+// Like sniffConditionalControlCmp, but for eqz: defer the zero test when the
+// next opcode is a conditional consumer.  Returns true if deferred.
+bool
+BaseCompiler::sniffConditionalControlEqz(ValType operandType)
+{
+    MOZ_ASSERT(latentOp_ == LatentOp::None, "Latent comparison state not properly reset");
+
+    OpBytes op;
+    iter_.peekOp(&op);
+    switch (op.b0) {
+      case uint16_t(Op::BrIf):
+      case uint16_t(Op::Select):
+      case uint16_t(Op::If):
+        setLatentEqz(operandType);
+        return true;
+      default:
+        return false;
+    }
+}
+
+// Pop the branch operands into the BranchState according to the recorded
+// latent comparison (if any), so emitBranchPerform() only has to switch on
+// latentType_.  With no latent op, the condition is a plain i32 popped from
+// the stack and compared against 0 with NotEqual.  The join register is
+// reserved around the pops so operand allocation cannot clobber it.
+void
+BaseCompiler::emitBranchSetup(BranchState* b)
+{
+    maybeReserveJoinReg(b->resultType);
+
+    // Set up fields so that emitBranchPerform() need not switch on latentOp_.
+    switch (latentOp_) {
+      case LatentOp::None: {
+        latentIntCmp_ = Assembler::NotEqual;
+        latentType_ = ValType::I32;
+        b->i32.lhs = popI32();
+        b->i32.rhsImm = true;
+        b->i32.imm = 0;
+        break;
+      }
+      case LatentOp::Compare: {
+        switch (latentType_) {
+          case ValType::I32: {
+            // Fold a constant rhs into an immediate compare when possible.
+            if (popConstI32(&b->i32.imm)) {
+                b->i32.lhs = popI32();
+                b->i32.rhsImm = true;
+            } else {
+                pop2xI32(&b->i32.lhs, &b->i32.rhs);
+                b->i32.rhsImm = false;
+            }
+            break;
+          }
+          case ValType::I64: {
+            pop2xI64(&b->i64.lhs, &b->i64.rhs);
+            b->i64.rhsImm = false;
+            break;
+          }
+          case ValType::F32: {
+            pop2xF32(&b->f32.lhs, &b->f32.rhs);
+            break;
+          }
+          case ValType::F64: {
+            pop2xF64(&b->f64.lhs, &b->f64.rhs);
+            break;
+          }
+          default: {
+            MOZ_CRASH("Unexpected type for LatentOp::Compare");
+          }
+        }
+        break;
+      }
+      case LatentOp::Eqz: {
+        // eqz becomes an Equal-to-zero immediate compare.
+        switch (latentType_) {
+          case ValType::I32: {
+            latentIntCmp_ = Assembler::Equal;
+            b->i32.lhs = popI32();
+            b->i32.rhsImm = true;
+            b->i32.imm = 0;
+            break;
+          }
+          case ValType::I64: {
+            latentIntCmp_ = Assembler::Equal;
+            b->i64.lhs = popI64();
+            b->i64.rhsImm = true;
+            b->i64.imm = 0;
+            break;
+          }
+          default: {
+            MOZ_CRASH("Unexpected type for LatentOp::Eqz");
+          }
+        }
+        break;
+      }
+    }
+
+    maybeUnreserveJoinReg(b->resultType);
+}
+
+// Emit the fused compare-and-branch prepared by emitBranchSetup(), freeing
+// the operand registers afterwards, then clear the latent-comparison state.
+// Int compares may use an immediate rhs; float compares always use two
+// registers.
+void
+BaseCompiler::emitBranchPerform(BranchState* b)
+{
+    switch (latentType_) {
+      case ValType::I32: {
+        if (b->i32.rhsImm) {
+            jumpConditionalWithJoinReg(b, latentIntCmp_, b->i32.lhs, Imm32(b->i32.imm));
+        } else {
+            jumpConditionalWithJoinReg(b, latentIntCmp_, b->i32.lhs, b->i32.rhs);
+            freeI32(b->i32.rhs);
+        }
+        freeI32(b->i32.lhs);
+        break;
+      }
+      case ValType::I64: {
+        if (b->i64.rhsImm) {
+            jumpConditionalWithJoinReg(b, latentIntCmp_, b->i64.lhs, Imm64(b->i64.imm));
+        } else {
+            jumpConditionalWithJoinReg(b, latentIntCmp_, b->i64.lhs, b->i64.rhs);
+            freeI64(b->i64.rhs);
+        }
+        freeI64(b->i64.lhs);
+        break;
+      }
+      case ValType::F32: {
+        jumpConditionalWithJoinReg(b, latentDoubleCmp_, b->f32.lhs, b->f32.rhs);
+        freeF32(b->f32.lhs);
+        freeF32(b->f32.rhs);
+        break;
+      }
+      case ValType::F64: {
+        jumpConditionalWithJoinReg(b, latentDoubleCmp_, b->f64.lhs, b->f64.rhs);
+        freeF64(b->f64.lhs);
+        freeF64(b->f64.rhs);
+        break;
+      }
+      default: {
+        MOZ_CRASH("Unexpected type for LatentOp::Compare");
+      }
+    }
+    resetLatentOp();
+}
+
+// For blocks and loops and ifs:
+//
+//  - Sync the value stack before going into the block in order to simplify exit
+//    from the block: all exits from the block can assume that there are no
+//    live registers except the one carrying the exit value.
+//  - The block can accumulate a number of dead values on the stacks, so when
+//    branching out of the block or falling out at the end be sure to
+//    pop the appropriate stacks back to where they were on entry, while
+//    preserving the exit value.
+//  - A continue branch in a loop is much like an exit branch, but the branch
+//    value must not be preserved.
+//  - The exit value is always in a designated join register (type dependent).
+
+// Enter a `block` construct: validate via the iterator, sync the value stack
+// (so exits need not reason about live registers), and initialize the new
+// control item.
+bool
+BaseCompiler::emitBlock()
+{
+    if (!iter_.readBlock())
+        return false;
+
+    if (!deadCode_)
+        sync();                    // Simplifies branching out from block
+
+    initControl(controlItem());
+
+    return true;
+}
+
+// Leave a `block`: capture the fallthrough value (if live), pop the CPU and
+// value stacks back to block entry, bind the block's exit label, and re-push
+// the join value.  If the fallthrough is dead but a branch targeted the
+// label, the value arrives in the join register and is captured there.
+// Bounds-check-elimination state is merged from all exits.
+void
+BaseCompiler::endBlock(ExprType type)
+{
+    Control& block = controlItem();
+
+    // Save the value.
+    Maybe<AnyReg> r;
+    if (!deadCode_) {
+        r = popJoinRegUnlessVoid(type);
+        block.bceSafeOnExit &= bceSafe_;
+    }
+
+    // Leave the block.
+    popStackOnBlockExit(block.framePushed);
+    popValueStackTo(block.stackSize);
+
+    // Bind after cleanup: branches out will have popped the stack.
+    if (block.label.used()) {
+        masm.bind(&block.label);
+        // No value was provided by the fallthrough but the branch out will
+        // have stored one in joinReg, so capture that.
+        if (deadCode_)
+            r = captureJoinRegUnlessVoid(type);
+        deadCode_ = false;
+    }
+
+    bceSafe_ = block.bceSafeOnExit;
+
+    // Retain the value stored in joinReg by all paths, if there are any.
+    if (!deadCode_)
+        pushJoinRegUnlessVoid(r);
+}
+
+// Enter a `loop` construct: sync, init the control item, bind the loop-top
+// label (continue branches target the top), and insert an interrupt check so
+// long-running loops remain interruptible.  bceSafe_ is cleared because
+// back-edges invalidate bounds-check elimination facts.
+bool
+BaseCompiler::emitLoop()
+{
+    if (!iter_.readLoop())
+        return false;
+
+    if (!deadCode_)
+        sync();                    // Simplifies branching out from block
+
+    initControl(controlItem());
+    bceSafe_ = 0;
+
+    if (!deadCode_) {
+        masm.nopAlign(CodeAlignment);
+        masm.bind(&controlItem(0).label);
+        addInterruptCheck();
+    }
+
+    return true;
+}
+
+// Leave a `loop`: capture the fallthrough value, pop the stacks back to
+// entry, and re-push the join value.  Unlike endBlock there is no exit label
+// to bind -- branches to a loop go to the top, not past the end.
+void
+BaseCompiler::endLoop(ExprType type)
+{
+    Control& block = controlItem();
+
+    Maybe<AnyReg> r;
+    if (!deadCode_) {
+        r = popJoinRegUnlessVoid(type);
+        // block.bceSafeOnExit need not be updated because it won't be used for
+        // the fallthrough path.
+    }
+
+    popStackOnBlockExit(block.framePushed);
+    popValueStackTo(block.stackSize);
+
+    // bceSafe_ stays the same along the fallthrough path because branches to
+    // loops branch to the top.
+
+    // Retain the value stored in joinReg by all paths.
+    if (!deadCode_)
+        pushJoinRegUnlessVoid(r);
+}
+
+// The bodies of the "then" and "else" arms can be arbitrary sequences
+// of expressions, they push control and increment the nesting and can
+// even be targeted by jumps.  A branch to the "if" block branches to
+// the exit of the if, ie, it's like "break".  Consider:
+//
+//      (func (result i32)
+//       (if (i32.const 1)
+//           (begin (br 1) (unreachable))
+//           (begin (unreachable)))
+//       (i32.const 1))
+//
+// The branch causes neither of the unreachable expressions to be
+// evaluated.
+
+// Enter an `if`: set up an inverted conditional branch to the "else" label
+// (taken when the condition is false).  The branch operands are gathered
+// before sync() so a latent compare can be fused; in dead code the latent
+// state is simply discarded.
+bool
+BaseCompiler::emitIf()
+{
+    Nothing unused_cond;
+    if (!iter_.readIf(&unused_cond))
+        return false;
+
+    BranchState b(&controlItem().otherLabel, BranchState::NoPop, InvertBranch(true));
+    if (!deadCode_) {
+        emitBranchSetup(&b);
+        sync();
+    } else {
+        resetLatentOp();
+    }
+
+    initControl(controlItem());
+
+    if (!deadCode_)
+        emitBranchPerform(&b);
+
+    return true;
+}
+
+// End an `if` with no `else` arm: pop the stacks, bind both the "else"
+// target (otherLabel) and the exit label, merge BCE state, and restore the
+// dead-code flag from block entry (the not-taken path always falls through).
+void
+BaseCompiler::endIfThen()
+{
+    Control& ifThen = controlItem();
+
+    popStackOnBlockExit(ifThen.framePushed);
+    popValueStackTo(ifThen.stackSize);
+
+    if (ifThen.otherLabel.used())
+        masm.bind(&ifThen.otherLabel);
+
+    if (ifThen.label.used())
+        masm.bind(&ifThen.label);
+
+    if (!deadCode_)
+        ifThen.bceSafeOnExit &= bceSafe_;
+
+    deadCode_ = ifThen.deadOnArrival;
+
+    bceSafe_ = ifThen.bceSafeOnExit & ifThen.bceSafeOnEntry;
+}
+
+// Transition from the "then" arm to the "else" arm of an if/else: save the
+// then-value in the join register, unwind the stacks, jump to the join
+// label, bind the else entry (otherLabel), and reset per-arm state
+// (dead-code flag, BCE facts) to what it was on entry to the `if`.
+bool
+BaseCompiler::emitElse()
+{
+    ExprType thenType;
+    Nothing unused_thenValue;
+
+    if (!iter_.readElse(&thenType, &unused_thenValue))
+        return false;
+
+    Control& ifThenElse = controlItem(0);
+
+    // See comment in endIfThenElse, below.
+
+    // Exit the "then" branch.
+
+    ifThenElse.deadThenBranch = deadCode_;
+
+    Maybe<AnyReg> r;
+    if (!deadCode_)
+        r = popJoinRegUnlessVoid(thenType);
+
+    popStackOnBlockExit(ifThenElse.framePushed);
+    popValueStackTo(ifThenElse.stackSize);
+
+    if (!deadCode_)
+        masm.jump(&ifThenElse.label);
+
+    if (ifThenElse.otherLabel.used())
+        masm.bind(&ifThenElse.otherLabel);
+
+    // Reset to the "else" branch.
+
+    if (!deadCode_) {
+        freeJoinRegUnlessVoid(r);
+        ifThenElse.bceSafeOnExit &= bceSafe_;
+    }
+
+    deadCode_ = ifThenElse.deadOnArrival;
+    bceSafe_ = ifThenElse.bceSafeOnEntry;
+
+    return true;
+}
+
+// End an if/else: capture the else-arm's value (or, if the else fallthrough
+// is dead, the value left in the join register by the then-arm's jump),
+// unwind, bind the join label, and compute whether the join point is live at
+// all before re-pushing the join value.
+void
+BaseCompiler::endIfThenElse(ExprType type)
+{
+    Control& ifThenElse = controlItem();
+
+    // The expression type is not a reliable guide to what we'll find
+    // on the stack, we could have (if E (i32.const 1) (unreachable))
+    // in which case the "else" arm is AnyType but the type of the
+    // full expression is I32.  So restore whatever's there, not what
+    // we want to find there.  The "then" arm has the same constraint.
+
+    Maybe<AnyReg> r;
+    if (!deadCode_) {
+        r = popJoinRegUnlessVoid(type);
+        ifThenElse.bceSafeOnExit &= bceSafe_;
+    }
+
+    popStackOnBlockExit(ifThenElse.framePushed);
+    popValueStackTo(ifThenElse.stackSize);
+
+    if (ifThenElse.label.used())
+        masm.bind(&ifThenElse.label);
+
+    // The join is live if the construct was reachable and either arm (or a
+    // branch to the join label) can deliver control here.
+    bool joinLive = !ifThenElse.deadOnArrival &&
+                    (!ifThenElse.deadThenBranch || !deadCode_ || ifThenElse.label.bound());
+
+    if (joinLive) {
+        // No value was provided by the "then" path but capture the one
+        // provided by the "else" path.
+        if (deadCode_)
+            r = captureJoinRegUnlessVoid(type);
+        deadCode_ = false;
+    }
+
+    bceSafe_ = ifThenElse.bceSafeOnExit;
+
+    if (!deadCode_)
+        pushJoinRegUnlessVoid(r);
+}
+
+// `end` opcode: dispatch to the matching end* routine based on the kind of
+// the innermost control item, then pop it from the iterator's control stack.
+bool
+BaseCompiler::emitEnd()
+{
+    LabelKind kind;
+    ExprType type;
+    Nothing unused_value;
+    if (!iter_.readEnd(&kind, &type, &unused_value))
+        return false;
+
+    switch (kind) {
+      case LabelKind::Block: endBlock(type); break;
+      case LabelKind::Loop:  endLoop(type); break;
+      case LabelKind::Then:  endIfThen(); break;
+      case LabelKind::Else:  endIfThenElse(type); break;
+    }
+
+    iter_.popEnd();
+
+    return true;
+}
+
+// Unconditional `br`: move the branch value into the join register (where
+// the target's exit code expects it), unwind the CPU stack to the target's
+// frame depth, jump, and mark subsequent code dead.
+bool
+BaseCompiler::emitBr()
+{
+    uint32_t relativeDepth;
+    ExprType type;
+    Nothing unused_value;
+    if (!iter_.readBr(&relativeDepth, &type, &unused_value))
+        return false;
+
+    if (deadCode_)
+        return true;
+
+    Control& target = controlItem(relativeDepth);
+    target.bceSafeOnExit &= bceSafe_;
+
+    // Save any value in the designated join register, where the
+    // normal block exit code will also leave it.
+
+    Maybe<AnyReg> r = popJoinRegUnlessVoid(type);
+
+    popStackBeforeBranch(target.framePushed);
+    masm.jump(&target.label);
+
+    // The register holding the join value is free for the remainder
+    // of this block.
+
+    freeJoinRegUnlessVoid(r);
+
+    deadCode_ = true;
+
+    return true;
+}
+
+// Conditional `br_if`: a non-inverted fused branch to the target label; the
+// BranchState carries the target frame depth and result type so the branch
+// path can unwind and deliver the join value.  In dead code, only the latent
+// comparison state needs discarding.
+bool
+BaseCompiler::emitBrIf()
+{
+    uint32_t relativeDepth;
+    ExprType type;
+    Nothing unused_value, unused_condition;
+    if (!iter_.readBrIf(&relativeDepth, &type, &unused_value, &unused_condition))
+        return false;
+
+    if (deadCode_) {
+        resetLatentOp();
+        return true;
+    }
+
+    Control& target = controlItem(relativeDepth);
+    target.bceSafeOnExit &= bceSafe_;
+
+    BranchState b(&target.label, target.framePushed, InvertBranch(false), type);
+    emitBranchSetup(&b);
+    emitBranchPerform(&b);
+
+    return true;
+}
+
+// `br_table`: pop the index (kept out of the join register), range-check it
+// against the table, fall back to the default target when out of range, and
+// otherwise jump indirectly through a jump table.  Each table entry is a
+// small stub that unwinds to its target's frame depth before jumping,
+// because the targets may be at different stack depths.  Returns false on
+// OOM while reserving the stub vector.
+bool
+BaseCompiler::emitBrTable()
+{
+    Uint32Vector depths;
+    uint32_t defaultDepth;
+    ExprType branchValueType;
+    Nothing unused_value, unused_index;
+    if (!iter_.readBrTable(&depths, &defaultDepth, &branchValueType, &unused_value, &unused_index))
+        return false;
+
+    if (deadCode_)
+        return true;
+
+    // Don't use joinReg for rc
+    maybeReserveJoinRegI(branchValueType);
+
+    // Table switch value always on top.
+    RegI32 rc = popI32();
+
+    maybeUnreserveJoinRegI(branchValueType);
+
+    Maybe<AnyReg> r = popJoinRegUnlessVoid(branchValueType);
+
+    Label dispatchCode;
+    masm.branch32(Assembler::Below, rc, Imm32(depths.length()), &dispatchCode);
+
+    // This is the out-of-range stub.  rc is dead here but we don't need it.
+
+    popStackBeforeBranch(controlItem(defaultDepth).framePushed);
+    controlItem(defaultDepth).bceSafeOnExit &= bceSafe_;
+    masm.jump(&controlItem(defaultDepth).label);
+
+    // Emit stubs.  rc is dead in all of these but we don't need it.
+    //
+    // The labels in the vector are in the TempAllocator and will
+    // be freed by and by.
+    //
+    // TODO / OPTIMIZE (Bug 1316804): Branch directly to the case code if we
+    // can, don't emit an intermediate stub.
+
+    LabelVector stubs;
+    if (!stubs.reserve(depths.length()))
+        return false;
+
+    for (uint32_t depth : depths) {
+        stubs.infallibleEmplaceBack(NonAssertingLabel());
+        masm.bind(&stubs.back());
+        popStackBeforeBranch(controlItem(depth).framePushed);
+        controlItem(depth).bceSafeOnExit &= bceSafe_;
+        masm.jump(&controlItem(depth).label);
+    }
+
+    // Emit table.
+
+    Label theTable;
+    jumpTable(stubs, &theTable);
+
+    // Emit indirect jump.  rc is live here.
+
+    tableSwitch(&theTable, rc, &dispatchCode);
+
+    deadCode_ = true;
+
+    // Clean up.
+
+    freeI32(rc);
+    freeJoinRegUnlessVoid(r);
+
+    return true;
+}
+
+// `drop`: discard the top value.  If it occupies memory (was synced), also
+// pop the corresponding CPU-stack space.
+bool
+BaseCompiler::emitDrop()
+{
+    if (!iter_.readDrop())
+        return false;
+
+    if (deadCode_)
+        return true;
+
+    popStackIfMemory();
+    popValueStackBy(1);
+    return true;
+}
+
+// Emit a function return: pop the return value directly into the ABI return
+// register for its type, then emit the epilogue via returnCleanup().
+// `popStack` controls whether returnCleanup pops the operand stack (true for
+// explicit `return`, which may leave dead values behind).
+void
+BaseCompiler::doReturn(ExprType type, bool popStack)
+{
+    switch (type) {
+      case ExprType::Void: {
+        returnCleanup(popStack);
+        break;
+      }
+      case ExprType::I32: {
+        RegI32 rv = popI32(RegI32(ReturnReg));
+        returnCleanup(popStack);
+        freeI32(rv);
+        break;
+      }
+      case ExprType::I64: {
+        RegI64 rv = popI64(RegI64(ReturnReg64));
+        returnCleanup(popStack);
+        freeI64(rv);
+        break;
+      }
+      case ExprType::F64: {
+        RegF64 rv = popF64(RegF64(ReturnDoubleReg));
+        returnCleanup(popStack);
+        freeF64(rv);
+        break;
+      }
+      case ExprType::F32: {
+        RegF32 rv = popF32(RegF32(ReturnFloat32Reg));
+        returnCleanup(popStack);
+        freeF32(rv);
+        break;
+      }
+      default: {
+        MOZ_CRASH("Function return type");
+      }
+    }
+}
+
+// `return` opcode: delegate to doReturn with the function's signature return
+// type, then mark following code dead.
+bool
+BaseCompiler::emitReturn()
+{
+    Nothing unused_value;
+    if (!iter_.readReturn(&unused_value))
+        return false;
+
+    if (deadCode_)
+        return true;
+
+    doReturn(sig().ret(), PopStack(true));
+    deadCode_ = true;
+
+    return true;
+}
+
+// Marshal call arguments from the value stack into argument registers /
+// outgoing stack slots.  Arguments are peeked (not popped) from the stack --
+// the caller pops them after the call.  Reloads the Tls register last since
+// arg setup may clobber it.
+bool
+BaseCompiler::emitCallArgs(const ValTypeVector& argTypes, FunctionCall& baselineCall)
+{
+    MOZ_ASSERT(!deadCode_);
+
+    startCallArgs(baselineCall, stackArgAreaSize(argTypes));
+
+    uint32_t numArgs = argTypes.length();
+    for (size_t i = 0; i < numArgs; ++i)
+        passArg(baselineCall, argTypes[i], peek(numArgs - 1 - i));
+
+    masm.loadWasmTlsRegFromFrame();
+    return true;
+}
+
+// After a call, capture the ABI return value into an allocated register of
+// the appropriate type and push it on the value stack.  Void is a caller
+// error (callers must check IsVoid first).
+void
+BaseCompiler::pushReturned(const FunctionCall& call, ExprType type)
+{
+    switch (type) {
+      case ExprType::Void:
+        MOZ_CRASH("Compiler bug: attempt to push void return");
+        break;
+      case ExprType::I32: {
+        RegI32 rv = captureReturnedI32();
+        pushI32(rv);
+        break;
+      }
+      case ExprType::I64: {
+        RegI64 rv = captureReturnedI64();
+        pushI64(rv);
+        break;
+      }
+      case ExprType::F32: {
+        RegF32 rv = captureReturnedF32(call);
+        pushF32(rv);
+        break;
+      }
+      case ExprType::F64: {
+        RegF64 rv = captureReturnedF64(call);
+        pushF64(rv);
+        break;
+      }
+      default:
+        MOZ_CRASH("Function return type");
+    }
+}
+
+// For now, always sync() at the beginning of the call to easily save live
+// values.
+//
+// TODO / OPTIMIZE (Bug 1316806): We may be able to avoid a full sync(), since
+// all we want is to save live registers that won't be saved by the callee or
+// that we need for outgoing args - we don't need to sync the locals.  We can
+// just push the necessary registers, it'll be like a lightweight sync.
+//
+// Even some of the pushing may be unnecessary if the registers will be consumed
+// by the call, because then what we want is parallel assignment to the argument
+// registers or onto the stack for outgoing arguments.  A sync() is just
+// simpler.
+
+// Direct `call`: sync live values, marshal args, and emit either an import
+// call (through the instance's global data) or a direct definition call.
+// Afterwards the consumed args are popped from the value stack and the
+// return value (if any) is pushed.
+bool
+BaseCompiler::emitCall()
+{
+    uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+    uint32_t funcIndex;
+    BaseOpIter::ValueVector args_;
+    if (!iter_.readCall(&funcIndex, &args_))
+        return false;
+
+    if (deadCode_)
+        return true;
+
+    sync();
+
+    const Sig& sig = *env_.funcSigs[funcIndex];
+    bool import = env_.funcIsImport(funcIndex);
+
+    uint32_t numArgs = sig.args().length();
+    size_t stackSpace = stackConsumed(numArgs);
+
+    FunctionCall baselineCall(lineOrBytecode);
+    // Imports cross an instance boundary, so they are InterModule.
+    beginCall(baselineCall, UseABI::Wasm, import ? InterModule::True : InterModule::False);
+
+    if (!emitCallArgs(sig.args(), baselineCall))
+        return false;
+
+    if (import)
+        callImport(env_.funcImportGlobalDataOffsets[funcIndex], baselineCall);
+    else
+        callDefinition(funcIndex, baselineCall);
+
+    endCall(baselineCall, stackSpace);
+
+    popValueStackBy(numArgs);
+
+    if (!IsVoid(sig.ret()))
+        pushReturned(baselineCall, sig.ret());
+
+    return true;
+}
+
+// `call_indirect`: like emitCall, but the callee (table index) sits on top
+// of the value stack above the arguments, so it is popped off the compiler's
+// value stack first (by copy -- the CPU stack is untouched) so emitCallArgs
+// can find the arguments at the top.  Always InterModule since the table may
+// contain functions from other instances.
+bool
+BaseCompiler::emitCallIndirect()
+{
+    uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+    uint32_t sigIndex;
+    Nothing callee_;
+    BaseOpIter::ValueVector args_;
+    if (!iter_.readCallIndirect(&sigIndex, &callee_, &args_))
+        return false;
+
+    if (deadCode_)
+        return true;
+
+    sync();
+
+    const SigWithId& sig = env_.sigs[sigIndex];
+
+    // Stack: ... arg1 .. argn callee
+
+    uint32_t numArgs = sig.args().length();
+    size_t stackSpace = stackConsumed(numArgs + 1);
+
+    // The arguments must be at the stack top for emitCallArgs, so pop the
+    // callee if it is on top.  Note this only pops the compiler's stack,
+    // not the CPU stack.
+
+    Stk callee = stk_.popCopy();
+
+    FunctionCall baselineCall(lineOrBytecode);
+    beginCall(baselineCall, UseABI::Wasm, InterModule::True);
+
+    if (!emitCallArgs(sig.args(), baselineCall))
+        return false;
+
+    callIndirect(sigIndex, callee, baselineCall);
+
+    endCall(baselineCall, stackSpace);
+
+    popValueStackBy(numArgs);
+
+    if (!IsVoid(sig.ret()))
+        pushReturned(baselineCall, sig.ret());
+
+    return true;
+}
+
+// Emit an in-place hardware rounding (ceil/floor/trunc/nearest) for an f32
+// or f64 operand.  Only called when supportsRoundInstruction() is true.
+void
+BaseCompiler::emitRound(RoundingMode roundingMode, ValType operandType)
+{
+    if (operandType == ValType::F32) {
+        RegF32 f0 = popF32();
+        roundF32(roundingMode, f0);
+        pushF32(f0);
+    } else if (operandType == ValType::F64) {
+        RegF64 f0 = popF64();
+        roundF64(roundingMode, f0);
+        pushF64(f0);
+    } else {
+        MOZ_CRASH("unexpected type");
+    }
+}
+
+// Unary math builtin (sin, cos, ceil, ...): if the callee is a rounding
+// function and the hardware has a round instruction, emit it inline;
+// otherwise fall back to a System-ABI call to the C++ builtin, using the
+// f32 or f64 single-argument signature as appropriate.
+bool
+BaseCompiler::emitUnaryMathBuiltinCall(SymbolicAddress callee, ValType operandType)
+{
+    uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+    Nothing operand_;
+    if (!iter_.readUnary(operandType, &operand_))
+        return false;
+
+    if (deadCode_)
+        return true;
+
+    RoundingMode roundingMode;
+    if (IsRoundingFunction(callee, &roundingMode) && supportsRoundInstruction(roundingMode)) {
+        emitRound(roundingMode, operandType);
+        return true;
+    }
+
+    sync();
+
+    ValTypeVector& signature = operandType == ValType::F32 ? SigF_ : SigD_;
+    ExprType retType = operandType == ValType::F32 ? ExprType::F32 : ExprType::F64;
+    uint32_t numArgs = signature.length();
+    size_t stackSpace = stackConsumed(numArgs);
+
+    FunctionCall baselineCall(lineOrBytecode);
+    beginCall(baselineCall, UseABI::System, InterModule::False);
+
+    if (!emitCallArgs(signature, baselineCall))
+        return false;
+
+    builtinCall(callee, baselineCall);
+
+    endCall(baselineCall, stackSpace);
+
+    popValueStackBy(numArgs);
+
+    pushReturned(baselineCall, retType);
+
+    return true;
+}
+
+#ifdef RABALDR_INT_DIV_I64_CALLOUT
+// i64 div/mod via a C++ callout (platforms without native 64-bit divide).
+// The dividend is pinned in the ABI return register pair so the callout's
+// result lands where we want it.  Divide-by-zero and signed-overflow checks
+// are emitted inline before the call; the overflow check can branch straight
+// to `done` with the result already set.  High/low halves are passed as
+// separate ABI args -- this path is for 32-bit targets.
+void
+BaseCompiler::emitDivOrModI64BuiltinCall(SymbolicAddress callee, ValType operandType)
+{
+    MOZ_ASSERT(operandType == ValType::I64);
+    MOZ_ASSERT(!deadCode_);
+
+    sync();
+
+    needI64(abiReturnRegI64);
+
+    RegI64 rhs = popI64();
+    RegI64 srcDest = popI64ToSpecific(abiReturnRegI64);
+
+    Label done;
+
+    checkDivideByZeroI64(rhs);
+
+    if (callee == SymbolicAddress::DivI64)
+        checkDivideSignedOverflowI64(rhs, srcDest, &done, ZeroOnOverflow(false));
+    else if (callee == SymbolicAddress::ModI64)
+        checkDivideSignedOverflowI64(rhs, srcDest, &done, ZeroOnOverflow(true));
+
+    masm.setupWasmABICall();
+    masm.passABIArg(srcDest.high);
+    masm.passABIArg(srcDest.low);
+    masm.passABIArg(rhs.high);
+    masm.passABIArg(rhs.low);
+    masm.callWithABI(bytecodeOffset(), callee);
+
+    masm.bind(&done);
+
+    freeI64(rhs);
+    pushI64(srcDest);
+}
+#endif // RABALDR_INT_DIV_I64_CALLOUT
+
+#ifdef RABALDR_I64_TO_FLOAT_CALLOUT
+// i64 -> f32/f64 conversion via a C++ callout (this file is compiled with
+// RABALDR_I64_TO_FLOAT_CALLOUT on platforms lacking the inline conversion).
+// The i64 operand is passed as high/low ABI args; 64-bit (JS_PUNBOX64)
+// platforms should never reach this hook.
+bool
+BaseCompiler::emitConvertInt64ToFloatingCallout(SymbolicAddress callee, ValType operandType,
+                                                ValType resultType)
+{
+    sync();
+
+    RegI64 input = popI64();
+
+    FunctionCall call(0);
+
+    masm.setupWasmABICall();
+# if defined(JS_PUNBOX64)
+    MOZ_CRASH("BaseCompiler platform hook: emitConvertInt64ToFloatingCallout");
+# else
+    masm.passABIArg(input.high);
+    masm.passABIArg(input.low);
+# endif
+    masm.callWithABI(bytecodeOffset(), callee,
+                     resultType == ValType::F32 ? MoveOp::FLOAT32 : MoveOp::DOUBLE);
+
+    freeI64(input);
+
+    if (resultType == ValType::F32)
+        pushF32(captureReturnedF32(call));
+    else
+        pushF64(captureReturnedF64(call));
+
+    return true;
+}
+#endif // RABALDR_I64_TO_FLOAT_CALLOUT
+
+#ifdef RABALDR_FLOAT_TO_I64_CALLOUT
+// `Callee` always takes a double, so a float32 input must be converted.
+// f32/f64 -> i64 truncation via a C++ callout.  The callee always takes a
+// double, so an f32 input is widened first.  A copy of the input is pushed
+// across the call (the value stack survives the sync) because the
+// out-of-line range check needs the original operand to decide between
+// "integer overflow" and "invalid conversion" traps; the callout signals
+// failure by returning 0x8000000000000000.  Returns false on OOM while
+// allocating the OOL code.
+bool
+BaseCompiler::emitConvertFloatingToInt64Callout(SymbolicAddress callee, ValType operandType,
+                                                ValType resultType)
+{
+    RegF64 doubleInput;
+    if (operandType == ValType::F32) {
+        doubleInput = needF64();
+        RegF32 input = popF32();
+        masm.convertFloat32ToDouble(input, doubleInput);
+        freeF32(input);
+    } else {
+        doubleInput = popF64();
+    }
+
+    // We may need the value after the call for the ool check.
+    RegF64 otherReg = needF64();
+    moveF64(doubleInput, otherReg);
+    pushF64(otherReg);
+
+    sync();
+
+    FunctionCall call(0);
+
+    masm.setupWasmABICall();
+    masm.passABIArg(doubleInput, MoveOp::DOUBLE);
+    masm.callWithABI(bytecodeOffset(), callee);
+
+    freeF64(doubleInput);
+
+    RegI64 rv = captureReturnedI64();
+
+    RegF64 inputVal = popF64();
+
+    bool isUnsigned = callee == SymbolicAddress::TruncateDoubleToUint64;
+
+    // The OOL check just succeeds or fails, it does not generate a value.
+    OutOfLineCode* ool = new (alloc_) OutOfLineTruncateCheckF32OrF64ToI64(AnyReg(inputVal),
+                                                                          isUnsigned,
+                                                                          bytecodeOffset());
+    ool = addOutOfLineCode(ool);
+    if (!ool)
+        return false;
+
+    masm.branch64(Assembler::Equal, rv, Imm64(0x8000000000000000), ool->entry());
+    masm.bind(ool->rejoin());
+
+    pushI64(rv);
+    freeF64(inputVal);
+
+    return true;
+}
+#endif // RABALDR_FLOAT_TO_I64_CALLOUT
+
+// `get_local`: push a lazy reference to the local's frame slot rather than
+// loading it now; the load is deferred until the value is needed, a store
+// to the local intervenes, or a sync occurs.  This reduces register
+// pressure.
+bool
+BaseCompiler::emitGetLocal()
+{
+    uint32_t slot;
+    if (!iter_.readGetLocal(locals_, &slot))
+        return false;
+
+    if (deadCode_)
+        return true;
+
+    // Local loads are pushed unresolved, ie, they may be deferred
+    // until needed, until they may be affected by a store, or until a
+    // sync.  This is intended to reduce register pressure.
+
+    switch (locals_[slot]) {
+      case ValType::I32:
+        pushLocalI32(slot);
+        break;
+      case ValType::I64:
+        pushLocalI64(slot);
+        break;
+      case ValType::F64:
+        pushLocalF64(slot);
+        break;
+      case ValType::F32:
+        pushLocalF32(slot);
+        break;
+      default:
+        MOZ_CRASH("Local variable type");
+    }
+
+    return true;
+}
+
+// Shared body for set_local (isSetLocal=true) and tee_local (false): pop the
+// value, resolve any deferred loads of this local (syncLocal) so they see
+// the old value, store to the frame slot, then free the register (set) or
+// push it back (tee).  Also invalidates BCE facts derived from this local.
+template<bool isSetLocal>
+bool
+BaseCompiler::emitSetOrTeeLocal(uint32_t slot)
+{
+    if (deadCode_)
+        return true;
+
+    bceLocalIsUpdated(slot);
+    switch (locals_[slot]) {
+      case ValType::I32: {
+        RegI32 rv = popI32();
+        syncLocal(slot);
+        storeToFrameI32(rv, frameOffsetFromSlot(slot, MIRType::Int32));
+        if (isSetLocal)
+            freeI32(rv);
+        else
+            pushI32(rv);
+        break;
+      }
+      case ValType::I64: {
+        RegI64 rv = popI64();
+        syncLocal(slot);
+        storeToFrameI64(rv, frameOffsetFromSlot(slot, MIRType::Int64));
+        if (isSetLocal)
+            freeI64(rv);
+        else
+            pushI64(rv);
+        break;
+      }
+      case ValType::F64: {
+        RegF64 rv = popF64();
+        syncLocal(slot);
+        storeToFrameF64(rv, frameOffsetFromSlot(slot, MIRType::Double));
+        if (isSetLocal)
+            freeF64(rv);
+        else
+            pushF64(rv);
+        break;
+      }
+      case ValType::F32: {
+        RegF32 rv = popF32();
+        syncLocal(slot);
+        storeToFrameF32(rv, frameOffsetFromSlot(slot, MIRType::Float32));
+        if (isSetLocal)
+            freeF32(rv);
+        else
+            pushF32(rv);
+        break;
+      }
+      default:
+        MOZ_CRASH("Local variable type");
+    }
+
+    return true;
+}
+
+// `set_local`: validate, then delegate to the shared set/tee body with the
+// value discarded after the store.
+bool
+BaseCompiler::emitSetLocal()
+{
+    uint32_t slot;
+    Nothing unused_value;
+    if (!iter_.readSetLocal(locals_, &slot, &unused_value))
+        return false;
+    return emitSetOrTeeLocal<true>(slot);
+}
+
+// `tee_local`: validate, then delegate to the shared set/tee body with the
+// value re-pushed after the store.
+bool
+BaseCompiler::emitTeeLocal()
+{
+    uint32_t slot;
+    Nothing unused_value;
+    if (!iter_.readTeeLocal(locals_, &slot, &unused_value))
+        return false;
+    return emitSetOrTeeLocal<false>(slot);
+}
+
+// `get_global`: immutable globals with a known constant value are pushed as
+// constants (no load); otherwise load the global from instance data at its
+// offset and push the register.
+bool
+BaseCompiler::emitGetGlobal()
+{
+    uint32_t id;
+    if (!iter_.readGetGlobal(&id))
+        return false;
+
+    if (deadCode_)
+        return true;
+
+    const GlobalDesc& global = env_.globals[id];
+
+    if (global.isConstant()) {
+        Val value = global.constantValue();
+        switch (value.type()) {
+          case ValType::I32:
+            pushI32(value.i32());
+            break;
+          case ValType::I64:
+            pushI64(value.i64());
+            break;
+          case ValType::F32:
+            pushF32(value.f32());
+            break;
+          case ValType::F64:
+            pushF64(value.f64());
+            break;
+          default:
+            MOZ_CRASH("Global constant type");
+        }
+        return true;
+    }
+
+    switch (global.type()) {
+      case ValType::I32: {
+        RegI32 rv = needI32();
+        loadGlobalVarI32(global.offset(), rv);
+        pushI32(rv);
+        break;
+      }
+      case ValType::I64: {
+        RegI64 rv = needI64();
+        loadGlobalVarI64(global.offset(), rv);
+        pushI64(rv);
+        break;
+      }
+      case ValType::F32: {
+        RegF32 rv = needF32();
+        loadGlobalVarF32(global.offset(), rv);
+        pushF32(rv);
+        break;
+      }
+      case ValType::F64: {
+        RegF64 rv = needF64();
+        loadGlobalVarF64(global.offset(), rv);
+        pushF64(rv);
+        break;
+      }
+      default:
+        MOZ_CRASH("Global variable type");
+        break;
+    }
+    return true;
+}
+
+// Emit `set_global`: pop the top-of-stack value and store it into global
+// `id` at global.offset() in the global data area, then free the value's
+// register(s).
+bool
+BaseCompiler::emitSetGlobal()
+{
+    uint32_t id;
+    Nothing unused_value;
+    if (!iter_.readSetGlobal(&id, &unused_value))
+        return false;
+
+    // In dead code we only validate; no code is generated.
+    if (deadCode_)
+        return true;
+
+    const GlobalDesc& global = env_.globals[id];
+
+    switch (global.type()) {
+      case ValType::I32: {
+        RegI32 rv = popI32();
+        storeGlobalVarI32(global.offset(), rv);
+        freeI32(rv);
+        break;
+      }
+      case ValType::I64: {
+        RegI64 rv = popI64();
+        storeGlobalVarI64(global.offset(), rv);
+        freeI64(rv);
+        break;
+      }
+      case ValType::F32: {
+        RegF32 rv = popF32();
+        storeGlobalVarF32(global.offset(), rv);
+        freeF32(rv);
+        break;
+      }
+      case ValType::F64: {
+        RegF64 rv = popF64();
+        storeGlobalVarF64(global.offset(), rv);
+        freeF64(rv);
+        break;
+      }
+      default:
+        MOZ_CRASH("Global variable type");
+        break;
+      }
+    return true;
+}
+
+// Bounds check elimination.
+//
+// We perform BCE on two kinds of address expressions: on constant heap pointers
+// that are known to be in the heap or will be handled by the out-of-bounds trap
+// handler; and on local variables that have been checked in dominating code
+// without being updated since.
+//
+// For an access through a constant heap pointer + an offset we can eliminate
+// the bounds check if the sum of the address and offset is below the sum of the
+// minimum memory length and the offset guard length.
+//
+// For an access through a local variable + an offset we can eliminate the
+// bounds check if the local variable has already been checked and has not been
+// updated since, and the offset is less than the guard limit.
+//
+// To track locals for which we can eliminate checks we use a bit vector
+// bceSafe_ that has a bit set for those locals whose bounds have been checked
+// and which have not subsequently been set.  Initially this vector is zero.
+//
+// In straight-line code a bit is set when we perform a bounds check on an
+// access via the local and is reset when the variable is updated.
+//
+// In control flow, the bit vector is manipulated as follows.  Each ControlItem
+// has a value bceSafeOnEntry, which is the value of bceSafe_ on entry to the
+// item, and a value bceSafeOnExit, which is initially ~0.  On a branch (br,
+// brIf, brTable), we always AND the branch target's bceSafeOnExit with the
+// value of bceSafe_ at the branch point.  On exiting an item by falling out of
+// it, provided we're not in dead code, we AND the current value of bceSafe_
+// into the item's bceSafeOnExit.  Additional processing depends on the item
+// type:
+//
+//  - After a block, set bceSafe_ to the block's bceSafeOnExit.
+//
+//  - On loop entry, after pushing the ControlItem, set bceSafe_ to zero; the
+//    back edges would otherwise require us to iterate to a fixed point.
+//
+//  - After a loop, the bceSafe_ is left unchanged, because only fallthrough
+//    control flow will reach that point and the bceSafe_ value represents the
+//    correct state of the fallthrough path.
+//
+//  - Set bceSafe_ to the ControlItem's bceSafeOnEntry at both the 'then' branch
+//    and the 'else' branch.
+//
+//  - After an if-then-else, set bceSafe_ to the if-then-else's bceSafeOnExit.
+//
+//  - After an if-then, set bceSafe_ to the if-then's bceSafeOnExit AND'ed with
+//    the if-then's bceSafeOnEntry.
+//
+// Finally, when the debugger allows locals to be mutated we must disable BCE
+// for references via a local, by returning immediately from bceCheckLocal if
+// debugEnabled_ is true.
+//
+//
+// Alignment check elimination.
+//
+// Alignment checks for atomic operations can be omitted if the pointer is a
+// constant and the pointer + offset is aligned.  Alignment checking that can't
+// be omitted can still be simplified by checking only the pointer if the offset
+// is aligned.
+//
+// (In addition, alignment checking of the pointer can be omitted if the pointer
+// has been checked in dominating code, but we don't do that yet.)
+
+// TODO / OPTIMIZE (bug 1329576): There are opportunities to generate better
+// code by not moving a constant address with a zero offset into a register.
+
+// Pop the pointer for a memory access into a register, computing which
+// checks (bounds / alignment) can be elided per the BCE/ACE commentary
+// above, and recording those decisions in `check`.  May fold the access
+// offset into a constant pointer.
+RegI32
+BaseCompiler::popMemoryAccess(MemoryAccessDesc* access, AccessCheck* check)
+{
+    // If the offset is aligned for this access size, only the pointer
+    // itself needs alignment checking.
+    check->onlyPointerAlignment = (access->offset() & (access->byteSize() - 1)) == 0;
+
+    int32_t addrTmp;
+    if (popConstI32(&addrTmp)) {
+        uint32_t addr = addrTmp;
+
+        // Effective address and the limit below which the access is either
+        // in bounds or covered by the offset guard pages.
+        uint64_t ea = uint64_t(addr) + uint64_t(access->offset());
+        uint64_t limit = uint64_t(env_.minMemoryLength) + uint64_t(wasm::OffsetGuardLimit);
+
+        check->omitBoundsCheck = ea < limit;
+        check->omitAlignmentCheck = (ea & (access->byteSize() - 1)) == 0;
+
+        // Fold the offset into the pointer if we can, as this is always
+        // beneficial.
+
+        if (ea <= UINT32_MAX) {
+            addr = uint32_t(ea);
+            access->clearOffset();
+        }
+
+        RegI32 r = needI32();
+        loadConstI32(r, int32_t(addr));
+        return r;
+    }
+
+    // Pointer comes from a local: consult/update the BCE tracking state.
+    uint32_t local;
+    if (peekLocalI32(&local))
+        bceCheckLocal(access, check, local);
+
+    return popI32();
+}
+
+// Load the wasm Tls pointer into a fresh register if this access needs it
+// (per needTlsForAccess); otherwise return an unallocated RegI32.  Callers
+// release it with maybeFreeI32.
+RegI32
+BaseCompiler::maybeLoadTlsForAccess(const AccessCheck& check)
+{
+    RegI32 tls;
+    if (needTlsForAccess(check)) {
+        tls = needI32();
+        masm.loadWasmTlsRegFromFrame(tls);
+    }
+    return tls;
+}
+
+// Shared lowering for all plain (non-atomic) loads: pop the pointer
+// (with check elision via popMemoryAccess), perform the load, and push
+// the result.  Temp register needs are platform-dependent (needLoadTemps).
+bool
+BaseCompiler::loadCommon(MemoryAccessDesc* access, ValType type)
+{
+    AccessCheck check;
+
+    RegI32 tls, tmp1, tmp2, tmp3;
+    needLoadTemps(*access, &tmp1, &tmp2, &tmp3);
+
+    switch (type) {
+      case ValType::I32: {
+        RegI32 rp = popMemoryAccess(access, &check);
+#ifdef JS_CODEGEN_ARM
+        // Unaligned ARM loads need a result register distinct from the
+        // pointer; otherwise the pointer register is reused for the result.
+        RegI32 rv = IsUnaligned(*access) ? needI32() : rp;
+#else
+        RegI32 rv = rp;
+#endif
+        tls = maybeLoadTlsForAccess(check);
+        if (!load(access, &check, tls, rp, AnyReg(rv), tmp1, tmp2, tmp3))
+            return false;
+        pushI32(rv);
+        if (rp != rv)
+            freeI32(rp);
+        break;
+      }
+      case ValType::I64: {
+        RegI64 rv;
+        RegI32 rp;
+#ifdef JS_CODEGEN_X86
+        // On x86 the fixed return pair is claimed before popping the
+        // pointer — the ordering here is deliberate (register pressure on
+        // the pair, presumably; do not reorder without checking).
+        rv = abiReturnRegI64;
+        needI64(rv);
+        rp = popMemoryAccess(access, &check);
+#else
+        rp = popMemoryAccess(access, &check);
+        rv = needI64();
+#endif
+        tls = maybeLoadTlsForAccess(check);
+        if (!load(access, &check, tls, rp, AnyReg(rv), tmp1, tmp2, tmp3))
+            return false;
+        pushI64(rv);
+        freeI32(rp);
+        break;
+      }
+      case ValType::F32: {
+        RegI32 rp = popMemoryAccess(access, &check);
+        RegF32 rv = needF32();
+        tls = maybeLoadTlsForAccess(check);
+        if (!load(access, &check, tls, rp, AnyReg(rv), tmp1, tmp2, tmp3))
+            return false;
+        pushF32(rv);
+        freeI32(rp);
+        break;
+      }
+      case ValType::F64: {
+        RegI32 rp = popMemoryAccess(access, &check);
+        RegF64 rv = needF64();
+        tls = maybeLoadTlsForAccess(check);
+        if (!load(access, &check, tls, rp, AnyReg(rv), tmp1, tmp2, tmp3))
+            return false;
+        pushF64(rv);
+        freeI32(rp);
+        break;
+      }
+      default:
+        MOZ_CRASH("load type");
+        break;
+    }
+
+    maybeFreeI32(tls);
+    maybeFreeI32(tmp1);
+    maybeFreeI32(tmp2);
+    maybeFreeI32(tmp3);
+
+    return true;
+}
+
+// Emit a plain `T.load` instruction: validate via the iterator, then defer
+// to loadCommon with a non-atomic MemoryAccessDesc.
+bool
+BaseCompiler::emitLoad(ValType type, Scalar::Type viewType)
+{
+    LinearMemoryAddress<Nothing> addr;
+    if (!iter_.readLoad(type, Scalar::byteSize(viewType), &addr))
+        return false;
+
+    if (deadCode_)
+        return true;
+
+    MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(bytecodeOffset()));
+    return loadCommon(&access, type);
+}
+
+// Shared lowering for all plain (non-atomic) stores: pop the value, then
+// the pointer (with check elision via popMemoryAccess), perform the store,
+// and free both.  Temp register needs are platform-dependent
+// (needStoreTemps).
+bool
+BaseCompiler::storeCommon(MemoryAccessDesc* access, ValType resultType)
+{
+    AccessCheck check;
+
+    RegI32 tls, tmp;
+    needStoreTemps(*access, resultType, &tmp);
+
+    switch (resultType) {
+      case ValType::I32: {
+        RegI32 rv = popI32();
+        RegI32 rp = popMemoryAccess(access, &check);
+        tls = maybeLoadTlsForAccess(check);
+        if (!store(access, &check, tls, rp, AnyReg(rv), tmp))
+            return false;
+        freeI32(rp);
+        freeI32(rv);
+        break;
+      }
+      case ValType::I64: {
+        RegI64 rv = popI64();
+        RegI32 rp = popMemoryAccess(access, &check);
+        tls = maybeLoadTlsForAccess(check);
+        if (!store(access, &check, tls, rp, AnyReg(rv), tmp))
+            return false;
+        freeI32(rp);
+        freeI64(rv);
+        break;
+      }
+      case ValType::F32: {
+        RegF32 rv = popF32();
+        RegI32 rp = popMemoryAccess(access, &check);
+        tls = maybeLoadTlsForAccess(check);
+        if (!store(access, &check, tls, rp, AnyReg(rv), tmp))
+            return false;
+        freeI32(rp);
+        freeF32(rv);
+        break;
+      }
+      case ValType::F64: {
+        RegF64 rv = popF64();
+        RegI32 rp = popMemoryAccess(access, &check);
+        tls = maybeLoadTlsForAccess(check);
+        if (!store(access, &check, tls, rp, AnyReg(rv), tmp))
+            return false;
+        freeI32(rp);
+        freeF64(rv);
+        break;
+      }
+      default:
+        MOZ_CRASH("store type");
+        break;
+    }
+
+    maybeFreeI32(tls);
+    maybeFreeI32(tmp);
+
+    return true;
+}
+
+// Emit a plain `T.store` instruction: validate via the iterator, then
+// defer to storeCommon with a non-atomic MemoryAccessDesc.
+bool
+BaseCompiler::emitStore(ValType resultType, Scalar::Type viewType)
+{
+    LinearMemoryAddress<Nothing> addr;
+    Nothing unused_value;
+    if (!iter_.readStore(resultType, Scalar::byteSize(viewType), &addr, &unused_value))
+        return false;
+
+    if (deadCode_)
+        return true;
+
+    MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(bytecodeOffset()));
+    return storeCommon(&access, resultType);
+}
+
+// Emit `select`: pops the i32 condition, then the false value, then the
+// true value, and pushes one of the two values.  Uses the latent-branch
+// machinery (emitBranchSetup/emitBranchPerform) so that a preceding
+// comparison can supply the condition directly.
+bool
+BaseCompiler::emitSelect()
+{
+    StackType type;
+    Nothing unused_trueValue;
+    Nothing unused_falseValue;
+    Nothing unused_condition;
+    if (!iter_.readSelect(&type, &unused_trueValue, &unused_falseValue, &unused_condition))
+        return false;
+
+    if (deadCode_) {
+        // Clear any latent comparison so it does not leak into live code.
+        resetLatentOp();
+        return true;
+    }
+
+    // I32 condition on top, then false, then true.
+
+    Label done;
+    BranchState b(&done);
+    emitBranchSetup(&b);
+
+    switch (NonAnyToValType(type)) {
+      case ValType::I32: {
+        RegI32 r0, r1;
+        pop2xI32(&r0, &r1);
+        emitBranchPerform(&b);
+        moveI32(r1, r0);
+        masm.bind(&done);
+        freeI32(r1);
+        pushI32(r0);
+        break;
+      }
+      case ValType::I64: {
+#ifdef JS_CODEGEN_X86
+        // There may be as many as four Int64 values in registers at a time: two
+        // for the latent branch operands, and two for the true/false values we
+        // normally pop before executing the branch.  On x86 this is one value
+        // too many, so we need to generate more complicated code here, and for
+        // simplicity's sake we do so even if the branch operands are not Int64.
+        // However, the resulting control flow diamond is complicated since the
+        // arms of the diamond will have to stay synchronized with respect to
+        // their evaluation stack and regalloc state.  To simplify further, we
+        // use a double branch and a temporary boolean value for now.
+        RegI32 tmp = needI32();
+        loadConstI32(tmp, 0);
+        emitBranchPerform(&b);
+        loadConstI32(tmp, 1);
+        masm.bind(&done);
+
+        Label trueValue;
+        RegI64 r0, r1;
+        pop2xI64(&r0, &r1);
+        masm.branch32(Assembler::Equal, tmp, Imm32(0), &trueValue);
+        moveI64(r1, r0);
+        masm.bind(&trueValue);
+        freeI32(tmp);
+        freeI64(r1);
+        pushI64(r0);
+#else
+        RegI64 r0, r1;
+        pop2xI64(&r0, &r1);
+        emitBranchPerform(&b);
+        moveI64(r1, r0);
+        masm.bind(&done);
+        freeI64(r1);
+        pushI64(r0);
+#endif
+        break;
+      }
+      case ValType::F32: {
+        RegF32 r0, r1;
+        pop2xF32(&r0, &r1);
+        emitBranchPerform(&b);
+        moveF32(r1, r0);
+        masm.bind(&done);
+        freeF32(r1);
+        pushF32(r0);
+        break;
+      }
+      case ValType::F64: {
+        RegF64 r0, r1;
+        pop2xF64(&r0, &r1);
+        emitBranchPerform(&b);
+        moveF64(r1, r0);
+        masm.bind(&done);
+        freeF64(r1);
+        pushF64(r0);
+        break;
+      }
+      default: {
+        MOZ_CRASH("select type");
+      }
+    }
+
+    return true;
+}
+
+// Emit an i32 comparison, pushing a 0/1 i32 result — unless the comparison
+// can be fused with a following conditional branch/select, in which case
+// sniffConditionalControlCmp records it as a latent op and we emit nothing.
+void
+BaseCompiler::emitCompareI32(Assembler::Condition compareOp, ValType compareType)
+{
+    MOZ_ASSERT(compareType == ValType::I32);
+
+    if (sniffConditionalControlCmp(compareOp, compareType))
+        return;
+
+    // If the second operand is a constant, compare against an immediate.
+    int32_t c;
+    if (popConstI32(&c)) {
+        RegI32 r0 = popI32();
+        masm.cmp32Set(compareOp, r0, Imm32(c), r0);
+        pushI32(r0);
+    } else {
+        RegI32 r0, r1;
+        pop2xI32(&r0, &r1);
+        masm.cmp32Set(compareOp, r0, r1, r0);
+        freeI32(r1);
+        pushI32(r0);
+    }
+}
+
+// Emit an i64 comparison, pushing a 0/1 i32 result — unless it is fused
+// with following conditional control (see emitCompareI32).  The i32 result
+// reuses part of r0's register(s); freeI64Except releases the remainder.
+void
+BaseCompiler::emitCompareI64(Assembler::Condition compareOp, ValType compareType)
+{
+    MOZ_ASSERT(compareType == ValType::I64);
+
+    if (sniffConditionalControlCmp(compareOp, compareType))
+        return;
+
+    RegI64 r0, r1;
+    pop2xI64(&r0, &r1);
+    RegI32 i0(fromI64(r0));
+    cmp64Set(compareOp, r0, r1, i0);
+    freeI64(r1);
+    freeI64Except(r0, i0);
+    pushI32(i0);
+}
+
+// Emit an f32 comparison, pushing a 0/1 i32 result — unless fused with
+// following conditional control.  The boolean is materialized with a
+// branch: load 1, skip the overwrite with 0 when the condition holds.
+void
+BaseCompiler::emitCompareF32(Assembler::DoubleCondition compareOp, ValType compareType)
+{
+    MOZ_ASSERT(compareType == ValType::F32);
+
+    if (sniffConditionalControlCmp(compareOp, compareType))
+        return;
+
+    Label across;
+    RegF32 r0, r1;
+    pop2xF32(&r0, &r1);
+    RegI32 i0 = needI32();
+    masm.mov(ImmWord(1), i0);
+    masm.branchFloat(compareOp, r0, r1, &across);
+    masm.mov(ImmWord(0), i0);
+    masm.bind(&across);
+    freeF32(r0);
+    freeF32(r1);
+    pushI32(i0);
+}
+
+// Emit an f64 comparison, pushing a 0/1 i32 result — unless fused with
+// following conditional control.  Same branch-based boolean
+// materialization as emitCompareF32.
+void
+BaseCompiler::emitCompareF64(Assembler::DoubleCondition compareOp, ValType compareType)
+{
+    MOZ_ASSERT(compareType == ValType::F64);
+
+    if (sniffConditionalControlCmp(compareOp, compareType))
+        return;
+
+    Label across;
+    RegF64 r0, r1;
+    pop2xF64(&r0, &r1);
+    RegI32 i0 = needI32();
+    masm.mov(ImmWord(1), i0);
+    masm.branchDouble(compareOp, r0, r1, &across);
+    masm.mov(ImmWord(0), i0);
+    masm.bind(&across);
+    freeF64(r0);
+    freeF64(r1);
+    pushI32(i0);
+}
+
+// Emit a call to an instance builtin `builtin` with MIR signature `sig`
+// (sig[0] is the implicit instance pointer; the remaining entries are the
+// wasm arguments, consumed from the value stack).  Pushes the `retType`
+// result onto the value stack.
+void
+BaseCompiler::emitInstanceCall(uint32_t lineOrBytecode, const MIRTypeVector& sig,
+                               ExprType retType, SymbolicAddress builtin)
+{
+    MOZ_ASSERT(sig[0] == MIRType::Pointer);
+
+    // Flush the value stack: the callee may clobber any register.
+    sync();
+
+    uint32_t numArgs = sig.length() - 1 /* instance */;
+    size_t stackSpace = stackConsumed(numArgs);
+
+    FunctionCall baselineCall(lineOrBytecode);
+    beginCall(baselineCall, UseABI::System, InterModule::True);
+
+    ABIArg instanceArg = reservePointerArgument(baselineCall);
+
+    startCallArgs(baselineCall, stackArgAreaSize(sig));
+    // Pass arguments in order; peek(numArgs - i) addresses the i'th wasm
+    // argument counted from the bottom of the argument block.
+    for (uint32_t i = 1; i < sig.length(); i++) {
+        ValType t;
+        switch (sig[i]) {
+          case MIRType::Int32: t = ValType::I32; break;
+          case MIRType::Int64: t = ValType::I64; break;
+          default:             MOZ_CRASH("Unexpected type");
+        }
+        passArg(baselineCall, t, peek(numArgs - i));
+    }
+    builtinInstanceMethodCall(builtin, instanceArg, baselineCall);
+    endCall(baselineCall, stackSpace);
+
+    popValueStackBy(numArgs);
+
+    pushReturned(baselineCall, retType);
+}
+
+// Emit `grow_memory` as a call to the GrowMemory instance builtin; the
+// delta argument is consumed from the value stack and the (i32) previous
+// size is pushed.
+bool
+BaseCompiler::emitGrowMemory()
+{
+    uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+    Nothing arg;
+    if (!iter_.readGrowMemory(&arg))
+        return false;
+
+    if (deadCode_)
+        return true;
+
+    emitInstanceCall(lineOrBytecode, SigPI_, ExprType::I32, SymbolicAddress::GrowMemory);
+    return true;
+}
+
+// Emit `current_memory` as a call to the CurrentMemory instance builtin;
+// pushes the (i32) current size.
+bool
+BaseCompiler::emitCurrentMemory()
+{
+    uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+    if (!iter_.readCurrentMemory())
+        return false;
+
+    if (deadCode_)
+        return true;
+
+    emitInstanceCall(lineOrBytecode, SigP_, ExprType::I32, SymbolicAddress::CurrentMemory);
+    return true;
+}
+
+// Emit an atomic compare-exchange.  Accesses of <= 4 bytes go through the
+// generic atomicCompareExchange path (with narrowing when an i64 operation
+// uses a small view); 8-byte accesses need platform-specific register
+// pinning (e.g. edx:eax/ecx:ebx on x86 for CMPXCHG8B).
+bool
+BaseCompiler::emitAtomicCmpXchg(ValType type, Scalar::Type viewType)
+{
+    LinearMemoryAddress<Nothing> addr;
+    Nothing unused;
+
+    if (!iter_.readAtomicCmpXchg(&addr, type, Scalar::byteSize(viewType), &unused, &unused))
+        return false;
+
+    if (deadCode_)
+        return true;
+
+    MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(bytecodeOffset()),
+                            /*numSimdElems=*/ 0, MembarFull, MembarFull);
+
+    if (Scalar::byteSize(viewType) <= 4) {
+        // An i64 operation on a <= 4-byte view operates on the narrowed
+        // value and zero-extends the result back to i64.
+        bool narrowing = type == ValType::I64;
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+        needI32(specific_eax);
+        RegI32 rnew = narrowing ? popI64ToI32() : popI32();
+        RegI32 rexpect = narrowing ? popI64ToSpecificI32(specific_eax)
+                                   : popI32ToSpecific(specific_eax);
+        RegI32 rd = rexpect;
+#elif defined(JS_CODEGEN_ARM)
+        RegI32 rnew = narrowing ? popI64ToI32() : popI32();
+        RegI32 rexpect = narrowing ? popI64ToI32() : popI32();
+        RegI32 rd = needI32();
+#else
+        RegI32 rnew, rexpect, rd;
+        MOZ_CRASH("BaseCompiler porting interface: compareExchange");
+#endif
+        AccessCheck check;
+        RegI32 rp = popMemoryAccess(&access, &check);
+        RegI32 tls = maybeLoadTlsForAccess(check);
+
+        atomicCompareExchange(&access, &check, tls, rp, rexpect, rnew, rd);
+
+        maybeFreeI32(tls);
+        freeI32(rp);
+        freeI32(rnew);
+        if (rexpect != rd)
+            freeI32(rexpect);
+
+        if (narrowing)
+            pushU32AsI64(rd);
+        else
+            pushI32(rd);
+
+        return true;
+    }
+
+    MOZ_ASSERT(type == ValType::I64 && Scalar::byteSize(viewType) == 8);
+
+#ifdef JS_CODEGEN_X64
+    needI64(specific_rax);
+    RegI64 rreplace = popI64();
+    RegI64 rexpect = popI64ToSpecific(specific_rax);
+    RegI64 rd = rexpect;
+#elif defined(JS_CODEGEN_X86)
+    needI32(specific_ecx);
+    needI64(specific_edx_eax);
+    // Claim scratch after the need() calls because they may need it to sync.
+    ScratchEBX scratch(*this);
+    RegI64 rreplace = popI64ToSpecific(specific_ecx_ebx);
+    RegI64 rexpect = popI64ToSpecific(specific_edx_eax);
+    RegI64 rd = rexpect;
+#elif defined(JS_CODEGEN_ARM)
+    RegI64 rreplace = popI64Pair();
+    RegI64 rexpect = popI64();
+    RegI64 rd = needI64Pair();
+#else
+    RegI64 rreplace, rexpect, rd;
+    MOZ_CRASH("BaseCompiler porting interface: compareExchange");
+#endif
+
+    AccessCheck check;
+    RegI32 rp = popMemoryAccess(&access, &check);
+    RegI32 tls = maybeLoadTlsForAccess(check);
+    prepareMemoryAccess(&access, &check, tls, rp);
+    ATOMIC_PTR(srcAddr, &access, tls, rp);
+    masm.compareExchange64(srcAddr, rexpect, rreplace, rd);
+
+    pushI64(rd);
+
+    maybeFreeI32(tls);
+    freeI32(rp);
+#if defined(JS_CODEGEN_X64)
+    freeI64(rreplace);
+#elif defined(JS_CODEGEN_X86)
+    freeI32(specific_ecx);
+#elif defined(JS_CODEGEN_ARM)
+    freeI64(rexpect);
+    freeI64(rreplace);
+#else
+    MOZ_CRASH("BaseCompiler porting interface: compareExchange");
+#endif
+
+    return true;
+}
+
+// Emit an atomic load.  Loads up to pointer size reuse loadCommon (the
+// access descriptor carries the required barriers); a 64-bit atomic load
+// on a 32-bit platform needs special register pairs and atomicLoad64.
+bool
+BaseCompiler::emitAtomicLoad(ValType type, Scalar::Type viewType)
+{
+    LinearMemoryAddress<Nothing> addr;
+    if (!iter_.readAtomicLoad(&addr, type, Scalar::byteSize(viewType)))
+        return false;
+
+    if (deadCode_)
+        return true;
+
+    MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(bytecodeOffset()),
+                            /*numSimdElems=*/ 0, MembarBeforeLoad, MembarAfterLoad);
+
+    if (Scalar::byteSize(viewType) <= sizeof(void*))
+        return loadCommon(&access, type);
+
+    MOZ_ASSERT(type == ValType::I64 && Scalar::byteSize(viewType) == 8);
+
+#if defined(JS_64BIT)
+    // On 64-bit platforms the case above always applies.
+    MOZ_CRASH("Should not happen");
+#else
+
+# if defined(JS_CODEGEN_X86)
+    needI32(specific_ecx);
+    needI64(specific_edx_eax);
+    // Claim scratch after the need() calls because they may need it to sync.
+    ScratchEBX scratch(*this);
+    RegI64 tmp = specific_ecx_ebx;
+    RegI64 output = specific_edx_eax;
+# elif defined(JS_CODEGEN_ARM)
+    RegI64 tmp;
+    RegI64 output = needI64Pair();
+# else
+    RegI64 tmp, output;
+    MOZ_CRASH("BaseCompiler porting interface: atomic load 64-bit");
+# endif
+
+    AccessCheck check;
+    RegI32 rp = popMemoryAccess(&access, &check);
+    RegI32 tls = maybeLoadTlsForAccess(check);
+    prepareMemoryAccess(&access, &check, tls, rp);
+    ATOMIC_PTR(srcAddr, &access, tls, rp);
+
+    masm.atomicLoad64(srcAddr, tmp, output);
+    pushI64(output);
+
+    freeI32(rp);
+    maybeFreeI32(tls);
+# if defined(JS_CODEGEN_X86)
+    freeI32(specific_ecx);
+# elif defined(JS_CODEGEN_ARM)
+    // Nothing
+# else
+    MOZ_CRASH("BaseCompiler porting interface: atomic load 64-bit");
+# endif
+
+    return true;
+
+#endif // JS_64BIT
+}
+
+// Emit an atomic read-modify-write (add/sub/and/or/xor).  Accesses of
+// <= 4 bytes use the generic atomicRMW path; 8-byte accesses are
+// platform-specific, with a heavily register-pinned sequence on x86 where
+// the whole register file is effectively claimed.
+bool
+BaseCompiler::emitAtomicRMW(ValType type, Scalar::Type viewType, AtomicOp op)
+{
+    LinearMemoryAddress<Nothing> addr;
+    Nothing unused_value;
+    if (!iter_.readAtomicRMW(&addr, type, Scalar::byteSize(viewType), &unused_value))
+        return false;
+
+    if (deadCode_)
+        return true;
+
+    MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(bytecodeOffset()),
+                            /*numSimdElems=*/ 0, MembarFull, MembarFull);
+
+    if (Scalar::byteSize(viewType) <= 4) {
+        AccessCheck check;
+        // An i64 operation on a <= 4-byte view narrows the operand and
+        // zero-extends the result back to i64.
+        bool narrowing = type == ValType::I64;
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+        // add/sub can use XADD (result in the value register); other ops
+        // need the CMPXCHG loop with the result pinned in eax.
+        bool isAddSub = op == AtomicFetchAddOp || op == AtomicFetchSubOp;
+        needI32(specific_eax);
+        RegI32 rv = narrowing
+                  ? (isAddSub ? popI64ToSpecificI32(specific_eax) : popI64ToI32())
+                  : (isAddSub ? popI32ToSpecific(specific_eax) : popI32());
+        RegI32 rp = popMemoryAccess(&access, &check);
+        RegI32 output = isAddSub ? rv : specific_eax;
+#elif defined(JS_CODEGEN_ARM)
+        RegI32 rv = narrowing ? popI64ToI32() : popI32();
+        RegI32 rp = popMemoryAccess(&access, &check);
+        RegI32 output = needI32();
+#else
+        RegI32 rv, rp, output;
+        MOZ_CRASH("BaseCompiler porting interface: atomic rmw");
+#endif
+        RegI32 tls = maybeLoadTlsForAccess(check);
+        RegI32 tmp;
+        needAtomicRMWTemps(op, &access, &tmp);
+
+        atomicRMW(op, &access, &check, tls, rp, rv, output, tmp);
+
+        maybeFreeI32(tls);
+        maybeFreeI32(tmp);
+        freeI32(rp);
+        if (rv != output)
+            freeI32(rv);
+
+        if (narrowing)
+            pushU32AsI64(output);
+        else
+            pushI32(output);
+        return true;
+    }
+
+    MOZ_ASSERT(type == ValType::I64 && Scalar::byteSize(viewType) == 8);
+
+#if defined(JS_CODEGEN_X86)
+
+    // x86 64-bit RMW: needs edx:eax, ecx:ebx, and two more registers, so
+    // everything is pinned explicitly and the operand value is passed on
+    // the machine stack.
+    sync();
+
+    needI32NoSync(specific_eax);
+    ScratchEBX scratch(*this);           // Already allocated
+    needI32NoSync(specific_ecx);
+    needI32NoSync(specific_edx);
+    needI32NoSync(specific_edi);
+    needI32NoSync(specific_esi);
+
+    AccessCheck check;
+    MOZ_ASSERT(needTlsForAccess(check));
+
+    RegI64 tmp = specific_ecx_ebx;
+    popI64ToSpecific(tmp);
+
+    RegI32 ptr = specific_esi;
+    popI32ToSpecific(ptr);
+
+    RegI32 tls = specific_edi;
+    RegI32 memoryBase = specific_edi;     // Yes, same
+    masm.loadWasmTlsRegFromFrame(tls);
+
+    prepareMemoryAccess(&access, &check, tls, ptr);
+    masm.movl(Operand(Address(tls, offsetof(TlsData, memoryBase))), memoryBase);
+
+    // Spill the operand value (ecx:ebx) so those registers are free for
+    // the CMPXCHG8B loop; atomicRMW64 reads it back from `value`.
+    masm.Push(ecx);
+    masm.Push(ebx);
+
+    RegI64 rd = specific_edx_eax;
+
+    BaseIndex srcAddr(memoryBase, ptr, TimesOne, access.offset());
+    Address value(esp, 0);
+    atomicRMW64(op, value, srcAddr, tmp, rd);
+
+    masm.freeStack(8);
+
+    pushI64(rd);
+    freeI32(specific_ecx);
+    freeI32(specific_edi);
+    freeI32(specific_esi);
+
+#else // !JS_CODEGEN_X86
+
+    AccessCheck check;
+# if defined(JS_CODEGEN_X64)
+    bool isAddSub = op == AtomicFetchAddOp || op == AtomicFetchSubOp;
+    needI64(specific_rax);
+    RegI64 rv = isAddSub ? popI64ToSpecific(specific_rax) : popI64();
+    RegI32 rp = popMemoryAccess(&access, &check);
+    RegI64 rd = isAddSub ? rv : specific_rax;
+# elif defined(JS_CODEGEN_ARM)
+    RegI64 rv = popI64();
+    RegI32 rp = popMemoryAccess(&access, &check);
+    RegI64 rd = needI64Pair();
+#  else
+    RegI64 rv, rd;
+    RegI32 rp;
+    MOZ_CRASH("BaseCompiler porting interface: 64-bit atomic RMW");
+# endif
+
+    RegI32 tls = maybeLoadTlsForAccess(check);
+    RegI64 tmp;
+    needAtomicRMW64Temps(op, &tmp);
+
+    prepareMemoryAccess(&access, &check, tls, rp);
+    ATOMIC_PTR(srcAddr, &access, tls, rp);
+
+    atomicRMW64(op, rv, srcAddr, tmp, rd);
+
+    pushI64(rd);
+
+    maybeFreeI32(tls);
+    freeI32(rp);
+    if (rv != rd)
+        freeI64(rv);
+    maybeFreeI64(tmp);
+
+#endif // !JS_CODEGEN_X86
+
+    return true;
+}
+
+// Emit an atomic store.  Stores up to pointer size reuse storeCommon (the
+// access descriptor carries the required barriers); a 64-bit atomic store
+// on a 32-bit platform is implemented as an exchange whose result is
+// discarded (WantResult(false)).
+bool
+BaseCompiler::emitAtomicStore(ValType type, Scalar::Type viewType)
+{
+    LinearMemoryAddress<Nothing> addr;
+    Nothing unused_value;
+    if (!iter_.readAtomicStore(&addr, type, Scalar::byteSize(viewType), &unused_value))
+        return false;
+
+    if (deadCode_)
+        return true;
+
+    MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(bytecodeOffset()),
+                            /*numSimdElems=*/ 0, MembarBeforeStore, MembarAfterStore);
+
+    if (Scalar::byteSize(viewType) <= sizeof(void*))
+        return storeCommon(&access, type);
+
+    MOZ_ASSERT(type == ValType::I64 && Scalar::byteSize(viewType) == 8);
+
+#ifdef JS_64BIT
+    // On 64-bit platforms the case above always applies.
+    MOZ_CRASH("Should not happen");
+#else
+    xchg64(&access, type, WantResult(false));
+#endif
+
+    return true;
+}
+
+// Emit an atomic exchange.  Accesses of <= 4 bytes use the generic
+// atomicExchange path (with i64 narrowing as for the other atomics);
+// 8-byte exchanges use atomicExchange64 on 64-bit platforms and the
+// xchg64 helper on 32-bit ones.
+bool
+BaseCompiler::emitAtomicXchg(ValType type, Scalar::Type viewType)
+{
+    LinearMemoryAddress<Nothing> addr;
+    Nothing unused_value;
+    if (!iter_.readAtomicRMW(&addr, type, Scalar::byteSize(viewType), &unused_value))
+        return false;
+
+    if (deadCode_)
+        return true;
+
+    AccessCheck check;
+    MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(bytecodeOffset()),
+                            /*numSimdElems=*/ 0, MembarFull, MembarFull);
+
+    if (Scalar::byteSize(viewType) <= 4) {
+        bool narrowing = type == ValType::I64;
+        RegI32 rv = narrowing ? popI64ToI32() : popI32();
+        RegI32 rp = popMemoryAccess(&access, &check);
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+        // XCHG leaves the old value in the operand register.
+        RegI32 rd = rv;
+#else
+        RegI32 rd = needI32();
+#endif
+        RegI32 tls = maybeLoadTlsForAccess(check);
+
+        atomicExchange(&access, &check, tls, rp, rv, rd);
+
+        maybeFreeI32(tls);
+        freeI32(rp);
+        if (rv != rd)
+            freeI32(rv);
+
+        if (narrowing)
+            pushU32AsI64(rd);
+        else
+            pushI32(rd);
+        return true;
+    }
+
+    MOZ_ASSERT(type == ValType::I64 && Scalar::byteSize(viewType) == 8);
+
+#ifdef JS_64BIT
+    RegI64 rv = popI64();
+    RegI32 rp = popMemoryAccess(&access, &check);
+#ifdef JS_CODEGEN_X64
+    RegI64 rd = rv;
+#else
+    RegI64 rd = needI64();
+#endif
+    RegI32 tls = maybeLoadTlsForAccess(check);
+
+    prepareMemoryAccess(&access, &check, tls, rp);
+    ATOMIC_PTR(srcAddr, &access, tls, rp);
+
+    masm.atomicExchange64(srcAddr, rv, rd);
+    pushI64(rd);
+
+    maybeFreeI32(tls);
+    freeI32(rp);
+    if (rv != rd)
+        freeI64(rv);
+#else
+    xchg64(&access, type, WantResult(true));
+#endif
+
+    return true;
+}
+
+// Emit `wait` (atomics): calls the WaitI32/WaitI64 instance builtin; the
+// pointer/expected/timeout arguments are consumed from the value stack by
+// emitInstanceCall.  A negative return value signals an already-reported
+// trap.
+bool
+BaseCompiler::emitWait(ValType type, uint32_t byteSize)
+{
+    uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+    Nothing nothing;
+    LinearMemoryAddress<Nothing> addr;
+    if (!iter_.readWait(&addr, type, byteSize, &nothing, &nothing))
+        return false;
+
+    if (deadCode_)
+        return true;
+
+    switch (type) {
+      case ValType::I32:
+        emitInstanceCall(lineOrBytecode, SigPIIL_, ExprType::I32, SymbolicAddress::WaitI32);
+        break;
+      case ValType::I64:
+        emitInstanceCall(lineOrBytecode, SigPILL_, ExprType::I32, SymbolicAddress::WaitI64);
+        break;
+      default:
+        MOZ_CRASH();
+    }
+    masm.branchTest32(Assembler::Signed, ReturnReg, ReturnReg, trap(Trap::ThrowReported));
+
+    return true;
+}
+
+// Emit `wake` (atomics): calls the Wake instance builtin; arguments are
+// consumed from the value stack by emitInstanceCall.  A negative return
+// value signals an already-reported trap.
+bool
+BaseCompiler::emitWake()
+{
+    uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+    Nothing nothing;
+    LinearMemoryAddress<Nothing> addr;
+    if (!iter_.readWake(&addr, &nothing))
+        return false;
+
+    if (deadCode_)
+        return true;
+
+    emitInstanceCall(lineOrBytecode, SigPII_, ExprType::I32, SymbolicAddress::Wake);
+    masm.branchTest32(Assembler::Signed, ReturnReg, ReturnReg, trap(Trap::ThrowReported));
+
+    return true;
+}
+
+bool
+BaseCompiler::emitBody()
+{
+    if (!iter_.readFunctionStart(sig().ret()))
+        return false;
+
+    initControl(controlItem());
+
+    uint32_t overhead = 0;
+
+    for (;;) {
+
+        Nothing unused_a, unused_b;
+
+#ifdef DEBUG
+        performRegisterLeakCheck();
+#endif
+
+#define emitBinary(doEmit, type) \
+        iter_.readBinary(type, &unused_a, &unused_b) && (deadCode_ || (doEmit(), true))
+
+#define emitUnary(doEmit, type) \
+        iter_.readUnary(type, &unused_a) && (deadCode_ || (doEmit(), true))
+
+#define emitComparison(doEmit, operandType, compareOp) \
+        iter_.readComparison(operandType, &unused_a, &unused_b) && \
+            (deadCode_ || (doEmit(compareOp, operandType), true))
+
+#define emitConversion(doEmit, inType, outType) \
+        iter_.readConversion(inType, outType, &unused_a) && (deadCode_ || (doEmit(), true))
+
+#define emitConversionOOM(doEmit, inType, outType) \
+        iter_.readConversion(inType, outType, &unused_a) && (deadCode_ || doEmit())
+
+#define emitCalloutConversionOOM(doEmit, symbol, inType, outType) \
+        iter_.readConversion(inType, outType, &unused_a) && \
+            (deadCode_ || doEmit(symbol, inType, outType))
+
+#define emitIntDivCallout(doEmit, symbol, type) \
+        iter_.readBinary(type, &unused_a, &unused_b) && (deadCode_ || (doEmit(symbol, type), true))
+
+#define CHECK(E)      if (!(E)) return false
+#define NEXT()        continue
+#define CHECK_NEXT(E) if (!(E)) return false; continue
+
+        // TODO / EVALUATE (bug 1316845): Not obvious that this attempt at
+        // reducing overhead is really paying off relative to making the check
+        // every iteration.
+
+        if (overhead == 0) {
+            // Check every 50 expressions -- a happy medium between
+            // memory usage and checking overhead.
+            overhead = 50;
+
+            // Checking every 50 expressions should be safe, as the
+            // baseline JIT does very little allocation per expression.
+            CHECK(alloc_.ensureBallast());
+
+            // The pushiest opcode is LOOP, which pushes two values
+            // per instance.
+            CHECK(stk_.reserve(stk_.length() + overhead * 2));
+        }
+
+        overhead--;
+
+        OpBytes op;
+        CHECK(iter_.readOp(&op));
+
+        // When debugEnabled_, every operator has breakpoint site but Op::End.
+        if (debugEnabled_ && op.b0 != (uint16_t)Op::End) {
+            // TODO sync only registers that can be clobbered by the exit
+            // prologue/epilogue or disable these registers for use in
+            // baseline compiler when debugEnabled_ is set.
+            sync();
+
+            insertBreakablePoint(CallSiteDesc::Breakpoint);
+        }
+
+        switch (op.b0) {
+          case uint16_t(Op::End):
+            if (!emitEnd())
+                return false;
+
+            if (iter_.controlStackEmpty()) {
+                if (!deadCode_)
+                    doReturn(sig().ret(), PopStack(false));
+                return iter_.readFunctionEnd(iter_.end());
+            }
+            NEXT();
+
+          // Control opcodes
+          case uint16_t(Op::Nop):
+            CHECK_NEXT(iter_.readNop());
+          case uint16_t(Op::Drop):
+            CHECK_NEXT(emitDrop());
+          case uint16_t(Op::Block):
+            CHECK_NEXT(emitBlock());
+          case uint16_t(Op::Loop):
+            CHECK_NEXT(emitLoop());
+          case uint16_t(Op::If):
+            CHECK_NEXT(emitIf());
+          case uint16_t(Op::Else):
+            CHECK_NEXT(emitElse());
+          case uint16_t(Op::Br):
+            CHECK_NEXT(emitBr());
+          case uint16_t(Op::BrIf):
+            CHECK_NEXT(emitBrIf());
+          case uint16_t(Op::BrTable):
+            CHECK_NEXT(emitBrTable());
+          case uint16_t(Op::Return):
+            CHECK_NEXT(emitReturn());
+          case uint16_t(Op::Unreachable):
+            CHECK(iter_.readUnreachable());
+            if (!deadCode_) {
+                unreachableTrap();
+                deadCode_ = true;
+            }
+            NEXT();
+
+          // Calls
+          case uint16_t(Op::Call):
+            CHECK_NEXT(emitCall());
+          case uint16_t(Op::CallIndirect):
+            CHECK_NEXT(emitCallIndirect());
+
+          // Locals and globals
+          case uint16_t(Op::GetLocal):
+            CHECK_NEXT(emitGetLocal());
+          case uint16_t(Op::SetLocal):
+            CHECK_NEXT(emitSetLocal());
+          case uint16_t(Op::TeeLocal):
+            CHECK_NEXT(emitTeeLocal());
+          case uint16_t(Op::GetGlobal):
+            CHECK_NEXT(emitGetGlobal());
+          case uint16_t(Op::SetGlobal):
+            CHECK_NEXT(emitSetGlobal());
+
+          // Select
+          case uint16_t(Op::Select):
+            CHECK_NEXT(emitSelect());
+
+          // I32
+          case uint16_t(Op::I32Const): {
+            int32_t i32;
+            CHECK(iter_.readI32Const(&i32));
+            if (!deadCode_)
+                pushI32(i32);
+            NEXT();
+          }
+          case uint16_t(Op::I32Add):
+            CHECK_NEXT(emitBinary(emitAddI32, ValType::I32));
+          case uint16_t(Op::I32Sub):
+            CHECK_NEXT(emitBinary(emitSubtractI32, ValType::I32));
+          case uint16_t(Op::I32Mul):
+            CHECK_NEXT(emitBinary(emitMultiplyI32, ValType::I32));
+          case uint16_t(Op::I32DivS):
+            CHECK_NEXT(emitBinary(emitQuotientI32, ValType::I32));
+          case uint16_t(Op::I32DivU):
+            CHECK_NEXT(emitBinary(emitQuotientU32, ValType::I32));
+          case uint16_t(Op::I32RemS):
+            CHECK_NEXT(emitBinary(emitRemainderI32, ValType::I32));
+          case uint16_t(Op::I32RemU):
+            CHECK_NEXT(emitBinary(emitRemainderU32, ValType::I32));
+          case uint16_t(Op::I32Eqz):
+            CHECK_NEXT(emitConversion(emitEqzI32, ValType::I32, ValType::I32));
+          case uint16_t(Op::I32TruncSF32):
+            CHECK_NEXT(emitConversionOOM(emitTruncateF32ToI32<false>, ValType::F32, ValType::I32));
+          case uint16_t(Op::I32TruncUF32):
+            CHECK_NEXT(emitConversionOOM(emitTruncateF32ToI32<true>, ValType::F32, ValType::I32));
+          case uint16_t(Op::I32TruncSF64):
+            CHECK_NEXT(emitConversionOOM(emitTruncateF64ToI32<false>, ValType::F64, ValType::I32));
+          case uint16_t(Op::I32TruncUF64):
+            CHECK_NEXT(emitConversionOOM(emitTruncateF64ToI32<true>, ValType::F64, ValType::I32));
+          case uint16_t(Op::I32WrapI64):
+            CHECK_NEXT(emitConversion(emitWrapI64ToI32, ValType::I64, ValType::I32));
+          case uint16_t(Op::I32ReinterpretF32):
+            CHECK_NEXT(emitConversion(emitReinterpretF32AsI32, ValType::F32, ValType::I32));
+          case uint16_t(Op::I32Clz):
+            CHECK_NEXT(emitUnary(emitClzI32, ValType::I32));
+          case uint16_t(Op::I32Ctz):
+            CHECK_NEXT(emitUnary(emitCtzI32, ValType::I32));
+          case uint16_t(Op::I32Popcnt):
+            CHECK_NEXT(emitUnary(emitPopcntI32, ValType::I32));
+          case uint16_t(Op::I32Or):
+            CHECK_NEXT(emitBinary(emitOrI32, ValType::I32));
+          case uint16_t(Op::I32And):
+            CHECK_NEXT(emitBinary(emitAndI32, ValType::I32));
+          case uint16_t(Op::I32Xor):
+            CHECK_NEXT(emitBinary(emitXorI32, ValType::I32));
+          case uint16_t(Op::I32Shl):
+            CHECK_NEXT(emitBinary(emitShlI32, ValType::I32));
+          case uint16_t(Op::I32ShrS):
+            CHECK_NEXT(emitBinary(emitShrI32, ValType::I32));
+          case uint16_t(Op::I32ShrU):
+            CHECK_NEXT(emitBinary(emitShrU32, ValType::I32));
+          case uint16_t(Op::I32Load8S):
+            CHECK_NEXT(emitLoad(ValType::I32, Scalar::Int8));
+          case uint16_t(Op::I32Load8U):
+            CHECK_NEXT(emitLoad(ValType::I32, Scalar::Uint8));
+          case uint16_t(Op::I32Load16S):
+            CHECK_NEXT(emitLoad(ValType::I32, Scalar::Int16));
+          case uint16_t(Op::I32Load16U):
+            CHECK_NEXT(emitLoad(ValType::I32, Scalar::Uint16));
+          case uint16_t(Op::I32Load):
+            CHECK_NEXT(emitLoad(ValType::I32, Scalar::Int32));
+          case uint16_t(Op::I32Store8):
+            CHECK_NEXT(emitStore(ValType::I32, Scalar::Int8));
+          case uint16_t(Op::I32Store16):
+            CHECK_NEXT(emitStore(ValType::I32, Scalar::Int16));
+          case uint16_t(Op::I32Store):
+            CHECK_NEXT(emitStore(ValType::I32, Scalar::Int32));
+          case uint16_t(Op::I32Rotr):
+            CHECK_NEXT(emitBinary(emitRotrI32, ValType::I32));
+          case uint16_t(Op::I32Rotl):
+            CHECK_NEXT(emitBinary(emitRotlI32, ValType::I32));
+
+          // I64
+          case uint16_t(Op::I64Const): {
+            int64_t i64;
+            CHECK(iter_.readI64Const(&i64));
+            if (!deadCode_)
+                pushI64(i64);
+            NEXT();
+          }
+          case uint16_t(Op::I64Add):
+            CHECK_NEXT(emitBinary(emitAddI64, ValType::I64));
+          case uint16_t(Op::I64Sub):
+            CHECK_NEXT(emitBinary(emitSubtractI64, ValType::I64));
+          case uint16_t(Op::I64Mul):
+            CHECK_NEXT(emitBinary(emitMultiplyI64, ValType::I64));
+          case uint16_t(Op::I64DivS):
+#ifdef RABALDR_INT_DIV_I64_CALLOUT
+            CHECK_NEXT(emitIntDivCallout(emitDivOrModI64BuiltinCall, SymbolicAddress::DivI64,
+                                         ValType::I64));
+#else
+            CHECK_NEXT(emitBinary(emitQuotientI64, ValType::I64));
+#endif
+          case uint16_t(Op::I64DivU):
+#ifdef RABALDR_INT_DIV_I64_CALLOUT
+            CHECK_NEXT(emitIntDivCallout(emitDivOrModI64BuiltinCall, SymbolicAddress::UDivI64,
+                                         ValType::I64));
+#else
+            CHECK_NEXT(emitBinary(emitQuotientU64, ValType::I64));
+#endif
+          case uint16_t(Op::I64RemS):
+#ifdef RABALDR_INT_DIV_I64_CALLOUT
+            CHECK_NEXT(emitIntDivCallout(emitDivOrModI64BuiltinCall, SymbolicAddress::ModI64,
+                                         ValType::I64));
+#else
+            CHECK_NEXT(emitBinary(emitRemainderI64, ValType::I64));
+#endif
+          case uint16_t(Op::I64RemU):
+#ifdef RABALDR_INT_DIV_I64_CALLOUT
+            CHECK_NEXT(emitIntDivCallout(emitDivOrModI64BuiltinCall, SymbolicAddress::UModI64,
+                                         ValType::I64));
+#else
+            CHECK_NEXT(emitBinary(emitRemainderU64, ValType::I64));
+#endif
+          case uint16_t(Op::I64TruncSF32):
+#ifdef RABALDR_FLOAT_TO_I64_CALLOUT
+            CHECK_NEXT(emitCalloutConversionOOM(emitConvertFloatingToInt64Callout,
+                                                SymbolicAddress::TruncateDoubleToInt64,
+                                                ValType::F32, ValType::I64));
+#else
+            CHECK_NEXT(emitConversionOOM(emitTruncateF32ToI64<false>, ValType::F32, ValType::I64));
+#endif
+          case uint16_t(Op::I64TruncUF32):
+#ifdef RABALDR_FLOAT_TO_I64_CALLOUT
+            CHECK_NEXT(emitCalloutConversionOOM(emitConvertFloatingToInt64Callout,
+                                                SymbolicAddress::TruncateDoubleToUint64,
+                                                ValType::F32, ValType::I64));
+#else
+            CHECK_NEXT(emitConversionOOM(emitTruncateF32ToI64<true>, ValType::F32, ValType::I64));
+#endif
+          case uint16_t(Op::I64TruncSF64):
+#ifdef RABALDR_FLOAT_TO_I64_CALLOUT
+            CHECK_NEXT(emitCalloutConversionOOM(emitConvertFloatingToInt64Callout,
+                                                SymbolicAddress::TruncateDoubleToInt64,
+                                                ValType::F64, ValType::I64));
+#else
+            CHECK_NEXT(emitConversionOOM(emitTruncateF64ToI64<false>, ValType::F64, ValType::I64));
+#endif
+          case uint16_t(Op::I64TruncUF64):
+#ifdef RABALDR_FLOAT_TO_I64_CALLOUT
+            CHECK_NEXT(emitCalloutConversionOOM(emitConvertFloatingToInt64Callout,
+                                                SymbolicAddress::TruncateDoubleToUint64,
+                                                ValType::F64, ValType::I64));
+#else
+            CHECK_NEXT(emitConversionOOM(emitTruncateF64ToI64<true>, ValType::F64, ValType::I64));
+#endif
+          case uint16_t(Op::I64ExtendSI32):
+            CHECK_NEXT(emitConversion(emitExtendI32ToI64, ValType::I32, ValType::I64));
+          case uint16_t(Op::I64ExtendUI32):
+            CHECK_NEXT(emitConversion(emitExtendU32ToI64, ValType::I32, ValType::I64));
+          case uint16_t(Op::I64ReinterpretF64):
+            CHECK_NEXT(emitConversion(emitReinterpretF64AsI64, ValType::F64, ValType::I64));
+          case uint16_t(Op::I64Or):
+            CHECK_NEXT(emitBinary(emitOrI64, ValType::I64));
+          case uint16_t(Op::I64And):
+            CHECK_NEXT(emitBinary(emitAndI64, ValType::I64));
+          case uint16_t(Op::I64Xor):
+            CHECK_NEXT(emitBinary(emitXorI64, ValType::I64));
+          case uint16_t(Op::I64Shl):
+            CHECK_NEXT(emitBinary(emitShlI64, ValType::I64));
+          case uint16_t(Op::I64ShrS):
+            CHECK_NEXT(emitBinary(emitShrI64, ValType::I64));
+          case uint16_t(Op::I64ShrU):
+            CHECK_NEXT(emitBinary(emitShrU64, ValType::I64));
+          case uint16_t(Op::I64Rotr):
+            CHECK_NEXT(emitBinary(emitRotrI64, ValType::I64));
+          case uint16_t(Op::I64Rotl):
+            CHECK_NEXT(emitBinary(emitRotlI64, ValType::I64));
+          case uint16_t(Op::I64Clz):
+            CHECK_NEXT(emitUnary(emitClzI64, ValType::I64));
+          case uint16_t(Op::I64Ctz):
+            CHECK_NEXT(emitUnary(emitCtzI64, ValType::I64));
+          case uint16_t(Op::I64Popcnt):
+            CHECK_NEXT(emitUnary(emitPopcntI64, ValType::I64));
+          case uint16_t(Op::I64Eqz):
+            CHECK_NEXT(emitConversion(emitEqzI64, ValType::I64, ValType::I32));
+          case uint16_t(Op::I64Load8S):
+            CHECK_NEXT(emitLoad(ValType::I64, Scalar::Int8));
+          case uint16_t(Op::I64Load16S):
+            CHECK_NEXT(emitLoad(ValType::I64, Scalar::Int16));
+          case uint16_t(Op::I64Load32S):
+            CHECK_NEXT(emitLoad(ValType::I64, Scalar::Int32));
+          case uint16_t(Op::I64Load8U):
+            CHECK_NEXT(emitLoad(ValType::I64, Scalar::Uint8));
+          case uint16_t(Op::I64Load16U):
+            CHECK_NEXT(emitLoad(ValType::I64, Scalar::Uint16));
+          case uint16_t(Op::I64Load32U):
+            CHECK_NEXT(emitLoad(ValType::I64, Scalar::Uint32));
+          case uint16_t(Op::I64Load):
+            CHECK_NEXT(emitLoad(ValType::I64, Scalar::Int64));
+          case uint16_t(Op::I64Store8):
+            CHECK_NEXT(emitStore(ValType::I64, Scalar::Int8));
+          case uint16_t(Op::I64Store16):
+            CHECK_NEXT(emitStore(ValType::I64, Scalar::Int16));
+          case uint16_t(Op::I64Store32):
+            CHECK_NEXT(emitStore(ValType::I64, Scalar::Int32));
+          case uint16_t(Op::I64Store):
+            CHECK_NEXT(emitStore(ValType::I64, Scalar::Int64));
+
+          // F32
+          case uint16_t(Op::F32Const): {
+            float f32;
+            CHECK(iter_.readF32Const(&f32));
+            if (!deadCode_)
+                pushF32(f32);
+            NEXT();
+          }
+          case uint16_t(Op::F32Add):
+            CHECK_NEXT(emitBinary(emitAddF32, ValType::F32));
+          case uint16_t(Op::F32Sub):
+            CHECK_NEXT(emitBinary(emitSubtractF32, ValType::F32));
+          case uint16_t(Op::F32Mul):
+            CHECK_NEXT(emitBinary(emitMultiplyF32, ValType::F32));
+          case uint16_t(Op::F32Div):
+            CHECK_NEXT(emitBinary(emitDivideF32, ValType::F32));
+          case uint16_t(Op::F32Min):
+            CHECK_NEXT(emitBinary(emitMinF32, ValType::F32));
+          case uint16_t(Op::F32Max):
+            CHECK_NEXT(emitBinary(emitMaxF32, ValType::F32));
+          case uint16_t(Op::F32Neg):
+            CHECK_NEXT(emitUnary(emitNegateF32, ValType::F32));
+          case uint16_t(Op::F32Abs):
+            CHECK_NEXT(emitUnary(emitAbsF32, ValType::F32));
+          case uint16_t(Op::F32Sqrt):
+            CHECK_NEXT(emitUnary(emitSqrtF32, ValType::F32));
+          case uint16_t(Op::F32Ceil):
+            CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::CeilF, ValType::F32));
+          case uint16_t(Op::F32Floor):
+            CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::FloorF, ValType::F32));
+          case uint16_t(Op::F32DemoteF64):
+            CHECK_NEXT(emitConversion(emitConvertF64ToF32, ValType::F64, ValType::F32));
+          case uint16_t(Op::F32ConvertSI32):
+            CHECK_NEXT(emitConversion(emitConvertI32ToF32, ValType::I32, ValType::F32));
+          case uint16_t(Op::F32ConvertUI32):
+            CHECK_NEXT(emitConversion(emitConvertU32ToF32, ValType::I32, ValType::F32));
+          case uint16_t(Op::F32ConvertSI64):
+#ifdef RABALDR_I64_TO_FLOAT_CALLOUT
+            CHECK_NEXT(emitCalloutConversionOOM(emitConvertInt64ToFloatingCallout,
+                                                SymbolicAddress::Int64ToFloat32,
+                                                ValType::I64, ValType::F32));
+#else
+            CHECK_NEXT(emitConversion(emitConvertI64ToF32, ValType::I64, ValType::F32));
+#endif
+          case uint16_t(Op::F32ConvertUI64):
+#ifdef RABALDR_I64_TO_FLOAT_CALLOUT
+            CHECK_NEXT(emitCalloutConversionOOM(emitConvertInt64ToFloatingCallout,
+                                                SymbolicAddress::Uint64ToFloat32,
+                                                ValType::I64, ValType::F32));
+#else
+            CHECK_NEXT(emitConversion(emitConvertU64ToF32, ValType::I64, ValType::F32));
+#endif
+          case uint16_t(Op::F32ReinterpretI32):
+            CHECK_NEXT(emitConversion(emitReinterpretI32AsF32, ValType::I32, ValType::F32));
+          case uint16_t(Op::F32Load):
+            CHECK_NEXT(emitLoad(ValType::F32, Scalar::Float32));
+          case uint16_t(Op::F32Store):
+            CHECK_NEXT(emitStore(ValType::F32, Scalar::Float32));
+          case uint16_t(Op::F32CopySign):
+            CHECK_NEXT(emitBinary(emitCopysignF32, ValType::F32));
+          case uint16_t(Op::F32Nearest):
+            CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::NearbyIntF, ValType::F32));
+          case uint16_t(Op::F32Trunc):
+            CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::TruncF, ValType::F32));
+
+          // F64
+          case uint16_t(Op::F64Const): {
+            double f64;
+            CHECK(iter_.readF64Const(&f64));
+            if (!deadCode_)
+                pushF64(f64);
+            NEXT();
+          }
+          case uint16_t(Op::F64Add):
+            CHECK_NEXT(emitBinary(emitAddF64, ValType::F64));
+          case uint16_t(Op::F64Sub):
+            CHECK_NEXT(emitBinary(emitSubtractF64, ValType::F64));
+          case uint16_t(Op::F64Mul):
+            CHECK_NEXT(emitBinary(emitMultiplyF64, ValType::F64));
+          case uint16_t(Op::F64Div):
+            CHECK_NEXT(emitBinary(emitDivideF64, ValType::F64));
+          case uint16_t(Op::F64Min):
+            CHECK_NEXT(emitBinary(emitMinF64, ValType::F64));
+          case uint16_t(Op::F64Max):
+            CHECK_NEXT(emitBinary(emitMaxF64, ValType::F64));
+          case uint16_t(Op::F64Neg):
+            CHECK_NEXT(emitUnary(emitNegateF64, ValType::F64));
+          case uint16_t(Op::F64Abs):
+            CHECK_NEXT(emitUnary(emitAbsF64, ValType::F64));
+          case uint16_t(Op::F64Sqrt):
+            CHECK_NEXT(emitUnary(emitSqrtF64, ValType::F64));
+          case uint16_t(Op::F64Ceil):
+            CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::CeilD, ValType::F64));
+          case uint16_t(Op::F64Floor):
+            CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::FloorD, ValType::F64));
+          case uint16_t(Op::F64PromoteF32):
+            CHECK_NEXT(emitConversion(emitConvertF32ToF64, ValType::F32, ValType::F64));
+          case uint16_t(Op::F64ConvertSI32):
+            CHECK_NEXT(emitConversion(emitConvertI32ToF64, ValType::I32, ValType::F64));
+          case uint16_t(Op::F64ConvertUI32):
+            CHECK_NEXT(emitConversion(emitConvertU32ToF64, ValType::I32, ValType::F64));
+          case uint16_t(Op::F64ConvertSI64):
+#ifdef RABALDR_I64_TO_FLOAT_CALLOUT
+            CHECK_NEXT(emitCalloutConversionOOM(emitConvertInt64ToFloatingCallout,
+                                                SymbolicAddress::Int64ToDouble,
+                                                ValType::I64, ValType::F64));
+#else
+            CHECK_NEXT(emitConversion(emitConvertI64ToF64, ValType::I64, ValType::F64));
+#endif
+          case uint16_t(Op::F64ConvertUI64):
+#ifdef RABALDR_I64_TO_FLOAT_CALLOUT
+            CHECK_NEXT(emitCalloutConversionOOM(emitConvertInt64ToFloatingCallout,
+                                                SymbolicAddress::Uint64ToDouble,
+                                                ValType::I64, ValType::F64));
+#else
+            CHECK_NEXT(emitConversion(emitConvertU64ToF64, ValType::I64, ValType::F64));
+#endif
+          case uint16_t(Op::F64Load):
+            CHECK_NEXT(emitLoad(ValType::F64, Scalar::Float64));
+          case uint16_t(Op::F64Store):
+            CHECK_NEXT(emitStore(ValType::F64, Scalar::Float64));
+          case uint16_t(Op::F64ReinterpretI64):
+            CHECK_NEXT(emitConversion(emitReinterpretI64AsF64, ValType::I64, ValType::F64));
+          case uint16_t(Op::F64CopySign):
+            CHECK_NEXT(emitBinary(emitCopysignF64, ValType::F64));
+          case uint16_t(Op::F64Nearest):
+            CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::NearbyIntD, ValType::F64));
+          case uint16_t(Op::F64Trunc):
+            CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::TruncD, ValType::F64));
+
+          // Comparisons
+          case uint16_t(Op::I32Eq):
+            CHECK_NEXT(emitComparison(emitCompareI32, ValType::I32, Assembler::Equal));
+          case uint16_t(Op::I32Ne):
+            CHECK_NEXT(emitComparison(emitCompareI32, ValType::I32, Assembler::NotEqual));
+          case uint16_t(Op::I32LtS):
+            CHECK_NEXT(emitComparison(emitCompareI32, ValType::I32, Assembler::LessThan));
+          case uint16_t(Op::I32LeS):
+            CHECK_NEXT(emitComparison(emitCompareI32, ValType::I32, Assembler::LessThanOrEqual));
+          case uint16_t(Op::I32GtS):
+            CHECK_NEXT(emitComparison(emitCompareI32, ValType::I32, Assembler::GreaterThan));
+          case uint16_t(Op::I32GeS):
+            CHECK_NEXT(emitComparison(emitCompareI32, ValType::I32, Assembler::GreaterThanOrEqual));
+          case uint16_t(Op::I32LtU):
+            CHECK_NEXT(emitComparison(emitCompareI32, ValType::I32, Assembler::Below));
+          case uint16_t(Op::I32LeU):
+            CHECK_NEXT(emitComparison(emitCompareI32, ValType::I32, Assembler::BelowOrEqual));
+          case uint16_t(Op::I32GtU):
+            CHECK_NEXT(emitComparison(emitCompareI32, ValType::I32, Assembler::Above));
+          case uint16_t(Op::I32GeU):
+            CHECK_NEXT(emitComparison(emitCompareI32, ValType::I32, Assembler::AboveOrEqual));
+          case uint16_t(Op::I64Eq):
+            CHECK_NEXT(emitComparison(emitCompareI64, ValType::I64, Assembler::Equal));
+          case uint16_t(Op::I64Ne):
+            CHECK_NEXT(emitComparison(emitCompareI64, ValType::I64, Assembler::NotEqual));
+          case uint16_t(Op::I64LtS):
+            CHECK_NEXT(emitComparison(emitCompareI64, ValType::I64, Assembler::LessThan));
+          case uint16_t(Op::I64LeS):
+            CHECK_NEXT(emitComparison(emitCompareI64, ValType::I64, Assembler::LessThanOrEqual));
+          case uint16_t(Op::I64GtS):
+            CHECK_NEXT(emitComparison(emitCompareI64, ValType::I64, Assembler::GreaterThan));
+          case uint16_t(Op::I64GeS):
+            CHECK_NEXT(emitComparison(emitCompareI64, ValType::I64, Assembler::GreaterThanOrEqual));
+          case uint16_t(Op::I64LtU):
+            CHECK_NEXT(emitComparison(emitCompareI64, ValType::I64, Assembler::Below));
+          case uint16_t(Op::I64LeU):
+            CHECK_NEXT(emitComparison(emitCompareI64, ValType::I64, Assembler::BelowOrEqual));
+          case uint16_t(Op::I64GtU):
+            CHECK_NEXT(emitComparison(emitCompareI64, ValType::I64, Assembler::Above));
+          case uint16_t(Op::I64GeU):
+            CHECK_NEXT(emitComparison(emitCompareI64, ValType::I64, Assembler::AboveOrEqual));
+          case uint16_t(Op::F32Eq):
+            CHECK_NEXT(emitComparison(emitCompareF32, ValType::F32, Assembler::DoubleEqual));
+          case uint16_t(Op::F32Ne):
+            CHECK_NEXT(emitComparison(emitCompareF32, ValType::F32, Assembler::DoubleNotEqualOrUnordered));
+          case uint16_t(Op::F32Lt):
+            CHECK_NEXT(emitComparison(emitCompareF32, ValType::F32, Assembler::DoubleLessThan));
+          case uint16_t(Op::F32Le):
+            CHECK_NEXT(emitComparison(emitCompareF32, ValType::F32, Assembler::DoubleLessThanOrEqual));
+          case uint16_t(Op::F32Gt):
+            CHECK_NEXT(emitComparison(emitCompareF32, ValType::F32, Assembler::DoubleGreaterThan));
+          case uint16_t(Op::F32Ge):
+            CHECK_NEXT(emitComparison(emitCompareF32, ValType::F32, Assembler::DoubleGreaterThanOrEqual));
+          case uint16_t(Op::F64Eq):
+            CHECK_NEXT(emitComparison(emitCompareF64, ValType::F64, Assembler::DoubleEqual));
+          case uint16_t(Op::F64Ne):
+            CHECK_NEXT(emitComparison(emitCompareF64, ValType::F64, Assembler::DoubleNotEqualOrUnordered));
+          case uint16_t(Op::F64Lt):
+            CHECK_NEXT(emitComparison(emitCompareF64, ValType::F64, Assembler::DoubleLessThan));
+          case uint16_t(Op::F64Le):
+            CHECK_NEXT(emitComparison(emitCompareF64, ValType::F64, Assembler::DoubleLessThanOrEqual));
+          case uint16_t(Op::F64Gt):
+            CHECK_NEXT(emitComparison(emitCompareF64, ValType::F64, Assembler::DoubleGreaterThan));
+          case uint16_t(Op::F64Ge):
+            CHECK_NEXT(emitComparison(emitCompareF64, ValType::F64, Assembler::DoubleGreaterThanOrEqual));
+
+          // Sign extensions
+#ifdef ENABLE_WASM_THREAD_OPS
+          case uint16_t(Op::I32Extend8S):
+            CHECK_NEXT(emitConversion(emitExtendI32_8, ValType::I32, ValType::I32));
+          case uint16_t(Op::I32Extend16S):
+            CHECK_NEXT(emitConversion(emitExtendI32_16, ValType::I32, ValType::I32));
+          case uint16_t(Op::I64Extend8S):
+            CHECK_NEXT(emitConversion(emitExtendI64_8, ValType::I64, ValType::I64));
+          case uint16_t(Op::I64Extend16S):
+            CHECK_NEXT(emitConversion(emitExtendI64_16, ValType::I64, ValType::I64));
+          case uint16_t(Op::I64Extend32S):
+            CHECK_NEXT(emitConversion(emitExtendI64_32, ValType::I64, ValType::I64));
+#endif
+
+          // Memory Related
+          case uint16_t(Op::GrowMemory):
+            CHECK_NEXT(emitGrowMemory());
+          case uint16_t(Op::CurrentMemory):
+            CHECK_NEXT(emitCurrentMemory());
+
+          // Thread operations
+          case uint16_t(Op::ThreadPrefix): {
+#ifdef ENABLE_WASM_THREAD_OPS
+            switch (op.b1) {
+              case uint16_t(ThreadOp::Wake):
+                CHECK_NEXT(emitWake());
+
+              case uint16_t(ThreadOp::I32Wait):
+                CHECK_NEXT(emitWait(ValType::I32, 4));
+              case uint16_t(ThreadOp::I64Wait):
+                CHECK_NEXT(emitWait(ValType::I64, 8));
+
+              case uint16_t(ThreadOp::I32AtomicLoad):
+                CHECK_NEXT(emitAtomicLoad(ValType::I32, Scalar::Int32));
+              case uint16_t(ThreadOp::I64AtomicLoad):
+                CHECK_NEXT(emitAtomicLoad(ValType::I64, Scalar::Int64));
+              case uint16_t(ThreadOp::I32AtomicLoad8U):
+                CHECK_NEXT(emitAtomicLoad(ValType::I32, Scalar::Uint8));
+              case uint16_t(ThreadOp::I32AtomicLoad16U):
+                CHECK_NEXT(emitAtomicLoad(ValType::I32, Scalar::Uint16));
+              case uint16_t(ThreadOp::I64AtomicLoad8U):
+                CHECK_NEXT(emitAtomicLoad(ValType::I64, Scalar::Uint8));
+              case uint16_t(ThreadOp::I64AtomicLoad16U):
+                CHECK_NEXT(emitAtomicLoad(ValType::I64, Scalar::Uint16));
+              case uint16_t(ThreadOp::I64AtomicLoad32U):
+                CHECK_NEXT(emitAtomicLoad(ValType::I64, Scalar::Uint32));
+
+              case uint16_t(ThreadOp::I32AtomicStore):
+                CHECK_NEXT(emitAtomicStore(ValType::I32, Scalar::Int32));
+              case uint16_t(ThreadOp::I64AtomicStore):
+                CHECK_NEXT(emitAtomicStore(ValType::I64, Scalar::Int64));
+              case uint16_t(ThreadOp::I32AtomicStore8U):
+                CHECK_NEXT(emitAtomicStore(ValType::I32, Scalar::Uint8));
+              case uint16_t(ThreadOp::I32AtomicStore16U):
+                CHECK_NEXT(emitAtomicStore(ValType::I32, Scalar::Uint16));
+              case uint16_t(ThreadOp::I64AtomicStore8U):
+                CHECK_NEXT(emitAtomicStore(ValType::I64, Scalar::Uint8));
+              case uint16_t(ThreadOp::I64AtomicStore16U):
+                CHECK_NEXT(emitAtomicStore(ValType::I64, Scalar::Uint16));
+              case uint16_t(ThreadOp::I64AtomicStore32U):
+                CHECK_NEXT(emitAtomicStore(ValType::I64, Scalar::Uint32));
+
+              case uint16_t(ThreadOp::I32AtomicAdd):
+                CHECK_NEXT(emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicFetchAddOp));
+              case uint16_t(ThreadOp::I64AtomicAdd):
+                CHECK_NEXT(emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicFetchAddOp));
+              case uint16_t(ThreadOp::I32AtomicAdd8U):
+                CHECK_NEXT(emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicFetchAddOp));
+              case uint16_t(ThreadOp::I32AtomicAdd16U):
+                CHECK_NEXT(emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicFetchAddOp));
+              case uint16_t(ThreadOp::I64AtomicAdd8U):
+                CHECK_NEXT(emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicFetchAddOp));
+              case uint16_t(ThreadOp::I64AtomicAdd16U):
+                CHECK_NEXT(emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicFetchAddOp));
+              case uint16_t(ThreadOp::I64AtomicAdd32U):
+                CHECK_NEXT(emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicFetchAddOp));
+
+              case uint16_t(ThreadOp::I32AtomicSub):
+                CHECK_NEXT(emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicFetchSubOp));
+              case uint16_t(ThreadOp::I64AtomicSub):
+                CHECK_NEXT(emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicFetchSubOp));
+              case uint16_t(ThreadOp::I32AtomicSub8U):
+                CHECK_NEXT(emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicFetchSubOp));
+              case uint16_t(ThreadOp::I32AtomicSub16U):
+                CHECK_NEXT(emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicFetchSubOp));
+              case uint16_t(ThreadOp::I64AtomicSub8U):
+                CHECK_NEXT(emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicFetchSubOp));
+              case uint16_t(ThreadOp::I64AtomicSub16U):
+                CHECK_NEXT(emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicFetchSubOp));
+              case uint16_t(ThreadOp::I64AtomicSub32U):
+                CHECK_NEXT(emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicFetchSubOp));
+
+              case uint16_t(ThreadOp::I32AtomicAnd):
+                CHECK_NEXT(emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicFetchAndOp));
+              case uint16_t(ThreadOp::I64AtomicAnd):
+                CHECK_NEXT(emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicFetchAndOp));
+              case uint16_t(ThreadOp::I32AtomicAnd8U):
+                CHECK_NEXT(emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicFetchAndOp));
+              case uint16_t(ThreadOp::I32AtomicAnd16U):
+                CHECK_NEXT(emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicFetchAndOp));
+              case uint16_t(ThreadOp::I64AtomicAnd8U):
+                CHECK_NEXT(emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicFetchAndOp));
+              case uint16_t(ThreadOp::I64AtomicAnd16U):
+                CHECK_NEXT(emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicFetchAndOp));
+              case uint16_t(ThreadOp::I64AtomicAnd32U):
+                CHECK_NEXT(emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicFetchAndOp));
+
+              case uint16_t(ThreadOp::I32AtomicOr):
+                CHECK_NEXT(emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicFetchOrOp));
+              case uint16_t(ThreadOp::I64AtomicOr):
+                CHECK_NEXT(emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicFetchOrOp));
+              case uint16_t(ThreadOp::I32AtomicOr8U):
+                CHECK_NEXT(emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicFetchOrOp));
+              case uint16_t(ThreadOp::I32AtomicOr16U):
+                CHECK_NEXT(emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicFetchOrOp));
+              case uint16_t(ThreadOp::I64AtomicOr8U):
+                CHECK_NEXT(emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicFetchOrOp));
+              case uint16_t(ThreadOp::I64AtomicOr16U):
+                CHECK_NEXT(emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicFetchOrOp));
+              case uint16_t(ThreadOp::I64AtomicOr32U):
+                CHECK_NEXT(emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicFetchOrOp));
+
+              case uint16_t(ThreadOp::I32AtomicXor):
+                CHECK_NEXT(emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicFetchXorOp));
+              case uint16_t(ThreadOp::I64AtomicXor):
+                CHECK_NEXT(emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicFetchXorOp));
+              case uint16_t(ThreadOp::I32AtomicXor8U):
+                CHECK_NEXT(emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicFetchXorOp));
+              case uint16_t(ThreadOp::I32AtomicXor16U):
+                CHECK_NEXT(emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicFetchXorOp));
+              case uint16_t(ThreadOp::I64AtomicXor8U):
+                CHECK_NEXT(emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicFetchXorOp));
+              case uint16_t(ThreadOp::I64AtomicXor16U):
+                CHECK_NEXT(emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicFetchXorOp));
+              case uint16_t(ThreadOp::I64AtomicXor32U):
+                CHECK_NEXT(emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicFetchXorOp));
+
+              case uint16_t(ThreadOp::I32AtomicXchg):
+                CHECK_NEXT(emitAtomicXchg(ValType::I32, Scalar::Int32));
+              case uint16_t(ThreadOp::I64AtomicXchg):
+                CHECK_NEXT(emitAtomicXchg(ValType::I64, Scalar::Int64));
+              case uint16_t(ThreadOp::I32AtomicXchg8U):
+                CHECK_NEXT(emitAtomicXchg(ValType::I32, Scalar::Uint8));
+              case uint16_t(ThreadOp::I32AtomicXchg16U):
+                CHECK_NEXT(emitAtomicXchg(ValType::I32, Scalar::Uint16));
+              case uint16_t(ThreadOp::I64AtomicXchg8U):
+                CHECK_NEXT(emitAtomicXchg(ValType::I64, Scalar::Uint8));
+              case uint16_t(ThreadOp::I64AtomicXchg16U):
+                CHECK_NEXT(emitAtomicXchg(ValType::I64, Scalar::Uint16));
+              case uint16_t(ThreadOp::I64AtomicXchg32U):
+                CHECK_NEXT(emitAtomicXchg(ValType::I64, Scalar::Uint32));
+
+              case uint16_t(ThreadOp::I32AtomicCmpXchg):
+                CHECK_NEXT(emitAtomicCmpXchg(ValType::I32, Scalar::Int32));
+              case uint16_t(ThreadOp::I64AtomicCmpXchg):
+                CHECK_NEXT(emitAtomicCmpXchg(ValType::I64, Scalar::Int64));
+              case uint16_t(ThreadOp::I32AtomicCmpXchg8U):
+                CHECK_NEXT(emitAtomicCmpXchg(ValType::I32, Scalar::Uint8));
+              case uint16_t(ThreadOp::I32AtomicCmpXchg16U):
+                CHECK_NEXT(emitAtomicCmpXchg(ValType::I32, Scalar::Uint16));
+              case uint16_t(ThreadOp::I64AtomicCmpXchg8U):
+                CHECK_NEXT(emitAtomicCmpXchg(ValType::I64, Scalar::Uint8));
+              case uint16_t(ThreadOp::I64AtomicCmpXchg16U):
+                CHECK_NEXT(emitAtomicCmpXchg(ValType::I64, Scalar::Uint16));
+              case uint16_t(ThreadOp::I64AtomicCmpXchg32U):
+                CHECK_NEXT(emitAtomicCmpXchg(ValType::I64, Scalar::Uint32));
+
+              default:
+                return iter_.unrecognizedOpcode(&op);
+            }
+#else
+            return iter_.unrecognizedOpcode(&op);
+#endif  // ENABLE_WASM_THREAD_OPS
+            break;
+          }
+
+          // asm.js operations
+          case uint16_t(Op::MozPrefix):
+            return iter_.unrecognizedOpcode(&op);
+
+          default:
+            return iter_.unrecognizedOpcode(&op);
+        }
+
+#undef CHECK
+#undef NEXT
+#undef CHECK_NEXT
+#undef emitBinary
+#undef emitUnary
+#undef emitComparison
+#undef emitConversion
+#undef emitConversionOOM
+#undef emitCalloutConversionOOM
+
+        MOZ_CRASH("unreachable");
+    }
+
+    MOZ_CRASH("unreachable");
+}
+
+// Compile a single function: emit the prologue, translate the entire
+// bytecode body, then emit the epilogue.  Returns false on OOM or a
+// validation failure encountered while decoding the body.
+bool
+BaseCompiler::emitFunction()
+{
+    beginFunction();
+
+    if (!emitBody())
+        return false;
+
+    if (!endFunction())
+        return false;
+
+    return true;
+}
+
+// Zero-initialize the non-argument stack locals occupying frame offsets
+// [varLow_, varHigh_).  Strategy: first patch up 32-bit alignment slack,
+// then pick the cheapest of three schemes based on the number of words to
+// clear: a single immediate store, a fully-unrolled run of stores, or an
+// unrolled loop with a scalar tail.
+void
+BaseCompiler::emitInitStackLocals()
+{
+    MOZ_ASSERT(varLow_ < varHigh_, "there should be stack locals to initialize");
+
+    static const uint32_t wordSize = sizeof(void*);
+
+    // A local's localOffset always points above it in the frame, so that when
+    // translated to a stack address we end up with an address pointing to the
+    // base of the local.  Thus to go from a raw frame offset to an SP offset we
+    // first add K to the frame offset to obtain a localOffset for a slot of
+    // size K, and then map that to an SP offset.  Hence all the adjustments to
+    // `low` in the offset calculations below.
+
+    // On 64-bit systems we may have 32-bit alignment for the local area as it
+    // may be preceded by parameters and prologue/debug data.
+
+    uint32_t low = varLow_;
+    if (low % wordSize) {
+        // Zero one 32-bit slot so the remaining stores can be word-sized.
+        masm.store32(Imm32(0), Address(StackPointer, localOffsetToSPOffset(low + 4)));
+        low += 4;
+    }
+    MOZ_ASSERT(low % wordSize == 0);
+
+    const uint32_t high = AlignBytes(varHigh_, wordSize);
+    MOZ_ASSERT(high <= uint32_t(localSize_), "localSize_ should be aligned at least that");
+
+    // An unrollLimit of 16 is chosen so that we only need an 8-bit signed
+    // immediate to represent the offset in the store instructions in the loop
+    // on x64.
+
+    const uint32_t unrollLimit = 16;
+    const uint32_t initWords = (high - low) / wordSize;
+    const uint32_t tailWords = initWords % unrollLimit;
+    const uint32_t loopHigh = high - (tailWords * wordSize);
+
+    // With only one word to initialize, just store an immediate zero.
+
+    if (initWords == 1) {
+        masm.storePtr(ImmWord(0), Address(StackPointer, localOffsetToSPOffset(low + wordSize)));
+        return;
+    }
+
+    // For other cases, it's best to have a zero in a register.
+    //
+    // One can do more here with SIMD registers (store 16 bytes at a time) or
+    // with instructions like STRD on ARM (store 8 bytes at a time), but that's
+    // for another day.
+
+    RegI32 zero = needI32();
+    masm.mov(ImmWord(0), zero);
+
+    // For the general case we want to have a loop body of unrollLimit stores
+    // and then a tail of less than unrollLimit stores.  When initWords is less
+    // than 2*unrollLimit the loop trip count is at most 1 and there is no
+    // benefit to having the pointer calculations and the compare-and-branch.
+    // So we completely unroll when we have initWords < 2 * unrollLimit.  (In
+    // this case we'll end up using 32-bit offsets on x64 for up to half of the
+    // stores, though.)
+
+    // Fully-unrolled case.
+
+    if (initWords < 2 * unrollLimit)  {
+        for (uint32_t i = low; i < high; i += wordSize)
+            masm.storePtr(zero, Address(StackPointer, localOffsetToSPOffset(i + wordSize)));
+        freeI32(zero);
+        return;
+    }
+
+    // Unrolled loop with a tail. Stores will use negative offsets. That's OK
+    // for x86 and ARM, at least.
+
+    // Compute pointer to the highest-addressed slot on the frame.
+    RegI32 p = needI32();
+    masm.computeEffectiveAddress(Address(StackPointer, localOffsetToSPOffset(low + wordSize)),
+                                 p);
+
+    // Compute pointer to the lowest-addressed slot on the frame that will be
+    // initialized by the loop body.
+    RegI32 lim = needI32();
+    masm.computeEffectiveAddress(Address(StackPointer,
+                                         localOffsetToSPOffset(loopHigh + wordSize)),
+                                 lim);
+
+    // The loop body.  Eventually we'll have p == lim and exit the loop.
+    Label again;
+    masm.bind(&again);
+    for (uint32_t i = 0; i < unrollLimit; ++i)
+        masm.storePtr(zero, Address(p, -(wordSize * i)));
+    masm.subPtr(Imm32(unrollLimit * wordSize), p);
+    masm.branchPtr(Assembler::LessThan, lim, p, &again);
+
+    // The tail.
+    for (uint32_t i = 0; i < tailWords; ++i)
+        masm.storePtr(zero, Address(p, -(wordSize * i)));
+
+    freeI32(p);
+    freeI32(lim);
+    freeI32(zero);
+}
+
+// Construct a baseline compiler for one function.  This only records the
+// inputs and zeroes bookkeeping state; real setup (signatures, local layout)
+// happens in init().  `alloc` and `masm` are borrowed, not owned — the
+// caller (BaselineCompileFunctions) keeps them alive for our lifetime.
+// The #ifdef'd members bind platform-specific fixed registers that some
+// instruction sequences require (e.g. rax/rcx/rdx on x64).
+BaseCompiler::BaseCompiler(const ModuleEnvironment& env,
+                           Decoder& decoder,
+                           const FuncCompileInput& func,
+                           const ValTypeVector& locals,
+                           bool debugEnabled,
+                           TempAllocator* alloc,
+                           MacroAssembler* masm,
+                           CompileMode mode)
+    : env_(env),
+      iter_(env, decoder),
+      func_(func),
+      lastReadCallSite_(0),
+      alloc_(*alloc),
+      locals_(locals),
+      localSize_(0),
+      varLow_(0),
+      varHigh_(0),
+      maxFramePushed_(0),
+      deadCode_(false),
+      debugEnabled_(debugEnabled),
+      bceSafe_(0),
+      stackAddOffset_(0),
+      mode_(mode),
+      latentOp_(LatentOp::None),
+      latentType_(ValType::I32),
+      latentIntCmp_(Assembler::Equal),
+      latentDoubleCmp_(Assembler::DoubleEqual),
+      masm(*masm),
+      ra(*this),
+#ifdef JS_CODEGEN_X64
+      specific_rax(RegI64(Register64(rax))),
+      specific_rcx(RegI64(Register64(rcx))),
+      specific_rdx(RegI64(Register64(rdx))),
+#endif
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+      specific_eax(RegI32(eax)),
+      specific_ecx(RegI32(ecx)),
+      specific_edx(RegI32(edx)),
+      specific_edi(RegI32(edi)),
+      specific_esi(RegI32(esi)),
+#endif
+#ifdef JS_CODEGEN_X86
+      specific_ecx_ebx(RegI64(Register64(ecx, ebx))),
+      specific_edx_eax(RegI64(Register64(edx, eax))),
+      abiReturnRegI64(RegI64(Register64(edx, eax))),
+#endif
+#ifdef JS_CODEGEN_ARM
+      abiReturnRegI64(ReturnReg64),
+#endif
+      joinRegI32(RegI32(ReturnReg)),
+      joinRegI64(RegI64(ReturnReg64)),
+      joinRegF32(RegF32(ReturnFloat32Reg)),
+      joinRegF64(RegF64(ReturnDoubleReg))
+{
+}
+
+// Fallible second-phase initialization: build the callout signature vectors,
+// lay out the locals in the frame, and emit the function-entry interrupt
+// check.  Returns false on OOM.
+bool
+BaseCompiler::init()
+{
+    // Signatures for runtime callouts; the suffix letters encode argument
+    // types: D=double, F=float, P=pointer, I=int32, L=int64.
+    if (!SigD_.append(ValType::F64))
+        return false;
+    if (!SigF_.append(ValType::F32))
+        return false;
+    if (!SigP_.append(MIRType::Pointer))
+        return false;
+    if (!SigPI_.append(MIRType::Pointer) || !SigPI_.append(MIRType::Int32))
+        return false;
+    if (!SigPII_.append(MIRType::Pointer) || !SigPII_.append(MIRType::Int32) ||
+        !SigPII_.append(MIRType::Int32))
+    {
+        return false;
+    }
+    if (!SigPIIL_.append(MIRType::Pointer) || !SigPIIL_.append(MIRType::Int32) ||
+        !SigPIIL_.append(MIRType::Int32) || !SigPIIL_.append(MIRType::Int64))
+    {
+        return false;
+    }
+    if (!SigPILL_.append(MIRType::Pointer) || !SigPILL_.append(MIRType::Int32) ||
+        !SigPILL_.append(MIRType::Int64) || !SigPILL_.append(MIRType::Int64))
+    {
+        return false;
+    }
+
+    if (!localInfo_.resize(locals_.length()))
+        return false;
+
+    // Walk the locals in frame order: arguments first, then the other locals.
+    // varLow_/varHigh_ end up bracketing the non-argument locals, which
+    // emitInitStackLocals() will zero.
+    const ValTypeVector& args = sig().args();
+    BaseLocalIter i(locals_, args.length(), debugEnabled_);
+    varLow_ = i.reservedSize();
+    for (; !i.done() && i.index() < args.length(); i++) {
+        MOZ_ASSERT(i.isArg());
+        Local& l = localInfo_[i.index()];
+        l.init(i.mirType(), i.frameOffset());
+        varLow_ = i.currentLocalSize();
+    }
+
+    varHigh_ = varLow_;
+    for (; !i.done() ; i++) {
+        MOZ_ASSERT(!i.isArg());
+        Local& l = localInfo_[i.index()];
+        l.init(i.mirType(), i.frameOffset());
+        varHigh_ = i.currentLocalSize();
+    }
+
+    // Keep the local area 16-byte aligned.
+    localSize_ = AlignBytes(varHigh_, 16u);
+
+    addInterruptCheck();
+
+    return true;
+}
+
+// Finalize compilation of the current function and return its code offsets.
+// Must only be called once the decoder has consumed the whole body and all
+// recorded call-site line numbers have been used.
+FuncOffsets
+BaseCompiler::finish()
+{
+    MOZ_ASSERT(done(), "all bytes must be consumed");
+    MOZ_ASSERT(func_.callSiteLineNums.length() == lastReadCallSite_);
+
+    // Ensure all pending code is committed to the buffer before the offsets
+    // are handed back.
+    masm.flushBuffer();
+
+    return offsets_;
+}
+
+} // wasm
+} // js
+
+// Report whether the wasm baseline compiler is usable on this platform:
+// requires installed signal handlers everywhere, hardware integer division
+// on ARM, and a supported codegen backend (x64/x86/ARM).
+bool
+js::wasm::BaselineCanCompile()
+{
+    // On all platforms we require signals for Wasm.
+    // If we made it this far we must have signals.
+    MOZ_RELEASE_ASSERT(wasm::HaveSignalHandlers());
+
+#if defined(JS_CODEGEN_ARM)
+    // Simplifying assumption: require SDIV and UDIV.
+    //
+    // I have no good data on ARM populations allowing me to say that
+    // X% of devices in the market implement SDIV and UDIV.  However,
+    // they are definitely implemented on the Cortex-A7 and Cortex-A15
+    // and on all ARMv8 systems.
+    if (!HasIDIV())
+        return false;
+#endif
+
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+    return true;
+#else
+    return false;
+#endif
+}
+
+// Compile every function in `inputs` with the single-pass baseline compiler,
+// appending the generated machine code and per-function code ranges to
+// `code`.  Returns false on OOM or a validation failure (details, if any,
+// are reported through `error`).
+bool
+js::wasm::BaselineCompileFunctions(const ModuleEnvironment& env, LifoAlloc& lifo,
+                                   const FuncCompileInputVector& inputs, CompiledCode* code,
+                                   UniqueChars* error)
+{
+    MOZ_ASSERT(env.tier == Tier::Baseline);
+    MOZ_ASSERT(env.kind == ModuleKind::Wasm);
+
+    // The MacroAssembler will sometimes access the jitContext.
+
+    TempAllocator alloc(&lifo);
+    JitContext jitContext(&alloc);
+    MOZ_ASSERT(IsCompilingWasm());
+    MacroAssembler masm(MacroAssembler::WasmToken(), alloc);
+
+    // Swap in already-allocated empty vectors to avoid malloc/free.
+    MOZ_ASSERT(code->empty());
+    if (!code->swap(masm))
+        return false;
+
+    for (const FuncCompileInput& func : inputs) {
+        Decoder d(func.begin, func.end, func.lineOrBytecode, error);
+
+        // Build the local types vector: the declared arguments followed by
+        // the locals decoded from the function body's local entries.
+
+        ValTypeVector locals;
+        if (!locals.appendAll(env.funcSigs[func.index]->args()))
+            return false;
+        if (!DecodeLocalEntries(d, env.kind, &locals))
+            return false;
+
+        // One-pass baseline compilation.
+
+        BaseCompiler f(env, d, func, locals, env.debugEnabled(), &alloc, &masm, env.mode);
+        if (!f.init())
+            return false;
+
+        if (!f.emitFunction())
+            return false;
+
+        if (!code->codeRanges.emplaceBack(func.index, func.lineOrBytecode, f.finish()))
+            return false;
+    }
+
+    masm.finish();
+    if (masm.oom())
+        return false;
+
+    // Swap the filled vectors back into `code` for the caller.
+    return code->swap(masm);
+}
+
+#undef RABALDR_INT_DIV_I64_CALLOUT
+#undef RABALDR_I64_TO_FLOAT_CALLOUT
+#undef RABALDR_FLOAT_TO_I64_CALLOUT
+#undef ATOMIC_PTR
--- a/js/src/wasm/WasmIonCompile.cpp
+++ b/js/src/wasm/WasmIonCompile.cpp
@@ -16,16 +16,17 @@
  * limitations under the License.
  */
 
 #include "wasm/WasmIonCompile.h"
 
 #include "mozilla/MathAlgorithms.h"
 
 #include "jit/CodeGenerator.h"
+#include "jit/MIRInstruction.h"
 
 #include "wasm/WasmBaselineCompile.h"
 #include "wasm/WasmBinaryIterator.h"
 #include "wasm/WasmGenerator.h"
 #include "wasm/WasmSignalHandlers.h"
 #include "wasm/WasmValidate.h"
 
 using namespace js;