Bug 1420104 - rabaldr, reorganize and tidy up. r?bbouvier

author      Lars T Hansen <lhansen@mozilla.com>
date        Thu, 23 Nov 2017 10:13:33 +0100
changeset   704214 0cfeb6736dc481ca278c35fac815ba79208cce87
parent      704213 070ec279a8b42bd9e4a66c984311e5ec6c7c1030
child       704215 e7c636d2cd1472968bbdc7c4653545533f0b1808
push id     91107
push user   bmo:lhansen@mozilla.com
push date   Tue, 28 Nov 2017 10:55:44 +0000
reviewers   bbouvier
bugs        1420104
milestone   59.0a1
Bug 1420104 - rabaldr, reorganize and tidy up. r?bbouvier

Clean up the handling of invalid registers by adding validity predicates
to the register wrapper types, along with static constructors that create
invalid register values.

Move code around so that register management is separated from the value
stack code.

Generally clean up, and use abstractions that load constants and perform
register-to-register moves.

MozReview-Commit-ID: KpJisW1HEV2
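The register-wrapper pattern in the first hunk is small but easy to misread in
diff form. Below is a minimal standalone sketch of the idiom, for illustration
only: the Reg type, its -1 sentinel encoding, and the maybeFree() helper are
hypothetical stand-ins, not the actual RegI32/maybeFreeI32() definitions from
WasmBaselineCompile.cpp.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Illustrative stand-in for a jit register type; -1 encodes "no register".
    struct Reg
    {
        int32_t code;

        Reg() : code(-1) {}
        explicit Reg(int32_t c) : code(c) {}

        // The pattern this patch adds to RegI32/RegI64/RegF32/RegF64: validity
        // predicates plus a static constructor for the invalid value, replacing
        // the old invalidI32()-style helpers on the compiler object.
        bool valid() const { return code != Invalid().code; }
        bool invalid() const { return !valid(); }
        static Reg Invalid() { return Reg(); }
    };

    // Hypothetical call site in the style of maybeFreeI32(): freeing the
    // invalid sentinel is a no-op, so optional temps need no special casing.
    void maybeFree(Reg r)
    {
        if (r.valid())
            printf("freeing r%d\n", r.code);
    }

    int main()
    {
        Reg tmp = Reg::Invalid();   // optional temp not allocated on this path
        assert(tmp.invalid());
        maybeFree(tmp);             // does nothing

        Reg ptr(3);
        assert(ptr.valid());
        maybeFree(ptr);             // prints "freeing r3"
        return 0;
    }

The point of the static Invalid() constructor is that call sites such as
maybeHighPart() can return a typed invalid value instead of the untyped
Register::Invalid(), and assertions can read tmp.invalid() rather than
comparing against an invalidI32() helper.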
js/src/wasm/WasmBaselineCompile.cpp
--- a/js/src/wasm/WasmBaselineCompile.cpp
+++ b/js/src/wasm/WasmBaselineCompile.cpp
@@ -192,34 +192,46 @@ template<> struct RegTypeOf<MIRType::Dou
 // The strongly typed register wrappers are especially useful to distinguish
 // float registers from double registers, but they also clearly distinguish
 // 32-bit registers from 64-bit register pairs on 32-bit systems.
 
 struct RegI32 : public Register
 {
     RegI32() : Register(Register::Invalid()) {}
     explicit RegI32(Register reg) : Register(reg) {}
+    bool valid() const { return *this != Invalid(); }
+    bool invalid() const { return !valid(); }
+    static RegI32 Invalid() { return RegI32(Register::Invalid()); }
 };
 
 struct RegI64 : public Register64
 {
     RegI64() : Register64(Register64::Invalid()) {}
     explicit RegI64(Register64 reg) : Register64(reg) {}
+    bool valid() const { return *this != Invalid(); }
+    bool invalid() const { return !valid(); }
+    static RegI64 Invalid() { return RegI64(Register64::Invalid()); }
 };
 
 struct RegF32 : public FloatRegister
 {
     RegF32() : FloatRegister() {}
     explicit RegF32(FloatRegister reg) : FloatRegister(reg) {}
+    bool valid() const { return *this != Invalid(); }
+    bool invalid() const { return !valid(); }
+    static RegF32 Invalid() { return RegF32(InvalidFloatReg); }
 };
 
 struct RegF64 : public FloatRegister
 {
     RegF64() : FloatRegister() {}
     explicit RegF64(FloatRegister reg) : FloatRegister(reg) {}
+    bool valid() const { return *this != Invalid(); }
+    bool invalid() const { return !valid(); }
+    static RegF64 Invalid() { return RegF64(InvalidFloatReg); }
 };
 
 struct AnyReg
 {
     explicit AnyReg(RegI32 r) { tag = I32; i32_ = r; }
     explicit AnyReg(RegI64 r) { tag = I64; i64_ = r; }
     explicit AnyReg(RegF32 r) { tag = F32; f32_ = r; }
     explicit AnyReg(RegF64 r) { tag = F64; f64_ = r; }
@@ -1563,17 +1575,206 @@ class BaseCompiler final : public BaseCo
 
     Local& localFromSlot(uint32_t slot, MIRType type) {
         MOZ_ASSERT(localInfo_[slot].type == type);
         return localInfo_[slot];
     }
 
     ////////////////////////////////////////////////////////////
     //
-    // Value stack and high-level register allocation.
+    // High-level register management.
+
+    bool isAvailableI32(RegI32 r) { return ra.isAvailableI32(r); }
+    bool isAvailableI64(RegI64 r) { return ra.isAvailableI64(r); }
+    bool isAvailableF32(RegF32 r) { return ra.isAvailableF32(r); }
+    bool isAvailableF64(RegF64 r) { return ra.isAvailableF64(r); }
+
+    MOZ_MUST_USE RegI32 needI32() { return ra.needI32(); }
+    MOZ_MUST_USE RegI64 needI64() { return ra.needI64(); }
+    MOZ_MUST_USE RegF32 needF32() { return ra.needF32(); }
+    MOZ_MUST_USE RegF64 needF64() { return ra.needF64(); }
+
+    void needI32(RegI32 specific) { ra.needI32(specific); }
+    void needI64(RegI64 specific) { ra.needI64(specific); }
+    void needF32(RegF32 specific) { ra.needF32(specific); }
+    void needF64(RegF64 specific) { ra.needF64(specific); }
+
+#if defined(JS_CODEGEN_ARM)
+    MOZ_MUST_USE RegI64 needI64Pair() { return ra.needI64Pair(); }
+#endif
+
+    void freeI32(RegI32 r) { ra.freeI32(r); }
+    void freeI64(RegI64 r) { ra.freeI64(r); }
+    void freeF32(RegF32 r) { ra.freeF32(r); }
+    void freeF64(RegF64 r) { ra.freeF64(r); }
+
+    void freeI64Except(RegI64 r, RegI32 except) {
+#ifdef JS_PUNBOX64
+        MOZ_ASSERT(r.reg == except);
+#else
+        MOZ_ASSERT(r.high == except || r.low == except);
+        freeI64(r);
+        needI32(except);
+#endif
+    }
+
+    void maybeFreeI32(RegI32 r) {
+        if (r.valid())
+            freeI32(r);
+    }
+
+    void maybeFreeI64(RegI64 r) {
+        if (r.valid())
+            freeI64(r);
+    }
+
+    void needI32NoSync(RegI32 r) {
+        MOZ_ASSERT(isAvailableI32(r));
+        needI32(r);
+    }
+
+    // TODO / OPTIMIZE: need2xI32() can be optimized along with needI32()
+    // to avoid sync(). (Bug 1316802)
+
+    void need2xI32(RegI32 r0, RegI32 r1) {
+        needI32(r0);
+        needI32(r1);
+    }
+
+    void need2xI64(RegI64 r0, RegI64 r1) {
+        needI64(r0);
+        needI64(r1);
+    }
+
+    RegI32 fromI64(RegI64 r) {
+        return RegI32(lowPart(r));
+    }
+
+#ifdef JS_64BIT
+    RegI64 fromI32(RegI32 r) {
+        return RegI64(Register64(r));
+    }
+#endif
+
+    RegI64 widenI32(RegI32 r) {
+        MOZ_ASSERT(!isAvailableI32(r));
+#ifdef JS_PUNBOX64
+        return fromI32(r);
+#else
+        RegI32 high = needI32();
+        return RegI64(Register64(high, r));
+#endif
+    }
+
+    RegI32 narrowI64(RegI64 r) {
+#if defined(JS_PUNBOX64)
+        return RegI32(r.reg);
+#else
+        freeI32(RegI32(r.high));
+        return RegI32(r.low);
+#endif
+    }
+
+    RegI32 lowPart(RegI64 r) {
+#ifdef JS_PUNBOX64
+        return RegI32(r.reg);
+#else
+        return RegI32(r.low);
+#endif
+    }
+
+    RegI32 maybeHighPart(RegI64 r) {
+#ifdef JS_PUNBOX64
+        return RegI32::Invalid();
+#else
+        return RegI32(r.high);
+#endif
+    }
+
+    void maybeClearHighPart(RegI64 r) {
+#if !defined(JS_PUNBOX64)
+        moveImm32(0, RegI32(r.high));
+#endif
+    }
+
+    void moveI32(RegI32 src, RegI32 dest) {
+        if (src != dest)
+            masm.move32(src, dest);
+    }
+
+    void moveI64(RegI64 src, RegI64 dest) {
+        if (src != dest)
+            masm.move64(src, dest);
+    }
+
+    void moveF64(RegF64 src, RegF64 dest) {
+        if (src != dest)
+            masm.moveDouble(src, dest);
+    }
+
+    void moveF32(RegF32 src, RegF32 dest) {
+        if (src != dest)
+            masm.moveFloat32(src, dest);
+    }
+
+    void maybeReserveJoinRegI(ExprType type) {
+        if (type == ExprType::I32)
+            needI32(joinRegI32);
+        else if (type == ExprType::I64)
+            needI64(joinRegI64);
+    }
+
+    void maybeUnreserveJoinRegI(ExprType type) {
+        if (type == ExprType::I32)
+            freeI32(joinRegI32);
+        else if (type == ExprType::I64)
+            freeI64(joinRegI64);
+    }
+
+    void maybeReserveJoinReg(ExprType type) {
+        switch (type) {
+          case ExprType::I32:
+            needI32(joinRegI32);
+            break;
+          case ExprType::I64:
+            needI64(joinRegI64);
+            break;
+          case ExprType::F32:
+            needF32(joinRegF32);
+            break;
+          case ExprType::F64:
+            needF64(joinRegF64);
+            break;
+          default:
+            break;
+        }
+    }
+
+    void maybeUnreserveJoinReg(ExprType type) {
+        switch (type) {
+          case ExprType::I32:
+            freeI32(joinRegI32);
+            break;
+          case ExprType::I64:
+            freeI64(joinRegI64);
+            break;
+          case ExprType::F32:
+            freeF32(joinRegF32);
+            break;
+          case ExprType::F64:
+            freeF64(joinRegF64);
+            break;
+          default:
+            break;
+        }
+    }
+
+    ////////////////////////////////////////////////////////////
+    //
+    // Value stack and spilling.
     //
     // The value stack facilitates some on-the-fly register allocation
     // and immediate-constant use.  It tracks constants, latent
     // references to locals, register contents, and values on the CPU
     // stack.
     //
     // The stack can be flushed to memory using sync().  This is handy
     // to avoid problems with control flow and messy register usage
@@ -1661,201 +1862,46 @@ class BaseCompiler final : public BaseCo
 
     Vector<Stk, 8, SystemAllocPolicy> stk_;
 
     Stk& push() {
         stk_.infallibleEmplaceBack(Stk());
         return stk_.back();
     }
 
-    RegI32 invalidI32() {
-        return RegI32(Register::Invalid());
-    }
-
-    RegI64 invalidI64() {
-        return RegI64(Register64::Invalid());
-    }
-
-    RegF64 invalidF64() {
-        return RegF64(InvalidFloatReg);
-    }
-
-    RegI32 fromI64(RegI64 r) {
-        return RegI32(lowPart(r));
-    }
-
-#ifdef JS_64BIT
-    RegI64 fromI32(RegI32 r) {
-        return RegI64(Register64(r));
-    }
-#endif
-
-    RegI64 widenI32(RegI32 r) {
-        MOZ_ASSERT(!isAvailableI32(r));
-#ifdef JS_PUNBOX64
-        return fromI32(r);
-#else
-        RegI32 high = needI32();
-        return RegI64(Register64(high, r));
-#endif
-    }
-
-    RegI32 narrowI64(RegI64 r) {
-#if defined(JS_PUNBOX64)
-        return RegI32(r.reg);
-#else
-        freeI32(RegI32(r.high));
-        return RegI32(r.low);
-#endif
-    }
-
-    Register lowPart(RegI64 r) {
-#ifdef JS_PUNBOX64
-        return r.reg;
-#else
-        return r.low;
-#endif
-    }
-
-    Register maybeHighPart(RegI64 r) {
-#ifdef JS_PUNBOX64
-        return Register::Invalid();
-#else
-        return r.high;
-#endif
-    }
-
-    void maybeClearHighPart(RegI64 r) {
-#if !defined(JS_PUNBOX64)
-        masm.move32(Imm32(0), r.high);
-#endif
-    }
-
-    bool isAvailableI32(RegI32 r) { return ra.isAvailableI32(r); }
-    bool isAvailableI64(RegI64 r) { return ra.isAvailableI64(r); }
-    bool isAvailableF32(RegF32 r) { return ra.isAvailableF32(r); }
-    bool isAvailableF64(RegF64 r) { return ra.isAvailableF64(r); }
-
-    MOZ_MUST_USE RegI32 needI32() { return ra.needI32(); }
-    MOZ_MUST_USE RegI64 needI64() { return ra.needI64(); }
-    MOZ_MUST_USE RegF32 needF32() { return ra.needF32(); }
-    MOZ_MUST_USE RegF64 needF64() { return ra.needF64(); }
-
-    void needI32(RegI32 specific) { ra.needI32(specific); }
-    void needI64(RegI64 specific) { ra.needI64(specific); }
-    void needF32(RegF32 specific) { ra.needF32(specific); }
-    void needF64(RegF64 specific) { ra.needF64(specific); }
-
-#if defined(JS_CODEGEN_ARM)
-    MOZ_MUST_USE RegI64 needI64Pair() { return ra.needI64Pair(); }
-#endif
-
-    void freeI32(RegI32 r) { ra.freeI32(r); }
-    void freeI64(RegI64 r) { ra.freeI64(r); }
-    void freeF32(RegF32 r) { ra.freeF32(r); }
-    void freeF64(RegF64 r) { ra.freeF64(r); }
-
-    void freeI64Except(RegI64 r, RegI32 except) {
-#ifdef JS_PUNBOX64
-        MOZ_ASSERT(r.reg == except);
-#else
-        MOZ_ASSERT(r.high == except || r.low == except);
-        freeI64(r);
-        needI32(except);
-#endif
-    }
-
-    void maybeFreeI32(RegI32 r) {
-        if (r != invalidI32())
-            freeI32(r);
-    }
-
-    void maybeFreeI64(RegI64 r) {
-        if (r != invalidI64())
-            freeI64(r);
-    }
-
-    void needI32NoSync(RegI32 r) {
-        MOZ_ASSERT(isAvailableI32(r));
-        needI32(r);
-    }
-
-    // TODO / OPTIMIZE: need2xI32() can be optimized along with needI32()
-    // to avoid sync(). (Bug 1316802)
-
-    void need2xI32(RegI32 r0, RegI32 r1) {
-        needI32(r0);
-        needI32(r1);
-    }
-
-    void need2xI64(RegI64 r0, RegI64 r1) {
-        needI64(r0);
-        needI64(r1);
-    }
-
-    void moveI32(RegI32 src, RegI32 dest) {
-        if (src != dest)
-            masm.move32(src, dest);
-    }
-
-    void moveI64(RegI64 src, RegI64 dest) {
-        if (src != dest)
-            masm.move64(src, dest);
-    }
-
-    void moveF64(RegF64 src, RegF64 dest) {
-        if (src != dest)
-            masm.moveDouble(src, dest);
-    }
-
-    void moveF32(RegF32 src, RegF32 dest) {
-        if (src != dest)
-            masm.moveFloat32(src, dest);
-    }
-
-    void setI64(int64_t v, RegI64 r) {
-        masm.move64(Imm64(v), r);
-    }
-
     void loadConstI32(RegI32 r, Stk& src) {
-        masm.mov(ImmWord(uint32_t(src.i32val())), r);
-    }
-
-    void loadConstI32(RegI32 r, int32_t v) {
-        masm.mov(ImmWord(uint32_t(v)), r);
+        moveImm32(src.i32val(), r);
     }
 
     void loadMemI32(RegI32 r, Stk& src) {
         fr.loadStackI32(r, src.offs());
     }
 
     void loadLocalI32(RegI32 r, Stk& src) {
         fr.loadLocalI32(r, localFromSlot(src.slot(), MIRType::Int32));
     }
 
     void loadRegisterI32(RegI32 r, Stk& src) {
-        if (src.i32reg() != r)
-            masm.move32(src.i32reg(), r);
+        moveI32(src.i32reg(), r);
     }
 
     void loadConstI64(RegI64 r, Stk &src) {
-        masm.move64(Imm64(src.i64val()), r);
+        moveImm64(src.i64val(), r);
     }
 
     void loadMemI64(RegI64 r, Stk& src) {
         fr.loadStackI64(r, src.offs());
     }
 
     void loadLocalI64(RegI64 r, Stk& src) {
         fr.loadLocalI64(r, localFromSlot(src.slot(), MIRType::Int64));
     }
 
     void loadRegisterI64(RegI64 r, Stk& src) {
-        if (src.i64reg() != r)
-            masm.move64(src.i64reg(), r);
+        moveI64(src.i64reg(), r);
     }
 
     void loadConstF64(RegF64 r, Stk &src) {
         double d;
         src.f64val(&d);
         masm.loadConstantDouble(d, r);
     }
 
@@ -1863,18 +1909,17 @@ class BaseCompiler final : public BaseCo
         fr.loadStackF64(r, src.offs());
     }
 
     void loadLocalF64(RegF64 r, Stk& src) {
         fr.loadLocalF64(r, localFromSlot(src.slot(), MIRType::Double));
     }
 
     void loadRegisterF64(RegF64 r, Stk& src) {
-        if (src.f64reg() != r)
-            masm.moveDouble(src.f64reg(), r);
+        moveF64(src.f64reg(), r);
     }
 
     void loadConstF32(RegF32 r, Stk &src) {
         float f;
         src.f32val(&f);
         masm.loadConstantFloat32(f, r);
     }
 
@@ -1882,18 +1927,17 @@ class BaseCompiler final : public BaseCo
         fr.loadStackF32(r, src.offs());
     }
 
     void loadLocalF32(RegF32 r, Stk& src) {
         fr.loadLocalF32(r, localFromSlot(src.slot(), MIRType::Float32));
     }
 
     void loadRegisterF32(RegF32 r, Stk& src) {
-        if (src.f32reg() != r)
-            masm.moveFloat32(src.f32reg(), r);
+        moveF32(src.f32reg(), r);
     }
 
     void loadI32(RegI32 r, Stk& src) {
         switch (src.kind()) {
           case Stk::ConstI32:
             loadConstI32(r, src);
             break;
           case Stk::MemI32:
@@ -1930,48 +1974,46 @@ class BaseCompiler final : public BaseCo
             MOZ_CRASH("Compiler bug: Expected I64 on stack");
         }
     }
 
 #if !defined(JS_PUNBOX64)
     void loadI64Low(RegI32 r, Stk& src) {
         switch (src.kind()) {
           case Stk::ConstI64:
-            masm.move32(Imm64(src.i64val()).low(), r);
+            moveImm32(int32_t(src.i64val()), r);
             break;
           case Stk::MemI64:
             fr.loadStackI64Low(r, src.offs());
             break;
           case Stk::LocalI64:
             fr.loadLocalI64Low(r, localFromSlot(src.slot(), MIRType::Int64));
             break;
           case Stk::RegisterI64:
-            if (src.i64reg().low != r)
-                masm.move32(src.i64reg().low, r);
+            moveI32(RegI32(src.i64reg().low), r);
             break;
           case Stk::None:
           default:
             MOZ_CRASH("Compiler bug: Expected I64 on stack");
         }
     }
 
     void loadI64High(RegI32 r, Stk& src) {
         switch (src.kind()) {
           case Stk::ConstI64:
-            masm.move32(Imm64(src.i64val()).hi(), r);
+            moveImm32(int32_t(src.i64val() >> 32), r);
             break;
           case Stk::MemI64:
             fr.loadStackI64High(r, src.offs());
             break;
           case Stk::LocalI64:
             fr.loadLocalI64High(r, localFromSlot(src.slot(), MIRType::Int64));
             break;
           case Stk::RegisterI64:
-            if (src.i64reg().high != r)
-                masm.move32(src.i64reg().high, r);
+            moveI32(RegI32(src.i64reg().high), r);
             break;
           case Stk::None:
           default:
             MOZ_CRASH("Compiler bug: Expected I64 on stack");
         }
     }
 #endif
 
@@ -2612,68 +2654,16 @@ class BaseCompiler final : public BaseCo
             freeF64(r->f64());
             break;
           case AnyReg::F32:
             freeF32(r->f32());
             break;
         }
     }
 
-    void maybeReserveJoinRegI(ExprType type) {
-        if (type == ExprType::I32)
-            needI32(joinRegI32);
-        else if (type == ExprType::I64)
-            needI64(joinRegI64);
-    }
-
-    void maybeUnreserveJoinRegI(ExprType type) {
-        if (type == ExprType::I32)
-            freeI32(joinRegI32);
-        else if (type == ExprType::I64)
-            freeI64(joinRegI64);
-    }
-
-    void maybeReserveJoinReg(ExprType type) {
-        switch (type) {
-          case ExprType::I32:
-            needI32(joinRegI32);
-            break;
-          case ExprType::I64:
-            needI64(joinRegI64);
-            break;
-          case ExprType::F32:
-            needF32(joinRegF32);
-            break;
-          case ExprType::F64:
-            needF64(joinRegF64);
-            break;
-          default:
-            break;
-        }
-    }
-
-    void maybeUnreserveJoinReg(ExprType type) {
-        switch (type) {
-          case ExprType::I32:
-            freeI32(joinRegI32);
-            break;
-          case ExprType::I64:
-            freeI64(joinRegI64);
-            break;
-          case ExprType::F32:
-            freeF32(joinRegF32);
-            break;
-          case ExprType::F64:
-            freeF64(joinRegF64);
-            break;
-          default:
-            break;
-        }
-    }
-
     // Return the amount of execution stack consumed by the top numval
     // values on the value stack.
 
     size_t stackConsumed(size_t numval) {
         size_t size = 0;
         MOZ_ASSERT(numval <= stk_.length());
         for (uint32_t i = stk_.length() - 1; numval > 0; numval--, i--) {
             Stk& v = stk_[i];
@@ -3232,16 +3222,35 @@ class BaseCompiler final : public BaseCo
         CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Symbolic);
         masm.wasmCallBuiltinInstanceMethod(desc, instanceArg, builtin);
     }
 
     //////////////////////////////////////////////////////////////////////
     //
     // Sundry low-level code generators.
 
+    // The compiler depends on moveImm32() clearing the high bits of a 64-bit
+    // register on 64-bit systems.
+
+    void moveImm32(int32_t v, RegI32 dest) {
+        masm.mov(ImmWord(uint32_t(v)), dest);
+    }
+
+    void moveImm64(int64_t v, RegI64 r) {
+        masm.move64(Imm64(v), r);
+    }
+
+    void moveImmF32(float f, RegF32 r) {
+        masm.loadConstantFloat32(f, r);
+    }
+
+    void moveImmF64(double d, RegF64 r) {
+        masm.loadConstantDouble(d, r);
+    }
+
     void addInterruptCheck()
     {
         // Always use signals for interrupts with Asm.JS/Wasm
         MOZ_RELEASE_ASSERT(HaveSignalHandlers());
     }
 
     void jumpTable(LabelVector& labels, Label* theTable) {
         // Flush constant pools to ensure that the table is never interrupted by
@@ -3381,17 +3390,17 @@ class BaseCompiler final : public BaseCo
         masm.branchTest64(Assembler::Zero, r, r, scratch, trap(Trap::IntegerDivideByZero));
     }
 
     void checkDivideSignedOverflowI32(RegI32 rhs, RegI32 srcDest, Label* done, bool zeroOnOverflow) {
         Label notMin;
         masm.branch32(Assembler::NotEqual, srcDest, Imm32(INT32_MIN), &notMin);
         if (zeroOnOverflow) {
             masm.branch32(Assembler::NotEqual, rhs, Imm32(-1), &notMin);
-            masm.move32(Imm32(0), srcDest);
+            moveImm32(0, srcDest);
             masm.jump(done);
         } else {
             masm.branch32(Assembler::Equal, rhs, Imm32(-1), trap(Trap::IntegerOverflow));
         }
         masm.bind(&notMin);
     }
 
     void checkDivideSignedOverflowI64(RegI64 rhs, RegI64 srcDest, Label* done, bool zeroOnOverflow) {
@@ -3743,20 +3752,20 @@ class BaseCompiler final : public BaseCo
 #if defined(JS_CODEGEN_X64)
         masm.cmpq(rhs.reg, lhs.reg);
         masm.emitSet(cond, dest);
 #elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
         // TODO / OPTIMIZE (Bug 1316822): This is pretty branchy; we should
         // be able to do better.
         Label done, condTrue;
         masm.branch64(cond, lhs, rhs, &condTrue);
-        masm.move32(Imm32(0), dest);
+        moveImm32(0, dest);
         masm.jump(&done);
         masm.bind(&condTrue);
-        masm.move32(Imm32(1), dest);
+        moveImm32(1, dest);
         masm.bind(&done);
 #else
         MOZ_CRASH("BaseCompiler platform hook: cmp64Set");
 #endif
     }
 
     void eqz64(RegI64 src, RegI32 dest) {
 #if defined(JS_CODEGEN_X64)
@@ -3918,21 +3927,21 @@ class BaseCompiler final : public BaseCo
                               trap(Trap::UnalignedAccess));
         }
 
         // Ensure no tls if we don't need it.
 
 #ifdef WASM_HUGE_MEMORY
         // We have HeapReg and no bounds checking, and we need to load neither
         // memoryBase nor boundsCheckLimit from tls.
-        MOZ_ASSERT_IF(check->omitBoundsCheck, tls == invalidI32());
+        MOZ_ASSERT_IF(check->omitBoundsCheck, tls.invalid());
 #endif
 #ifdef JS_CODEGEN_ARM
         // We have HeapReg on ARM and don't need to load the memoryBase from tls.
-        MOZ_ASSERT_IF(check->omitBoundsCheck, tls == invalidI32());
+        MOZ_ASSERT_IF(check->omitBoundsCheck, tls.invalid());
 #endif
 
         // Bounds check if required.
 
 #ifndef WASM_HUGE_MEMORY
         if (!check->omitBoundsCheck) {
             masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr,
                                  Address(tls, offsetof(TlsData, boundsCheckLimit)),
@@ -4002,17 +4011,17 @@ class BaseCompiler final : public BaseCo
 #elif defined(JS_CODEGEN_ARM)
         if (IsUnaligned(*access)) {
             switch (dest.tag) {
               case AnyReg::I64:
                 masm.wasmUnalignedLoadI64(*access, HeapReg, ptr, ptr, dest.i64(), tmp1);
                 break;
               case AnyReg::F32:
                 masm.wasmUnalignedLoadFP(*access, HeapReg, ptr, ptr, dest.f32(), tmp1, tmp2,
-                                         Register::Invalid());
+                                         RegI32::Invalid());
                 break;
               case AnyReg::F64:
                 masm.wasmUnalignedLoadFP(*access, HeapReg, ptr, ptr, dest.f64(), tmp1, tmp2, tmp3);
                 break;
               default:
                 masm.wasmUnalignedLoad(*access, HeapReg, ptr, ptr, dest.i32(), tmp1);
                 break;
             }
@@ -4040,22 +4049,22 @@ class BaseCompiler final : public BaseCo
     // This may destroy ptr and src.
     MOZ_MUST_USE bool store(MemoryAccessDesc* access, AccessCheck* check, RegI32 tls, RegI32 ptr,
                             AnyReg src, RegI32 tmp)
     {
         prepareMemoryAccess(access, check, tls, ptr);
 
         // Emit the store
 #if defined(JS_CODEGEN_X64)
-        MOZ_ASSERT(tmp == invalidI32());
+        MOZ_ASSERT(tmp.invalid());
         Operand dstAddr(HeapReg, ptr, TimesOne, access->offset());
 
         masm.wasmStore(*access, src.any(), dstAddr);
 #elif defined(JS_CODEGEN_X86)
-        MOZ_ASSERT(tmp == invalidI32());
+        MOZ_ASSERT(tmp.invalid());
         masm.addPtr(Address(tls, offsetof(TlsData, memoryBase)), ptr);
         Operand dstAddr(ptr, access->offset());
 
         if (access->type() == Scalar::Int64) {
             masm.wasmStoreI64(*access, src.i64(), dstAddr);
         } else {
             AnyRegister value;
             if (src.tag == AnyReg::I64) {
@@ -4082,22 +4091,22 @@ class BaseCompiler final : public BaseCo
                 break;
               case AnyReg::F32:
                 masm.wasmUnalignedStoreFP(*access, src.f32(), HeapReg, ptr, ptr, tmp);
                 break;
               case AnyReg::F64:
                 masm.wasmUnalignedStoreFP(*access, src.f64(), HeapReg, ptr, ptr, tmp);
                 break;
               default:
-                MOZ_ASSERT(tmp == invalidI32());
+                MOZ_ASSERT(tmp.invalid());
                 masm.wasmUnalignedStore(*access, src.i32(), HeapReg, ptr, ptr);
                 break;
             }
         } else {
-            MOZ_ASSERT(tmp == invalidI32());
+            MOZ_ASSERT(tmp.invalid());
             if (access->type() == Scalar::Int64)
                 masm.wasmStoreI64(*access, src.i64(), HeapReg, ptr, ptr);
             else if (src.tag == AnyReg::I64)
                 masm.wasmStore(*access, AnyRegister(src.i64().low), HeapReg, ptr, ptr);
             else
                 masm.wasmStore(*access, src.any(), HeapReg, ptr, ptr);
         }
 #else
@@ -4110,17 +4119,17 @@ class BaseCompiler final : public BaseCo
 #if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM)
 
 # define ATOMIC_PTR(name, access, tls, ptr)                             \
     BaseIndex name(HeapReg, (ptr), TimesOne, (access)->offset())
 
 #elif defined(JS_CODEGEN_X86)
 
 # define ATOMIC_PTR(name, access, tls, ptr)                             \
-    MOZ_ASSERT((tls) != invalidI32());                                  \
+    MOZ_ASSERT((tls).valid());                                          \
     masm.addPtr(Address((tls), offsetof(TlsData, memoryBase)), (ptr));  \
     Address name((ptr), (access)->offset())
 
 #else
 
 # define ATOMIC_PTR(name, access, tls, ptr)                       \
     MOZ_CRASH("BaseCompiler platform hook: address computation"); \
     Address srcAddr
@@ -4196,17 +4205,17 @@ class BaseCompiler final : public BaseCo
         ATOMIC_PTR(srcAddr, access, tls, ptr);
 
         switch (access->type()) {
           case Scalar::Uint8: {
             RegI32 v = rv;
             RegI32 d = rd;
 #ifdef JS_CODEGEN_X86
             // The temp, if used, must be a byte register.
-            MOZ_ASSERT(tmp == invalidI32());
+            MOZ_ASSERT(tmp.invalid());
             ScratchEBX scratch(*this);
             if (op != AtomicFetchAddOp && op != AtomicFetchSubOp)
                 tmp = scratch;
 #endif
             switch (op) {
               case AtomicFetchAddOp: masm.atomicFetchAdd8ZeroExtend(v, srcAddr, tmp, d); break;
               case AtomicFetchSubOp: masm.atomicFetchSub8ZeroExtend(v, srcAddr, tmp, d); break;
               case AtomicFetchAndOp: masm.atomicFetchAnd8ZeroExtend(v, srcAddr, tmp, d); break;
@@ -5022,17 +5031,17 @@ BaseCompiler::emitQuotientI64()
 {
 # ifdef JS_PUNBOX64
     int64_t c;
     uint_fast8_t power;
     if (popConstPositivePowerOfTwoI64(&c, &power, 0)) {
         if (power != 0) {
             RegI64 r = popI64();
             Label positive;
-            masm.branchTest64(Assembler::NotSigned, r, r, Register::Invalid(),
+            masm.branchTest64(Assembler::NotSigned, r, r, RegI32::Invalid(),
                               &positive);
             masm.add64(Imm64(c-1), r);
             masm.bind(&positive);
 
             masm.rshift64Arithmetic(Imm32(power & 63), r);
             pushI64(r);
         }
     } else {
@@ -5080,18 +5089,17 @@ BaseCompiler::emitRemainderI64()
     int64_t c;
     uint_fast8_t power;
     if (popConstPositivePowerOfTwoI64(&c, &power, 1)) {
         RegI64 r = popI64();
         RegI64 temp = needI64();
         moveI64(r, temp);
 
         Label positive;
-        masm.branchTest64(Assembler::NotSigned, temp, temp,
-                          Register::Invalid(), &positive);
+        masm.branchTest64(Assembler::NotSigned, temp, temp, RegI32::Invalid(), &positive);
         masm.add64(Imm64(c-1), temp);
         masm.bind(&positive);
 
         masm.rshift64Arithmetic(Imm32(power & 63), temp);
         masm.lshift64(Imm32(power & 63), temp);
         masm.sub64(temp, r);
         freeI64(temp);
 
@@ -5158,68 +5166,68 @@ BaseCompiler::emitMinF32()
 {
     RegF32 r0, r1;
     pop2xF32(&r0, &r1);
     // Convert signaling NaNs to quiet NaNs.
     //
     // TODO / OPTIMIZE (bug 1316824): Don't do this if one of the operands
     // is known to be a constant.
     ScratchF32 zero(*this);
-    masm.loadConstantFloat32(0.f, zero);
+    moveImmF32(0.f, zero);
     masm.subFloat32(zero, r0);
     masm.subFloat32(zero, r1);
     masm.minFloat32(r1, r0, HandleNaNSpecially(true));
     freeF32(r1);
     pushF32(r0);
 }
 
 void
 BaseCompiler::emitMaxF32()
 {
     RegF32 r0, r1;
     pop2xF32(&r0, &r1);
     // Convert signaling NaNs to quiet NaNs.
     //
     // TODO / OPTIMIZE (bug 1316824): see comment in emitMinF32.
     ScratchF32 zero(*this);
-    masm.loadConstantFloat32(0.f, zero);
+    moveImmF32(0.f, zero);
     masm.subFloat32(zero, r0);
     masm.subFloat32(zero, r1);
     masm.maxFloat32(r1, r0, HandleNaNSpecially(true));
     freeF32(r1);
     pushF32(r0);
 }
 
 void
 BaseCompiler::emitMinF64()
 {
     RegF64 r0, r1;
     pop2xF64(&r0, &r1);
     // Convert signaling NaNs to quiet NaNs.
     //
     // TODO / OPTIMIZE (bug 1316824): see comment in emitMinF32.
     ScratchF64 zero(*this);
-    masm.loadConstantDouble(0, zero);
+    moveImmF64(0, zero);
     masm.subDouble(zero, r0);
     masm.subDouble(zero, r1);
     masm.minDouble(r1, r0, HandleNaNSpecially(true));
     freeF64(r1);
     pushF64(r0);
 }
 
 void
 BaseCompiler::emitMaxF64()
 {
     RegF64 r0, r1;
     pop2xF64(&r0, &r1);
     // Convert signaling NaNs to quiet NaNs.
     //
     // TODO / OPTIMIZE (bug 1316824): see comment in emitMinF32.
     ScratchF64 zero(*this);
-    masm.loadConstantDouble(0, zero);
+    moveImmF64(0, zero);
     masm.subDouble(zero, r0);
     masm.subDouble(zero, r1);
     masm.maxDouble(r1, r0, HandleNaNSpecially(true));
     freeF64(r1);
     pushF64(r0);
 }
 
 void
@@ -5605,31 +5613,31 @@ void
 BaseCompiler::emitPopcntI32()
 {
     RegI32 r0 = popI32();
     if (popcnt32NeedsTemp()) {
         RegI32 tmp = needI32();
         masm.popcnt32(r0, r0, tmp);
         freeI32(tmp);
     } else {
-        masm.popcnt32(r0, r0, invalidI32());
+        masm.popcnt32(r0, r0, RegI32::Invalid());
     }
     pushI32(r0);
 }
 
 void
 BaseCompiler::emitPopcntI64()
 {
     RegI64 r0 = popI64();
     if (popcnt64NeedsTemp()) {
         RegI32 tmp = needI32();
         masm.popcnt64(r0, r0, tmp);
         freeI32(tmp);
     } else {
-        masm.popcnt64(r0, r0, invalidI32());
+        masm.popcnt64(r0, r0, RegI32::Invalid());
     }
     pushI64(r0);
 }
 
 void
 BaseCompiler::emitAbsF32()
 {
     RegF32 r0 = popF32();
@@ -5711,17 +5719,17 @@ BaseCompiler::emitTruncateF32ToI64()
     RegF32 r0 = popF32();
     RegI64 x0 = needI64();
     if (isUnsigned) {
         RegF64 tmp = needF64();
         if (!truncateF32ToI64(r0, x0, isUnsigned, tmp))
             return false;
         freeF64(tmp);
     } else {
-        if (!truncateF32ToI64(r0, x0, isUnsigned, invalidF64()))
+        if (!truncateF32ToI64(r0, x0, isUnsigned, RegF64::Invalid()))
             return false;
     }
     freeF32(r0);
     pushI64(x0);
     return true;
 }
 
 template<bool isUnsigned>
@@ -5731,17 +5739,17 @@ BaseCompiler::emitTruncateF64ToI64()
     RegF64 r0 = popF64();
     RegI64 x0 = needI64();
     if (isUnsigned) {
         RegF64 tmp = needF64();
         if (!truncateF64ToI64(r0, x0, isUnsigned, tmp))
             return false;
         freeF64(tmp);
     } else {
-        if (!truncateF64ToI64(r0, x0, isUnsigned, invalidF64()))
+        if (!truncateF64ToI64(r0, x0, isUnsigned, RegF64::Invalid()))
             return false;
     }
     freeF64(r0);
     pushI64(x0);
     return true;
 }
 #endif // RABALDR_FLOAT_TO_I64_CALLOUT
 
@@ -7224,17 +7232,17 @@ BaseCompiler::popMemoryAccess(MemoryAcce
         // beneficial.
 
         if (ea <= UINT32_MAX) {
             addr = uint32_t(ea);
             access->clearOffset();
         }
 
         RegI32 r = needI32();
-        loadConstI32(r, int32_t(addr));
+        moveImm32(int32_t(addr), r);
         return r;
     }
 
     uint32_t local;
     if (peekLocalI32(&local))
         bceCheckLocal(access, check, local);
 
     return popI32();
@@ -7454,19 +7462,19 @@ BaseCompiler::emitSelect()
         // normally pop before executing the branch.  On x86 this is one value
         // too many, so we need to generate more complicated code here, and for
         // simplicity's sake we do so even if the branch operands are not Int64.
         // However, the resulting control flow diamond is complicated since the
         // arms of the diamond will have to stay synchronized with respect to
         // their evaluation stack and regalloc state.  To simplify further, we
         // use a double branch and a temporary boolean value for now.
         RegI32 tmp = needI32();
-        loadConstI32(tmp, 0);
+        moveImm32(0, tmp);
         emitBranchPerform(&b);
-        loadConstI32(tmp, 1);
+        moveImm32(1, tmp);
         masm.bind(&done);
 
         Label trueValue;
         RegI64 r0, r1;
         pop2xI64(&r0, &r1);
         masm.branch32(Assembler::Equal, tmp, Imm32(0), &trueValue);
         moveI64(r1, r0);
         masm.bind(&trueValue);
@@ -7558,19 +7566,19 @@ BaseCompiler::emitCompareF32(Assembler::
 
     if (sniffConditionalControlCmp(compareOp, compareType))
         return;
 
     Label across;
     RegF32 r0, r1;
     pop2xF32(&r0, &r1);
     RegI32 i0 = needI32();
-    masm.mov(ImmWord(1), i0);
+    moveImm32(1, i0);
     masm.branchFloat(compareOp, r0, r1, &across);
-    masm.mov(ImmWord(0), i0);
+    moveImm32(0, i0);
     masm.bind(&across);
     freeF32(r0);
     freeF32(r1);
     pushI32(i0);
 }
 
 void
 BaseCompiler::emitCompareF64(Assembler::DoubleCondition compareOp, ValType compareType)
@@ -7579,19 +7587,19 @@ BaseCompiler::emitCompareF64(Assembler::
 
     if (sniffConditionalControlCmp(compareOp, compareType))
         return;
 
     Label across;
     RegF64 r0, r1;
     pop2xF64(&r0, &r1);
     RegI32 i0 = needI32();
-    masm.mov(ImmWord(1), i0);
+    moveImm32(1, i0);
     masm.branchDouble(compareOp, r0, r1, &across);
-    masm.mov(ImmWord(0), i0);
+    moveImm32(0, i0);
     masm.bind(&across);
     freeF64(r0);
     freeF64(r1);
     pushI32(i0);
 }
 
 void
 BaseCompiler::emitInstanceCall(uint32_t lineOrBytecode, const MIRTypeVector& sig,