Bug 1304672: More jit/ renamings from asm.js to wasm; r?luke draft
author Benjamin Bouvier <benj@benj.me>
Wed, 02 Nov 2016 18:06:57 +0100
changeset 432764 74ba6afedbe5cc224348d8cbac3bdc6cdb0ee191
parent 432763 acfe4bfd5593fa7289583574ac90deba55ac24fe
child 432765 ad7472c869f5091fb4d98afe3b6230c4382cdc4d
push id 34416
push user bbouvier@mozilla.com
push date Wed, 02 Nov 2016 18:00:01 +0000
reviewers luke
bugs 1304672
milestone 52.0a1
Bug 1304672: More jit/ renamings from asm.js to wasm; r?luke MozReview-Commit-ID: 62aCkmFbso1
js/src/asmjs/WasmBaselineCompile.cpp
js/src/asmjs/WasmFrameIterator.cpp
js/src/asmjs/WasmGenerator.cpp
js/src/asmjs/WasmIonCompile.cpp
js/src/asmjs/WasmIonCompile.h
js/src/asmjs/WasmJS.cpp
js/src/asmjs/WasmStubs.cpp
js/src/jit/AlignmentMaskAnalysis.cpp
js/src/jit/BacktrackingAllocator.cpp
js/src/jit/C1Spewer.cpp
js/src/jit/CodeGenerator.cpp
js/src/jit/EffectiveAddressAnalysis.cpp
js/src/jit/Ion.cpp
js/src/jit/IonAnalysis.cpp
js/src/jit/IonOptimizationLevels.cpp
js/src/jit/IonOptimizationLevels.h
js/src/jit/JSONSpewer.cpp
js/src/jit/JitCompartment.h
js/src/jit/JitOptions.cpp
js/src/jit/JitSpewer.cpp
js/src/jit/JitSpewer.h
js/src/jit/Linker.cpp
js/src/jit/Lowering.cpp
js/src/jit/MIR.cpp
js/src/jit/MIR.h
js/src/jit/MIRGenerator.h
js/src/jit/MIRGraph.cpp
js/src/jit/MacroAssembler.cpp
js/src/jit/MacroAssembler.h
js/src/jit/PerfSpewer.cpp
js/src/jit/PerfSpewer.h
js/src/jit/RangeAnalysis.cpp
js/src/jit/arm/Assembler-arm.cpp
js/src/jit/arm/Assembler-arm.h
js/src/jit/arm/CodeGenerator-arm.cpp
js/src/jit/arm/MacroAssembler-arm.cpp
js/src/jit/arm/MacroAssembler-arm.h
js/src/jit/arm/Simulator-arm.cpp
js/src/jit/arm64/Assembler-arm64.cpp
js/src/jit/arm64/Assembler-arm64.h
js/src/jit/arm64/MacroAssembler-arm64.cpp
js/src/jit/arm64/MacroAssembler-arm64.h
js/src/jit/mips-shared/Assembler-mips-shared.h
js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
js/src/jit/mips32/Assembler-mips32.h
js/src/jit/mips32/CodeGenerator-mips32.cpp
js/src/jit/mips32/MacroAssembler-mips32.cpp
js/src/jit/mips32/MacroAssembler-mips32.h
js/src/jit/mips32/Simulator-mips32.cpp
js/src/jit/mips64/Assembler-mips64.h
js/src/jit/mips64/CodeGenerator-mips64.cpp
js/src/jit/mips64/MacroAssembler-mips64.cpp
js/src/jit/mips64/MacroAssembler-mips64.h
js/src/jit/mips64/Simulator-mips64.cpp
js/src/jit/none/Architecture-none.h
js/src/jit/none/MacroAssembler-none.h
js/src/jit/shared/Assembler-shared.h
js/src/jit/shared/CodeGenerator-shared-inl.h
js/src/jit/shared/CodeGenerator-shared.cpp
js/src/jit/x64/Assembler-x64.cpp
js/src/jit/x64/Assembler-x64.h
js/src/jit/x64/MacroAssembler-x64.cpp
js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
js/src/jit/x86/Assembler-x86.cpp
js/src/jit/x86/Assembler-x86.h
js/src/jit/x86/MacroAssembler-x86.cpp
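Most of the hunks below are mechanical identifier substitutions (AsmJSFrame becomes WasmFrame, AsmJSStackAlignment becomes WasmStackAlignment, and so on). For orientation, here is a sketch of the two-word frame the sizeof(WasmFrame) arithmetic assumes; the field names follow the CallerFPFromFP/ReturnAddressFromFP accessors in WasmFrameIterator.cpp, but the real declaration lives outside this patch:

    // Sketch only; the actual definition is in the wasm headers, not this diff.
    struct WasmFrame
    {
        uint8_t* callerFP;    // frame pointer of the calling function
        void* returnAddress;  // pushed by the call instruction
    };
    // sizeof(WasmFrame) == 2 * sizeof(void*), matching the entry stub's
    // "only 1 word, not the usual 2" comment in WasmStubs.cpp.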
--- a/js/src/asmjs/WasmBaselineCompile.cpp
+++ b/js/src/asmjs/WasmBaselineCompile.cpp
@@ -2041,17 +2041,17 @@ class BaseCompiler
             masm.mov(ImmWord(0), scratch);
             for (int32_t i = varLow_ ; i < varHigh_ ; i+=4)
                 storeToFrameI32(scratch, i+4);
         }
     }
 
     bool endFunction() {
         // Out-of-line prologue.  Assumes that the in-line prologue has
-        // been executed and that a frame of size = localSize_ + sizeof(AsmJSFrame)
+        // been executed and that a frame of size = localSize_ + sizeof(WasmFrame)
         // has been allocated.
 
         masm.bind(&outOfLinePrologue_);
 
         MOZ_ASSERT(maxFramePushed_ >= localSize_);
 
         // ABINonArgReg0 != ScratchReg, which can be used by branchPtr().
 
@@ -2144,17 +2144,17 @@ class BaseCompiler
             call.hardFP = true;
 # else
             call.hardFP = false;
 # endif
             call.abi.setUseHardFp(call.hardFP);
 #endif
         }
 
-        call.frameAlignAdjustment = ComputeByteAlignment(masm.framePushed() + sizeof(AsmJSFrame),
+        call.frameAlignAdjustment = ComputeByteAlignment(masm.framePushed() + sizeof(WasmFrame),
                                                          JitStackAlignment);
     }
 
     void endCall(FunctionCall& call)
     {
         size_t adjustment = call.stackArgAreaSize + call.frameAlignAdjustment;
         if (adjustment)
             masm.freeStack(adjustment);
@@ -2994,17 +2994,17 @@ class BaseCompiler
 #if defined(JS_CODEGEN_X64)
         CodeOffset label = masm.loadRipRelativeInt32(r.reg);
         masm.append(GlobalAccess(label, globalDataOffset));
 #elif defined(JS_CODEGEN_X86)
         CodeOffset label = masm.movlWithPatch(PatchedAbsoluteAddress(), r.reg);
         masm.append(GlobalAccess(label, globalDataOffset));
 #elif defined(JS_CODEGEN_ARM)
         ScratchRegisterScope scratch(*this); // Really must be the ARM scratchreg
-        unsigned addr = globalDataOffset - AsmJSGlobalRegBias;
+        unsigned addr = globalDataOffset - WasmGlobalRegBias;
         masm.ma_dtr(js::jit::IsLoad, GlobalReg, Imm32(addr), r.reg, scratch);
 #else
         MOZ_CRASH("BaseCompiler platform hook: loadGlobalVarI32");
 #endif
     }
 
     void loadGlobalVarI64(unsigned globalDataOffset, RegI64 r)
     {
@@ -3013,17 +3013,17 @@ class BaseCompiler
         masm.append(GlobalAccess(label, globalDataOffset));
 #elif defined(JS_CODEGEN_X86)
         CodeOffset labelLow = masm.movlWithPatch(PatchedAbsoluteAddress(), r.reg.low);
         masm.append(GlobalAccess(labelLow, globalDataOffset + INT64LOW_OFFSET));
         CodeOffset labelHigh = masm.movlWithPatch(PatchedAbsoluteAddress(), r.reg.high);
         masm.append(GlobalAccess(labelHigh, globalDataOffset + INT64HIGH_OFFSET));
 #elif defined(JS_CODEGEN_ARM)
         ScratchRegisterScope scratch(*this); // Really must be the ARM scratchreg
-        unsigned addr = globalDataOffset - AsmJSGlobalRegBias;
+        unsigned addr = globalDataOffset - WasmGlobalRegBias;
         masm.ma_dtr(js::jit::IsLoad, GlobalReg, Imm32(addr + INT64LOW_OFFSET), r.reg.low, scratch);
         masm.ma_dtr(js::jit::IsLoad, GlobalReg, Imm32(addr + INT64HIGH_OFFSET), r.reg.high,
                     scratch);
 #else
         MOZ_CRASH("BaseCompiler platform hook: loadGlobalVarI64");
 #endif
     }
 
@@ -3031,34 +3031,34 @@ class BaseCompiler
     {
 #if defined(JS_CODEGEN_X64)
         CodeOffset label = masm.loadRipRelativeFloat32(r.reg);
         masm.append(GlobalAccess(label, globalDataOffset));
 #elif defined(JS_CODEGEN_X86)
         CodeOffset label = masm.vmovssWithPatch(PatchedAbsoluteAddress(), r.reg);
         masm.append(GlobalAccess(label, globalDataOffset));
 #elif defined(JS_CODEGEN_ARM)
-        unsigned addr = globalDataOffset - AsmJSGlobalRegBias;
+        unsigned addr = globalDataOffset - WasmGlobalRegBias;
         VFPRegister vd(r.reg);
         masm.ma_vldr(VFPAddr(GlobalReg, VFPOffImm(addr)), vd.singleOverlay());
 #else
         MOZ_CRASH("BaseCompiler platform hook: loadGlobalVarF32");
 #endif
     }
 
     void loadGlobalVarF64(unsigned globalDataOffset, RegF64 r)
     {
 #if defined(JS_CODEGEN_X64)
         CodeOffset label = masm.loadRipRelativeDouble(r.reg);
         masm.append(GlobalAccess(label, globalDataOffset));
 #elif defined(JS_CODEGEN_X86)
         CodeOffset label = masm.vmovsdWithPatch(PatchedAbsoluteAddress(), r.reg);
         masm.append(GlobalAccess(label, globalDataOffset));
 #elif defined(JS_CODEGEN_ARM)
-        unsigned addr = globalDataOffset - AsmJSGlobalRegBias;
+        unsigned addr = globalDataOffset - WasmGlobalRegBias;
         masm.ma_vldr(VFPAddr(GlobalReg, VFPOffImm(addr)), r.reg);
 #else
         MOZ_CRASH("BaseCompiler platform hook: loadGlobalVarF64");
 #endif
     }
 
     // CodeGeneratorX64::visitWasmStoreGlobal()
 
@@ -3067,17 +3067,17 @@ class BaseCompiler
 #if defined(JS_CODEGEN_X64)
         CodeOffset label = masm.storeRipRelativeInt32(r.reg);
         masm.append(GlobalAccess(label, globalDataOffset));
 #elif defined(JS_CODEGEN_X86)
         CodeOffset label = masm.movlWithPatch(r.reg, PatchedAbsoluteAddress());
         masm.append(GlobalAccess(label, globalDataOffset));
 #elif defined(JS_CODEGEN_ARM)
         ScratchRegisterScope scratch(*this); // Really must be the ARM scratchreg
-        unsigned addr = globalDataOffset - AsmJSGlobalRegBias;
+        unsigned addr = globalDataOffset - WasmGlobalRegBias;
         masm.ma_dtr(js::jit::IsStore, GlobalReg, Imm32(addr), r.reg, scratch);
 #else
         MOZ_CRASH("BaseCompiler platform hook: storeGlobalVarI32");
 #endif
     }
 
     void storeGlobalVarI64(unsigned globalDataOffset, RegI64 r)
     {
@@ -3086,17 +3086,17 @@ class BaseCompiler
         masm.append(GlobalAccess(label, globalDataOffset));
 #elif defined(JS_CODEGEN_X86)
         CodeOffset labelLow = masm.movlWithPatch(r.reg.low, PatchedAbsoluteAddress());
         masm.append(GlobalAccess(labelLow, globalDataOffset + INT64LOW_OFFSET));
         CodeOffset labelHigh = masm.movlWithPatch(r.reg.high, PatchedAbsoluteAddress());
         masm.append(GlobalAccess(labelHigh, globalDataOffset + INT64HIGH_OFFSET));
 #elif defined(JS_CODEGEN_ARM)
         ScratchRegisterScope scratch(*this); // Really must be the ARM scratchreg
-        unsigned addr = globalDataOffset - AsmJSGlobalRegBias;
+        unsigned addr = globalDataOffset - WasmGlobalRegBias;
         masm.ma_dtr(js::jit::IsStore, GlobalReg, Imm32(addr + INT64LOW_OFFSET), r.reg.low, scratch);
         masm.ma_dtr(js::jit::IsStore, GlobalReg, Imm32(addr + INT64HIGH_OFFSET), r.reg.high,
                     scratch);
 #else
         MOZ_CRASH("BaseCompiler platform hook: storeGlobalVarI64");
 #endif
     }
 
@@ -3104,34 +3104,34 @@ class BaseCompiler
     {
 #if defined(JS_CODEGEN_X64)
         CodeOffset label = masm.storeRipRelativeFloat32(r.reg);
         masm.append(GlobalAccess(label, globalDataOffset));
 #elif defined(JS_CODEGEN_X86)
         CodeOffset label = masm.vmovssWithPatch(r.reg, PatchedAbsoluteAddress());
         masm.append(GlobalAccess(label, globalDataOffset));
 #elif defined(JS_CODEGEN_ARM)
-        unsigned addr = globalDataOffset - AsmJSGlobalRegBias;
+        unsigned addr = globalDataOffset - WasmGlobalRegBias;
         VFPRegister vd(r.reg);
         masm.ma_vstr(vd.singleOverlay(), VFPAddr(GlobalReg, VFPOffImm(addr)));
 #else
         MOZ_CRASH("BaseCompiler platform hook: storeGlobalVarF32");
 #endif
     }
 
     void storeGlobalVarF64(unsigned globalDataOffset, RegF64 r)
     {
 #if defined(JS_CODEGEN_X64)
         CodeOffset label = masm.storeRipRelativeDouble(r.reg);
         masm.append(GlobalAccess(label, globalDataOffset));
 #elif defined(JS_CODEGEN_X86)
         CodeOffset label = masm.vmovsdWithPatch(r.reg, PatchedAbsoluteAddress());
         masm.append(GlobalAccess(label, globalDataOffset));
 #elif defined(JS_CODEGEN_ARM)
-        unsigned addr = globalDataOffset - AsmJSGlobalRegBias;
+        unsigned addr = globalDataOffset - WasmGlobalRegBias;
         masm.ma_vstr(r.reg, VFPAddr(GlobalReg, VFPOffImm(addr)));
 #else
         MOZ_CRASH("BaseCompiler platform hook: storeGlobalVarF64");
 #endif
     }
 
     //////////////////////////////////////////////////////////////////////
     //
@@ -7371,35 +7371,35 @@ BaseCompiler::init()
 
     for (ABIArgIter<const ValTypeVector> i(args); !i.done(); i++) {
         Local& l = localInfo_[i.index()];
         switch (i.mirType()) {
           case MIRType::Int32:
             if (i->argInRegister())
                 l.init(MIRType::Int32, pushLocal(4));
             else
-                l.init(MIRType::Int32, -(i->offsetFromArgBase() + sizeof(AsmJSFrame)));
+                l.init(MIRType::Int32, -(i->offsetFromArgBase() + sizeof(WasmFrame)));
             break;
           case MIRType::Int64:
             if (i->argInRegister())
                 l.init(MIRType::Int64, pushLocal(8));
             else
-                l.init(MIRType::Int64, -(i->offsetFromArgBase() + sizeof(AsmJSFrame)));
+                l.init(MIRType::Int64, -(i->offsetFromArgBase() + sizeof(WasmFrame)));
             break;
           case MIRType::Double:
             if (i->argInRegister())
                 l.init(MIRType::Double, pushLocal(8));
             else
-                l.init(MIRType::Double, -(i->offsetFromArgBase() + sizeof(AsmJSFrame)));
+                l.init(MIRType::Double, -(i->offsetFromArgBase() + sizeof(WasmFrame)));
             break;
           case MIRType::Float32:
             if (i->argInRegister())
                 l.init(MIRType::Float32, pushLocal(4));
             else
-                l.init(MIRType::Float32, -(i->offsetFromArgBase() + sizeof(AsmJSFrame)));
+                l.init(MIRType::Float32, -(i->offsetFromArgBase() + sizeof(WasmFrame)));
             break;
           default:
             MOZ_CRASH("Argument type");
         }
     }
 
     // Reserve a stack slot for the TLS pointer outside the varLow..varHigh
     // range so it isn't zero-filled like the normal locals.
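The frameAlignAdjustment above pads the frame so the stack stays JitStackAlignment-aligned once the WasmFrame itself is counted. A minimal sketch of that padding arithmetic, assuming ComputeByteAlignment has the usual round-up-to-boundary meaning:

    // Assumed definition: bytes of padding needed to reach the next multiple
    // of 'alignment' (a power of two).
    static uint32_t ComputeByteAlignmentSketch(uint32_t bytes, uint32_t alignment)
    {
        return (alignment - bytes % alignment) % alignment;
    }
    // e.g. framePushed() == 20, sizeof(WasmFrame) == 8, JitStackAlignment == 16:
    // ComputeByteAlignmentSketch(28, 16) == 4 bytes of padding.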
--- a/js/src/asmjs/WasmFrameIterator.cpp
+++ b/js/src/asmjs/WasmFrameIterator.cpp
@@ -30,23 +30,23 @@ using mozilla::DebugOnly;
 using mozilla::Swap;
 
 /*****************************************************************************/
 // FrameIterator implementation
 
 static void*
 ReturnAddressFromFP(void* fp)
 {
-    return reinterpret_cast<AsmJSFrame*>(fp)->returnAddress;
+    return reinterpret_cast<WasmFrame*>(fp)->returnAddress;
 }
 
 static uint8_t*
 CallerFPFromFP(void* fp)
 {
-    return reinterpret_cast<AsmJSFrame*>(fp)->callerFP;
+    return reinterpret_cast<WasmFrame*>(fp)->callerFP;
 }
 
 FrameIterator::FrameIterator()
   : activation_(nullptr),
     code_(nullptr),
     callsite_(nullptr),
     codeRange_(nullptr),
     fp_(nullptr),
@@ -396,17 +396,17 @@ wasm::GenerateFunctionPrologue(MacroAsse
         break;
     }
     offsets->tableProfilingJump = masm.nopPatchableToNearJump().offset();
 
     // Generate normal prologue:
     masm.nopAlign(CodeAlignment);
     offsets->nonProfilingEntry = masm.currentOffset();
     PushRetAddr(masm);
-    masm.subFromStackPtr(Imm32(framePushed + AsmJSFrameBytesAfterReturnAddress));
+    masm.subFromStackPtr(Imm32(framePushed + WasmFrameBytesAfterReturnAddress));
 
     // Prologue join point, body begin:
     masm.bind(&body);
     masm.setFramePushed(framePushed);
 }
 
 // Similar to GenerateFunctionPrologue (see comment), we generate both a
 // profiling and non-profiling epilogue a priori. When the profiling mode is
@@ -425,17 +425,17 @@ wasm::GenerateFunctionEpilogue(MacroAsse
     masm.flushBuffer();
 #endif
 
     // Generate a nop that is overwritten by a jump to the profiling epilogue
     // when profiling is enabled.
     offsets->profilingJump = masm.nopPatchableToNearJump().offset();
 
     // Normal epilogue:
-    masm.addToStackPtr(Imm32(framePushed + AsmJSFrameBytesAfterReturnAddress));
+    masm.addToStackPtr(Imm32(framePushed + WasmFrameBytesAfterReturnAddress));
     masm.ret();
     masm.setFramePushed(0);
 
     // Profiling epilogue:
     offsets->profilingEpilogue = masm.currentOffset();
     GenerateProfilingEpilogue(masm, framePushed, ExitReason::None, offsets);
 }
 
@@ -625,18 +625,18 @@ ProfilingFrameIterator::ProfilingFrameIt
     const CodeRange* codeRange = code_->lookupRange(state.pc);
     switch (codeRange->kind()) {
       case CodeRange::Function:
       case CodeRange::FarJumpIsland:
       case CodeRange::ImportJitExit:
       case CodeRange::ImportInterpExit:
       case CodeRange::TrapExit: {
         // When the pc is inside the prologue/epilogue, the innermost
-        // call's AsmJSFrame is not complete and thus fp points to the
-        // second-to-innermost call's AsmJSFrame. Since fp can only tell you
+        // call's WasmFrame is not complete and thus fp points to the
+        // second-to-innermost call's WasmFrame. Since fp can only tell you
         // about its caller (via ReturnAddressFromFP(fp)), naively unwinding
         // while pc is in the prologue/epilogue would skip the second-to-
         // innermost call. To avoid this problem, we use the static structure of
         // the code in the prologue and epilogue to do the Right Thing.
         uint32_t offsetInModule = (uint8_t*)state.pc - code_->segment().base();
         MOZ_ASSERT(offsetInModule >= codeRange->begin());
         MOZ_ASSERT(offsetInModule < codeRange->end());
         uint32_t offsetInCodeRange = offsetInModule - codeRange->begin();
@@ -645,32 +645,32 @@ ProfilingFrameIterator::ProfilingFrameIt
         if (offsetInCodeRange < PushedRetAddr || InThunk(*codeRange, offsetInModule)) {
             // First instruction of the ARM/MIPS function; the return address is
             // still in lr and fp still holds the caller's fp.
             callerPC_ = state.lr;
             callerFP_ = fp;
             AssertMatchesCallSite(*activation_, callerPC_, callerFP_, sp - 2);
         } else if (offsetInModule == codeRange->profilingReturn() - PostStorePrePopFP) {
             // Second-to-last instruction of the ARM/MIPS function; fp points to
-            // the caller's fp; have not yet popped AsmJSFrame.
+            // the caller's fp; have not yet popped WasmFrame.
             callerPC_ = ReturnAddressFromFP(sp);
             callerFP_ = CallerFPFromFP(sp);
             AssertMatchesCallSite(*activation_, callerPC_, callerFP_, sp);
         } else
 #endif
         if (offsetInCodeRange < PushedFP || offsetInModule == codeRange->profilingReturn() ||
             InThunk(*codeRange, offsetInModule))
         {
             // The return address has been pushed on the stack but not fp; fp
             // still points to the caller's fp.
             callerPC_ = *sp;
             callerFP_ = fp;
             AssertMatchesCallSite(*activation_, callerPC_, callerFP_, sp - 1);
         } else if (offsetInCodeRange < StoredFP) {
-            // The full AsmJSFrame has been pushed; fp still points to the
+            // The full WasmFrame has been pushed; fp still points to the
             // caller's frame.
             MOZ_ASSERT(fp == CallerFPFromFP(sp));
             callerPC_ = ReturnAddressFromFP(sp);
             callerFP_ = CallerFPFromFP(sp);
             AssertMatchesCallSite(*activation_, callerPC_, callerFP_, sp);
         } else {
             // Not in the prologue/epilogue.
             callerPC_ = ReturnAddressFromFP(fp);
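The prologue/epilogue special cases above reduce to a classification by offset within the code range. A hedged restatement as standalone code (PushedRetAddr, PushedFP, and StoredFP are the platform-defined prologue offsets used in this file):

    enum class UnwindZone { BeforePush, RetAddrPushed, FPStored, FrameComplete };

    // Sketch of the decision only, not the literal control flow above.
    static UnwindZone ClassifyOffset(uint32_t off, uint32_t pushedRetAddr,
                                     uint32_t pushedFP, uint32_t storedFP)
    {
        if (off < pushedRetAddr)
            return UnwindZone::BeforePush;    // callerPC still in lr (ARM/MIPS)
        if (off < pushedFP)
            return UnwindZone::RetAddrPushed; // callerPC at *sp; fp = caller's fp
        if (off < storedFP)
            return UnwindZone::FPStored;      // callerFP at *sp; fp not yet set
        return UnwindZone::FrameComplete;     // unwind through fp as usual
    }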
--- a/js/src/asmjs/WasmGenerator.cpp
+++ b/js/src/asmjs/WasmGenerator.cpp
@@ -44,26 +44,26 @@ static const unsigned COMPILATION_LIFO_D
 
 ModuleGenerator::ModuleGenerator(ImportVector&& imports)
   : alwaysBaseline_(false),
     imports_(Move(imports)),
     numSigs_(0),
     numTables_(0),
     lifo_(GENERATOR_LIFO_DEFAULT_CHUNK_SIZE),
     masmAlloc_(&lifo_),
-    masm_(MacroAssembler::AsmJSToken(), masmAlloc_),
+    masm_(MacroAssembler::WasmToken(), masmAlloc_),
     lastPatchedCallsite_(0),
     startOfUnpatchedCallsites_(0),
     parallel_(false),
     outstanding_(0),
     activeFuncDef_(nullptr),
     startedFuncDefs_(false),
     finishedFuncDefs_(false)
 {
-    MOZ_ASSERT(IsCompilingAsmJS());
+    MOZ_ASSERT(IsCompilingWasm());
 }
 
 ModuleGenerator::~ModuleGenerator()
 {
     if (parallel_) {
         // Wait for any outstanding jobs to fail or complete.
         if (outstanding_) {
             AutoLockHelperThreadState lock;
@@ -457,17 +457,17 @@ ModuleGenerator::finishCodegen()
     TrapExitOffsetArray trapExits;
     Offsets outOfBoundsExit;
     Offsets unalignedAccessExit;
     Offsets interruptExit;
     Offsets throwStub;
 
     {
         TempAllocator alloc(&lifo_);
-        MacroAssembler masm(MacroAssembler::AsmJSToken(), alloc);
+        MacroAssembler masm(MacroAssembler::WasmToken(), alloc);
         Label throwLabel;
 
         if (!entries.resize(numFuncDefExports))
             return false;
         for (uint32_t i = 0; i < numFuncDefExports; i++)
             entries[i] = GenerateEntry(masm, metadata_->funcDefExports[i]);
 
         if (!interpExits.resize(numFuncImports()))
@@ -566,18 +566,18 @@ ModuleGenerator::finishCodegen()
 bool
 ModuleGenerator::finishLinkData(Bytes& code)
 {
     // Inflate the global bytes up to page size so that the total bytes are a
     // page size (as required by the allocator functions).
     linkData_.globalDataLength = AlignBytes(linkData_.globalDataLength, gc::SystemPageSize());
 
     // Add links to absolute addresses identified symbolically.
-    for (size_t i = 0; i < masm_.numAsmJSAbsoluteAddresses(); i++) {
-        AsmJSAbsoluteAddress src = masm_.asmJSAbsoluteAddress(i);
+    for (size_t i = 0; i < masm_.numWasmAbsoluteAddresses(); i++) {
+        WasmAbsoluteAddress src = masm_.wasmAbsoluteAddress(i);
         if (!linkData_.symbolicLinks[src.target].append(src.patchAt.offset()))
             return false;
     }
 
     // Relative link metadata: absolute addresses that refer to another point within
     // the asm.js module.
 
     // CodeLabels are used for switch cases and loads from floating-point /
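The loop above records one symbolic link per absolute-address patch site. The record's shape, as implied by the src.target and src.patchAt uses (a sketch; the real declaration is in the assembler headers, not this patch):

    // Assumed shape, inferred from usage in finishLinkData().
    struct WasmAbsoluteAddressSketch
    {
        jit::CodeOffset patchAt;       // code offset to patch at link time
        wasm::SymbolicAddress target;  // runtime address it resolves to
    };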
--- a/js/src/asmjs/WasmIonCompile.cpp
+++ b/js/src/asmjs/WasmIonCompile.cpp
@@ -853,17 +853,17 @@ class FunctionCompiler
     /***************************************************************** Calls */
 
     // The IonMonkey backend maintains a single stack offset (from the stack
     // pointer to the base of the frame) by adding the total amount of spill
     // space required plus the maximum stack required for argument passing.
     // Since we do not use IonMonkey's MPrepareCall/MPassArg/MCall, we must
     // manually accumulate, for the entire function, the maximum required stack
     // space for argument passing. (This is passed to the CodeGenerator via
-    // MIRGenerator::maxAsmJSStackArgBytes.) Naively, this would just be the
+    // MIRGenerator::maxWasmStackArgBytes.) Naively, this would just be the
     // maximum of the stack space required for each individual call (as
     // determined by the call ABI). However, as an optimization, arguments are
     // stored to the stack immediately after evaluation (to decrease live
     // ranges and reduce spilling). This introduces the complexity that,
     // between evaluating an argument and making the call, another argument
     // evaluation could perform a call that also needs to store to the stack.
     // When this occurs childClobbers_ = true and the parent expression's
     // arguments are stored above the maximum depth clobbered by a child
@@ -953,17 +953,17 @@ class FunctionCompiler
         // Record the stack offset before including spIncrement since MWasmCall
         // will use this offset after having bumped the stack pointer.
         if (tls == TlsUsage::CallerSaved) {
             call->tlsStackOffset_ = stackBytes;
             stackBytes += sizeof(void*);
         }
 
         if (call->childClobbers_) {
-            call->spIncrement_ = AlignBytes(call->maxChildStackBytes_, AsmJSStackAlignment);
+            call->spIncrement_ = AlignBytes(call->maxChildStackBytes_, WasmStackAlignment);
             for (MWasmStackArg* stackArg : call->stackArgs_)
                 stackArg->incrementOffset(call->spIncrement_);
 
             // If instanceArg_ is not initialized then instanceArg_.kind() != ABIArg::Stack
             if (call->instanceArg_.kind() == ABIArg::Stack) {
                 call->instanceArg_ = ABIArg(call->instanceArg_.offsetFromArgBase() +
                                             call->spIncrement_);
             }
@@ -3759,18 +3759,18 @@ wasm::IonCompileFunction(IonCompileTask*
 
     // Set up for Ion compilation.
 
     JitContext jitContext(&results.alloc());
     const JitCompileOptions options;
     MIRGraph graph(&results.alloc());
     CompileInfo compileInfo(locals.length());
     MIRGenerator mir(nullptr, options, &results.alloc(), &graph, &compileInfo,
-                     IonOptimizations.get(OptimizationLevel::AsmJS));
-    mir.initMinAsmJSHeapLength(task->mg().minMemoryLength);
+                     IonOptimizations.get(OptimizationLevel::Wasm));
+    mir.initMinWasmHeapLength(task->mg().minMemoryLength);
 
     // Capture the prologue's trap site before decoding the function.
 
     TrapOffset prologueTrapOffset;
 
     // Build MIR graph
     {
         FunctionCompiler f(task->mg(), d, func, locals, mir, results);
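The spIncrement_ computation earlier in this file rounds a child call's clobbered depth up to WasmStackAlignment before shifting the parent's stack arguments. A small worked sketch, assuming AlignBytes is the usual power-of-two round-up:

    // Assumed definition: round 'bytes' up to the next multiple of 'alignment'.
    static uint32_t AlignBytesSketch(uint32_t bytes, uint32_t alignment)
    {
        return (bytes + alignment - 1) & ~(alignment - 1);
    }
    // e.g. maxChildStackBytes_ == 20, WasmStackAlignment == 16:
    // spIncrement_ == AlignBytesSketch(20, 16) == 32, and each parent stack
    // arg's offset is bumped by those 32 bytes.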
--- a/js/src/asmjs/WasmIonCompile.h
+++ b/js/src/asmjs/WasmIonCompile.h
@@ -76,17 +76,17 @@ class FuncCompileResults
     FuncOffsets offsets_;
 
     FuncCompileResults(const FuncCompileResults&) = delete;
     FuncCompileResults& operator=(const FuncCompileResults&) = delete;
 
   public:
     explicit FuncCompileResults(LifoAlloc& lifo)
       : alloc_(&lifo),
-        masm_(jit::MacroAssembler::AsmJSToken(), alloc_)
+        masm_(jit::MacroAssembler::WasmToken(), alloc_)
     {}
 
     jit::TempAllocator& alloc() { return alloc_; }
     jit::MacroAssembler& masm() { return masm_; }
     FuncOffsets& offsets() { return offsets_; }
 };
 
 // An IonCompileTask represents the task of compiling a single function body. An
--- a/js/src/asmjs/WasmJS.cpp
+++ b/js/src/asmjs/WasmJS.cpp
@@ -1660,9 +1660,8 @@ js::InitWebAssemblyClass(JSContext* cx, 
     global->setPrototype(JSProto_WasmInstance, ObjectValue(*instanceProto));
     global->setPrototype(JSProto_WasmMemory, ObjectValue(*memoryProto));
     global->setPrototype(JSProto_WasmTable, ObjectValue(*tableProto));
     global->setConstructor(JSProto_WebAssembly, ObjectValue(*wasm));
 
     MOZ_ASSERT(global->isStandardClassResolved(JSProto_WebAssembly));
     return wasm;
 }
-
--- a/js/src/asmjs/WasmStubs.cpp
+++ b/js/src/asmjs/WasmStubs.cpp
@@ -29,24 +29,24 @@ using namespace js;
 using namespace js::jit;
 using namespace js::wasm;
 
 using mozilla::ArrayLength;
 
 static void
 AssertStackAlignment(MacroAssembler& masm, uint32_t alignment, uint32_t addBeforeAssert = 0)
 {
-    MOZ_ASSERT((sizeof(AsmJSFrame) + masm.framePushed() + addBeforeAssert) % alignment == 0);
+    MOZ_ASSERT((sizeof(WasmFrame) + masm.framePushed() + addBeforeAssert) % alignment == 0);
     masm.assertStackAlignment(alignment, addBeforeAssert);
 }
 
 static unsigned
 StackDecrementForCall(MacroAssembler& masm, uint32_t alignment, unsigned bytesToPush)
 {
-    return StackDecrementForCall(alignment, sizeof(AsmJSFrame) + masm.framePushed(), bytesToPush);
+    return StackDecrementForCall(alignment, sizeof(WasmFrame) + masm.framePushed(), bytesToPush);
 }
 
 template <class VectorT>
 static unsigned
 StackArgBytes(const VectorT& args)
 {
     ABIArgIter<VectorT> iter(args);
     while (!iter.done())
@@ -117,17 +117,17 @@ wasm::GenerateEntry(MacroAssembler& masm
     MOZ_ASSERT(masm.framePushed() == FramePushedAfterSave);
 
     // Put the 'argv' argument into a non-argument/return/TLS register so that
     // we can use 'argv' while we fill in the arguments for the asm.js callee.
     Register argv = ABINonArgReturnReg0;
     Register scratch = ABINonArgReturnReg1;
 
     // Read the arguments of wasm::ExportFuncPtr according to the native ABI.
-    // The entry stub's frame is only 1 word, not the usual 2 for AsmJSFrame.
+    // The entry stub's frame is only 1 word, not the usual 2 for WasmFrame.
     const unsigned argBase = sizeof(void*) + masm.framePushed();
     ABIArgGenerator abi;
     ABIArg arg;
 
     // arg 1: ExportArg*
     arg = abi.next(MIRType::Pointer);
     if (arg.kind() == ABIArg::GPR)
         masm.movePtr(arg.gpr(), argv);
@@ -150,22 +150,22 @@ wasm::GenerateEntry(MacroAssembler& masm
 
     // Save the stack pointer in the WasmActivation right before dynamically
     // aligning the stack so that it may be recovered on return or throw.
     MOZ_ASSERT(masm.framePushed() == FramePushedForEntrySP);
     masm.loadWasmActivationFromTls(scratch);
     masm.storeStackPtr(Address(scratch, WasmActivation::offsetOfEntrySP()));
 
     // Dynamically align the stack since ABIStackAlignment is not necessarily
-    // AsmJSStackAlignment. We'll use entrySP to recover the original stack
+    // WasmStackAlignment. We'll use entrySP to recover the original stack
     // pointer on return.
-    masm.andToStackPtr(Imm32(~(AsmJSStackAlignment - 1)));
+    masm.andToStackPtr(Imm32(~(WasmStackAlignment - 1)));
 
     // Bump the stack for the call.
-    masm.reserveStack(AlignBytes(StackArgBytes(func.sig().args()), AsmJSStackAlignment));
+    masm.reserveStack(AlignBytes(StackArgBytes(func.sig().args()), WasmStackAlignment));
 
     // Copy parameters out of argv and into the registers/stack-slots specified by
     // the system ABI.
     for (ABIArgValTypeIter iter(func.sig().args()); !iter.done(); iter++) {
         unsigned argOffset = iter.index() * sizeof(ExportArg);
         Address src(argv, argOffset);
         MIRType type = iter.mirType();
         switch (iter->kind()) {
@@ -258,17 +258,17 @@ wasm::GenerateEntry(MacroAssembler& masm
               default:
                 MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected stack arg type");
             }
             break;
         }
     }
 
     // Call into the real function.
-    masm.assertStackAlignment(AsmJSStackAlignment);
+    masm.assertStackAlignment(WasmStackAlignment);
     masm.call(CallSiteDesc(CallSiteDesc::FuncDef), func.funcDefIndex());
 
     // Recover the stack pointer value before dynamic alignment.
     masm.loadWasmActivationFromTls(scratch);
     masm.loadStackPtr(Address(scratch, WasmActivation::offsetOfEntrySP()));
     masm.setFramePushed(FramePushedForEntrySP);
 
     // Recover the 'argv' pointer which was saved before aligning the stack.
@@ -456,17 +456,17 @@ wasm::GenerateInterpExit(MacroAssembler&
     unsigned argOffset = AlignBytes(StackArgBytes(invokeArgTypes), sizeof(double));
     unsigned argBytes = Max<size_t>(1, sig.args().length()) * sizeof(Value);
     unsigned framePushed = StackDecrementForCall(masm, ABIStackAlignment, argOffset + argBytes);
 
     ProfilingOffsets offsets;
     GenerateExitPrologue(masm, framePushed, ExitReason::ImportInterp, &offsets);
 
     // Fill the argument array.
-    unsigned offsetToCallerStackArgs = sizeof(AsmJSFrame) + masm.framePushed();
+    unsigned offsetToCallerStackArgs = sizeof(WasmFrame) + masm.framePushed();
     Register scratch = ABINonArgReturnReg0;
     FillArgumentArray(masm, sig.args(), argOffset, offsetToCallerStackArgs, scratch, ToValue(false));
 
     // Prepare the arguments for the call to Module::callImport_*.
     ABIArgMIRTypeIter i(invokeArgTypes);
 
     // argument 0: Instance*
     Address instancePtr(WasmTlsReg, offsetof(TlsData, instance));
@@ -576,17 +576,17 @@ wasm::GenerateJitExit(MacroAssembler& ma
     masm.setFramePushed(0);
 
     // JIT calls use the following stack layout (sp grows to the left):
     //   | retaddr | descriptor | callee | argc | this | arg1..N |
     // After the JIT frame, the global register (if present) is saved since the
     // JIT's ABI does not preserve non-volatile regs. Also, unlike most ABIs,
     // the JIT ABI requires that sp be JitStackAlignment-aligned *after* pushing
     // the return address.
-    static_assert(AsmJSStackAlignment >= JitStackAlignment, "subsumes");
+    static_assert(WasmStackAlignment >= JitStackAlignment, "subsumes");
     unsigned sizeOfRetAddr = sizeof(void*);
     unsigned jitFrameBytes = 3 * sizeof(void*) + (1 + sig.args().length()) * sizeof(Value);
     unsigned totalJitFrameBytes = sizeOfRetAddr + jitFrameBytes + SavedTlsReg;
     unsigned jitFramePushed = StackDecrementForCall(masm, JitStackAlignment, totalJitFrameBytes) -
                               sizeOfRetAddr;
 
     ProfilingOffsets offsets;
     GenerateExitPrologue(masm, jitFramePushed, ExitReason::ImportJit, &offsets);
@@ -618,35 +618,35 @@ wasm::GenerateJitExit(MacroAssembler& ma
     masm.storePtr(ImmWord(uintptr_t(argc)), Address(masm.getStackPointer(), argOffset));
     argOffset += sizeof(size_t);
 
     // 4. |this| value
     masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), argOffset));
     argOffset += sizeof(Value);
 
     // 5. Fill the arguments
-    unsigned offsetToCallerStackArgs = jitFramePushed + sizeof(AsmJSFrame);
+    unsigned offsetToCallerStackArgs = jitFramePushed + sizeof(WasmFrame);
     FillArgumentArray(masm, sig.args(), argOffset, offsetToCallerStackArgs, scratch, ToValue(true));
     argOffset += sig.args().length() * sizeof(Value);
     MOZ_ASSERT(argOffset == jitFrameBytes);
 
     // 6. Jit code will clobber all registers, even non-volatiles. WasmTlsReg
     //    must be kept live for the benefit of the epilogue, so push it on the
     //    stack so that it can be restored before the epilogue.
     static_assert(SavedTlsReg == sizeof(void*), "stack frame accounting");
     masm.storePtr(WasmTlsReg, Address(masm.getStackPointer(), jitFrameBytes));
 
     {
         // Enable Activation.
         //
         // This sequence requires two registers, and needs to preserve the
         // 'callee' register, so there are three live registers.
-        MOZ_ASSERT(callee == AsmJSIonExitRegCallee);
-        Register cx = AsmJSIonExitRegE0;
-        Register act = AsmJSIonExitRegE1;
+        MOZ_ASSERT(callee == WasmIonExitRegCallee);
+        Register cx = WasmIonExitRegE0;
+        Register act = WasmIonExitRegE1;
 
         // JitActivation* act = cx->activation();
         masm.movePtr(SymbolicAddress::Context, cx);
         masm.loadPtr(Address(cx, JSContext::offsetOfActivation()), act);
 
         // act.active_ = true;
         masm.store8(Imm32(1), Address(act, JitActivation::offsetOfActiveUint8()));
 
@@ -661,21 +661,21 @@ wasm::GenerateJitExit(MacroAssembler& ma
     masm.callJitNoProfiler(callee);
     AssertStackAlignment(masm, JitStackAlignment, sizeOfRetAddr);
 
     {
         // Disable Activation.
         //
         // This sequence needs three registers, and must preserve the JSReturnReg_Data and
         // JSReturnReg_Type, so there are five live registers.
-        MOZ_ASSERT(JSReturnReg_Data == AsmJSIonExitRegReturnData);
-        MOZ_ASSERT(JSReturnReg_Type == AsmJSIonExitRegReturnType);
-        Register cx = AsmJSIonExitRegD0;
-        Register act = AsmJSIonExitRegD1;
-        Register tmp = AsmJSIonExitRegD2;
+        MOZ_ASSERT(JSReturnReg_Data == WasmIonExitRegReturnData);
+        MOZ_ASSERT(JSReturnReg_Type == WasmIonExitRegReturnType);
+        Register cx = WasmIonExitRegD0;
+        Register act = WasmIonExitRegD1;
+        Register tmp = WasmIonExitRegD2;
 
         // JitActivation* act = cx->activation();
         masm.movePtr(SymbolicAddress::Context, cx);
         masm.loadPtr(Address(cx, JSContext::offsetOfActivation()), act);
 
         // cx->jitTop = act->prevJitTop_;
         masm.loadPtr(Address(act, JitActivation::offsetOfPrevJitTop()), tmp);
         masm.storePtr(tmp, Address(cx, offsetof(JSContext, jitTop)));
@@ -745,17 +745,17 @@ wasm::GenerateJitExit(MacroAssembler& ma
 
     GenerateExitEpilogue(masm, masm.framePushed(), ExitReason::ImportJit, &offsets);
 
     if (oolConvert.used()) {
         masm.bind(&oolConvert);
         masm.setFramePushed(nativeFramePushed);
 
         // Coercion calls use the following stack layout (sp grows to the left):
-        //   | args | padding | Value argv[1] | padding | exit AsmJSFrame |
+        //   | args | padding | Value argv[1] | padding | exit WasmFrame |
         MIRTypeVector coerceArgTypes;
         JS_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
         unsigned offsetToCoerceArgv = AlignBytes(StackArgBytes(coerceArgTypes), sizeof(Value));
         MOZ_ASSERT(nativeFramePushed >= offsetToCoerceArgv + sizeof(Value));
         AssertStackAlignment(masm, ABIStackAlignment);
 
         // Store return value into argv[0]
         masm.storeValue(JSReturnOperand, Address(masm.getStackPointer(), offsetToCoerceArgv));
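The entry stub aligns the stack dynamically because the native ABI's alignment may be smaller than WasmStackAlignment. What andToStackPtr(Imm32(~(WasmStackAlignment - 1))) computes, in plain C++ (the alignment value here is illustrative):

    // Sketch: round sp down to a WasmStackAlignment boundary (a power of two).
    static uintptr_t AlignStackDown(uintptr_t sp)
    {
        const uintptr_t WasmStackAlignment = 16;  // illustrative value
        return sp & ~(WasmStackAlignment - 1);
    }
    // The pre-alignment sp was saved in WasmActivation::entrySP above, which is
    // how the original stack pointer is recovered on return or throw.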
--- a/js/src/jit/AlignmentMaskAnalysis.cpp
+++ b/js/src/jit/AlignmentMaskAnalysis.cpp
@@ -30,22 +30,22 @@ AnalyzeAsmHeapAddress(MDefinition* ptr, 
     // are transformed into this:
     //   a&m
     //   (a&m)+1
     //   (a&m)+2
     // and it will allow the constants to be folded by the
     // EffectiveAddressAnalysis pass.
     //
     // Putting the add on the outside might seem like it exposes other users of
-    // the expression to the possibility of i32 overflow, if we aren't in asm.js
+    // the expression to the possibility of i32 overflow, if we aren't in wasm
     // and they aren't naturally truncating. However, since we use MAdd::New
     // with MIRType::Int32, we make sure that the value is truncated, just as it
     // would be by the MBitAnd.
 
-    MOZ_ASSERT(IsCompilingAsmJS());
+    MOZ_ASSERT(IsCompilingWasm());
 
     if (!ptr->isBitAnd())
         return;
 
     MDefinition* lhs = ptr->toBitAnd()->getOperand(0);
     MDefinition* rhs = ptr->toBitAnd()->getOperand(1);
     if (lhs->isConstant())
         mozilla::Swap(lhs, rhs);
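Restating the rewrite documented in the pass's comment above in one place (the before/after shapes come straight from that comment; validity relies on MIRType::Int32 truncation, not on value equality in general):

    // heap[(a + 0) & m]; heap[(a + 1) & m]; heap[(a + 2) & m];  // before
    // int32_t t = a & m;                                        // after
    // heap[t + 0]; heap[t + 1]; heap[t + 2];  // offsets now fold in the
    //                                         // EffectiveAddressAnalysis pass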
--- a/js/src/jit/BacktrackingAllocator.cpp
+++ b/js/src/jit/BacktrackingAllocator.cpp
@@ -2588,17 +2588,17 @@ BacktrackingAllocator::trySplitAcrossHot
     }
     if (!coldCode) {
         JitSpew(JitSpew_RegAlloc, "  bundle does not contain cold code");
         return true;
     }
 
     JitSpew(JitSpew_RegAlloc, "  split across hot range %s", hotRange->toString().get());
 
-    // Tweak the splitting method when compiling asm.js code to look at actual
+    // Tweak the splitting method when compiling wasm code to look at actual
     // uses within the hot/cold code. This heuristic is in place as the below
     // mechanism regresses several asm.js tests. Hopefully this will be fixed
     // soon and this special case removed. See bug 948838.
     if (compilingWasm()) {
         SplitPositionVector splitPositions;
         if (!splitPositions.append(hotRange->from()) || !splitPositions.append(hotRange->to()))
             return false;
         *success = true;
--- a/js/src/jit/C1Spewer.cpp
+++ b/js/src/jit/C1Spewer.cpp
@@ -26,18 +26,18 @@ C1Spewer::beginFunction(MIRGraph* graph,
 {
     this->graph  = graph;
 
     out_.printf("begin_compilation\n");
     if (script) {
         out_.printf("  name \"%s:%" PRIuSIZE "\"\n", script->filename(), script->lineno());
         out_.printf("  method \"%s:%" PRIuSIZE "\"\n", script->filename(), script->lineno());
     } else {
-        out_.printf("  name \"asm.js compilation\"\n");
-        out_.printf("  method \"asm.js compilation\"\n");
+        out_.printf("  name \"wasm compilation\"\n");
+        out_.printf("  method \"wasm compilation\"\n");
     }
     out_.printf("  date %d\n", (int)time(nullptr));
     out_.printf("end_compilation\n");
 }
 
 void
 C1Spewer::spewPass(const char* pass)
 {
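With the renamed strings, the c1 header block for a scriptless (wasm) compilation would come out roughly like this (illustrative output assembled from the printf calls above; the date is whatever time(nullptr) returns):

    begin_compilation
      name "wasm compilation"
      method "wasm compilation"
      date 1478102817
    end_compilation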
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -163,17 +163,17 @@ CodeGenerator::CodeGenerator(MIRGenerato
   , ionScriptLabels_(gen->alloc())
   , scriptCounts_(nullptr)
   , simdRefreshTemplatesDuringLink_(0)
 {
 }
 
 CodeGenerator::~CodeGenerator()
 {
-    MOZ_ASSERT_IF(!gen->compilingWasm(), masm.numAsmJSAbsoluteAddresses() == 0);
+    MOZ_ASSERT_IF(!gen->compilingWasm(), masm.numWasmAbsoluteAddresses() == 0);
     js_delete(scriptCounts_);
 }
 
 typedef bool (*StringToNumberFn)(ExclusiveContext*, JSString*, double*);
 static const VMFunction StringToNumberInfo =
     FunctionInfo<StringToNumberFn>(StringToNumber, "StringToNumber");
 
 void
@@ -4724,17 +4724,17 @@ CodeGenerator::visitCheckOverRecursedFai
     masm.jump(ool->rejoin());
 }
 
 IonScriptCounts*
 CodeGenerator::maybeCreateScriptCounts()
 {
     // If scripts are being profiled, create a new IonScriptCounts for the
     // profiling data, which will be attached to the associated JSScript or
-    // AsmJS module after code generation finishes.
+    // wasm module after code generation finishes.
     if (!GetJitContext()->hasProfilingScripts())
         return nullptr;
 
     // This test inhibits IonScriptCount creation for wasm code which is
     // currently incompatible with wasm codegen for two reasons: (1) wasm code
     // must be serializable and script count codegen bakes in absolute
     // addresses, (2) wasm code does not have a JSScript with which to associate
     // code coverage data.
@@ -9173,17 +9173,17 @@ CodeGenerator::visitRest(LRest* lir)
 
     emitRest(lir, temp2, numActuals, temp0, temp1, numFormals, templateObject, false, ToRegister(lir->output()));
 }
 
 bool
 CodeGenerator::generateWasm(wasm::SigIdDesc sigId, wasm::TrapOffset trapOffset,
                             wasm::FuncOffsets* offsets)
 {
-    JitSpew(JitSpew_Codegen, "# Emitting asm.js code");
+    JitSpew(JitSpew_Codegen, "# Emitting wasm code");
 
     wasm::GenerateFunctionPrologue(masm, frameSize(), sigId, trapOffset, offsets);
 
     // Overflow checks are omitted by CodeGenerator in some cases (leaf
     // functions with small framePushed). Perform overflow-checking after
     // pushing framePushed to catch cases with really large frames.
     Label onOverflow;
     if (!omitOverRecursedCheck()) {
--- a/js/src/jit/EffectiveAddressAnalysis.cpp
+++ b/js/src/jit/EffectiveAddressAnalysis.cpp
@@ -213,17 +213,17 @@ EffectiveAddressAnalysis::analyzeAsmJSHe
             ins->block()->insertBefore(ins, zero);
             ins->replaceBase(zero);
         }
 
         // If the index is within the minimum heap length, we can optimize
         // away the bounds check.
         if (imm >= 0) {
             int32_t end = (uint32_t)imm + ins->byteSize();
-            if (end >= imm && (uint32_t)end <= mir_->minAsmJSHeapLength())
+            if (end >= imm && (uint32_t)end <= mir_->minWasmHeapLength())
                  ins->removeBoundsCheck();
         }
     } else if (base->isAdd()) {
         // Look for heap[a+i] where i is a constant offset, and fold the offset.
         // Alignment masks have already been moved out of the way by the
         // Alignment Mask Analysis pass.
         MDefinition* op0 = base->toAdd()->getOperand(0);
         MDefinition* op1 = base->toAdd()->getOperand(1);
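The constant-index test above reads naturally as a standalone predicate. A sketch with the overflow guard called out (minWasmHeapLength is the renamed MIRGenerator accessor):

    // Sketch: a constant-index access needs no bounds check when the whole
    // access provably fits under the module's minimum heap length.
    static bool CanElideBoundsCheck(int32_t imm, uint32_t byteSize,
                                    uint32_t minWasmHeapLength)
    {
        if (imm < 0)
            return false;
        int32_t end = (uint32_t)imm + byteSize;  // may wrap around in int32
        return end >= imm &&                     // rejects the wrapped case
               (uint32_t)end <= minWasmHeapLength;
    }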
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -3427,17 +3427,17 @@ AutoFlushICache::setInhibit()
 // The common use case is merging cache flushes when preparing a code object.  In this
 // case the entire range of the code object is being flushed and as the code is patched
 // smaller redundant flushes could occur.  The design allows an AutoFlushICache dynamic
 // thread local context to be declared in which the range of the code object can be set
 // which defers flushing until the end of this dynamic context.  The redundant flushing
 // within this code range is also deferred avoiding redundant flushing.  Flushing outside
 // this code range is not affected and proceeds immediately.
 //
-// In some cases flushing is not necessary, such as when compiling an asm.js module which
+// In some cases flushing is not necessary, such as when compiling a wasm module which
 // is flushed again when dynamically linked, and also in error paths that abandon the
 // code.  Flushing within the set code range can be inhibited within the AutoFlushICache
 // dynamic context by setting an inhibit flag.
 //
 // The JS compiler can be re-entered while within an AutoFlushICache dynamic context and
 // it is assumed that code being assembled or patched is not executed before the exit of
 // the respective AutoFlushICache dynamic context.
 //
--- a/js/src/jit/IonAnalysis.cpp
+++ b/js/src/jit/IonAnalysis.cpp
@@ -278,17 +278,17 @@ ConvertToBailingBlock(TempAllocator& all
 
     // And replace the last instruction by the unreachable control instruction.
     block->end(MUnreachable::New(alloc));
 }
 
 bool
 jit::PruneUnusedBranches(MIRGenerator* mir, MIRGraph& graph)
 {
-    MOZ_ASSERT(!mir->compilingWasm(), "AsmJS compilation have no code coverage support.");
+    MOZ_ASSERT(!mir->compilingWasm(), "wasm compilation has no code coverage support.");
 
     // We do a reverse-post-order traversal, marking basic blocks when the block
     // have to be converted into bailing blocks, and flagging block as
     // unreachable if all predecessors are flagged as bailing or unreachable.
     bool someUnreachable = false;
     for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
         if (mir->shouldCancel("Prune unused branches (main loop)"))
             return false;
--- a/js/src/jit/IonOptimizationLevels.cpp
+++ b/js/src/jit/IonOptimizationLevels.cpp
@@ -50,32 +50,32 @@ OptimizationInfo::initNormalOptimization
     smallFunctionMaxInlineDepth_ = 10;
     compilerWarmUpThreshold_ = CompilerWarmupThreshold;
     compilerSmallFunctionWarmUpThreshold_ = CompilerSmallFunctionWarmupThreshold;
     inliningWarmUpThresholdFactor_ = 0.125;
     inliningRecompileThresholdFactor_ = 4;
 }
 
 void
-OptimizationInfo::initAsmjsOptimizationInfo()
+OptimizationInfo::initWasmOptimizationInfo()
 {
-    // The AsmJS optimization level
-    // Disables some passes that don't work well with asmjs.
+    // The wasm optimization level disables some passes that don't work well
+    // with wasm.
 
     // Take normal option values for not specified values.
     initNormalOptimizationInfo();
 
-    level_ = OptimizationLevel::AsmJS;
+    level_ = OptimizationLevel::Wasm;
 
     ama_ = true;
     autoTruncate_ = false;
-    eagerSimdUnbox_ = false;           // AsmJS has no boxing / unboxing.
+    eagerSimdUnbox_ = false;           // wasm has no boxing / unboxing.
     edgeCaseAnalysis_ = false;
     eliminateRedundantChecks_ = false;
-    scalarReplacement_ = false;        // AsmJS has no objects.
+    scalarReplacement_ = false;        // wasm has no objects.
     sincos_ = false;
     sink_ = false;
 }
 
 uint32_t
 OptimizationInfo::compilerWarmUpThreshold(JSScript* script, jsbytecode* pc) const
 {
     MOZ_ASSERT(pc == nullptr || pc == script->code() || JSOp(*pc) == JSOP_LOOPENTRY);
@@ -114,17 +114,17 @@ OptimizationInfo::compilerWarmUpThreshol
     uint32_t loopDepth = LoopEntryDepthHint(pc);
     MOZ_ASSERT(loopDepth > 0);
     return warmUpThreshold + loopDepth * 100;
 }
 
 OptimizationLevelInfo::OptimizationLevelInfo()
 {
     infos_[OptimizationLevel::Normal].initNormalOptimizationInfo();
-    infos_[OptimizationLevel::AsmJS].initAsmjsOptimizationInfo();
+    infos_[OptimizationLevel::Wasm].initWasmOptimizationInfo();
 
 #ifdef DEBUG
     OptimizationLevel level = firstLevel();
     while (!isLastLevel(level)) {
         OptimizationLevel next = nextLevel(level);
         MOZ_ASSERT_IF(level != OptimizationLevel::DontCompile, level < next);
         level = next;
     }
@@ -134,17 +134,17 @@ OptimizationLevelInfo::OptimizationLevel
 OptimizationLevel
 OptimizationLevelInfo::nextLevel(OptimizationLevel level) const
 {
     MOZ_ASSERT(!isLastLevel(level));
     switch (level) {
       case OptimizationLevel::DontCompile:
         return OptimizationLevel::Normal;
       case OptimizationLevel::Normal:
-      case OptimizationLevel::AsmJS:
+      case OptimizationLevel::Wasm:
       case OptimizationLevel::Count:;
     }
     MOZ_CRASH("Unknown optimization level.");
 }
 
 OptimizationLevel
 OptimizationLevelInfo::firstLevel() const
 {
--- a/js/src/jit/IonOptimizationLevels.h
+++ b/js/src/jit/IonOptimizationLevels.h
@@ -16,32 +16,32 @@
 #include "js/TypeDecls.h"
 
 namespace js {
 namespace jit {
 
 enum class OptimizationLevel : uint8_t
 {
     Normal,
-    AsmJS,
+    Wasm,
     Count,
     DontCompile
 };
 
 #ifdef JS_JITSPEW
 inline const char*
 OptimizationLevelString(OptimizationLevel level)
 {
     switch (level) {
       case OptimizationLevel::DontCompile:
         return "Optimization_DontCompile";
       case OptimizationLevel::Normal:
         return "Optimization_Normal";
-      case OptimizationLevel::AsmJS:
-        return "Optimization_AsmJS";
+      case OptimizationLevel::Wasm:
+        return "Optimization_Wasm";
       case OptimizationLevel::Count:;
     }
     MOZ_CRASH("Invalid OptimizationLevel");
 }
 #endif
 
 class OptimizationInfo
 {
@@ -149,17 +149,17 @@ class OptimizationInfo
     // is hot enough to recompile the outerScript to inline that function,
     // as a multiplication of inliningWarmUpThreshold.
     uint32_t inliningRecompileThresholdFactor_;
 
     OptimizationInfo()
     { }
 
     void initNormalOptimizationInfo();
-    void initAsmjsOptimizationInfo();
+    void initWasmOptimizationInfo();
 
     OptimizationLevel level() const {
         return level_;
     }
 
     bool inlineInterpreted() const {
         return inlineInterpreted_ && !JitOptions.disableInlining;
     }
--- a/js/src/jit/JSONSpewer.cpp
+++ b/js/src/jit/JSONSpewer.cpp
@@ -145,17 +145,17 @@ JSONSpewer::endList()
 
 void
 JSONSpewer::beginFunction(JSScript* script)
 {
     beginObject();
     if (script)
         stringProperty("name", "%s:%" PRIuSIZE, script->filename(), script->lineno());
     else
-        stringProperty("name", "asm.js compilation");
+        stringProperty("name", "wasm compilation");
     beginListProperty("passes");
 }
 
 void
 JSONSpewer::beginPass(const char* pass)
 {
     beginObject();
     stringProperty("name", "%s", pass);
--- a/js/src/jit/JitCompartment.h
+++ b/js/src/jit/JitCompartment.h
@@ -85,17 +85,17 @@ class JitRuntime
     enum BackedgeTarget {
         BackedgeLoopHeader,
         BackedgeInterruptCheck
     };
 
   private:
     friend class JitCompartment;
 
-    // Executable allocator for all code except asm.js code and Ion code with
+    // Executable allocator for all code except wasm code and Ion code with
     // patchable backedges (see below).
     ExecutableAllocator execAlloc_;
 
     // Executable allocator for Ion scripts with patchable backedges.
     ExecutableAllocator backedgeExecAlloc_;
 
     // Shared exception-handler tail.
     JitCode* exceptionTail_;
--- a/js/src/jit/JitOptions.cpp
+++ b/js/src/jit/JitOptions.cpp
@@ -57,17 +57,17 @@ T overrideDefault(const char* param, T d
     return dflt;
 }
 
 #define SET_DEFAULT(var, dflt) var = overrideDefault("JIT_OPTION_" #var, dflt)
 DefaultJitOptions::DefaultJitOptions()
 {
     // Whether to perform expensive graph-consistency DEBUG-only assertions.
     // It can be useful to disable this to reduce DEBUG-compile time of large
-    // asm.js programs.
+    // wasm programs.
     SET_DEFAULT(checkGraphConsistency, true);
 
 #ifdef CHECK_OSIPOINT_REGISTERS
     // Emit extra code to verify live regs at the start of a VM call
     // are not modified before its OsiPoint.
     SET_DEFAULT(checkOsiPointRegisters, false);
 #endif
 
--- a/js/src/jit/JitSpewer.cpp
+++ b/js/src/jit/JitSpewer.cpp
@@ -105,17 +105,17 @@ static bool
 FilterContainsLocation(JSScript* function)
 {
     static const char* filter = getenv("IONFILTER");
 
     // If there is no filter we accept all outputs.
     if (!filter || !filter[0])
         return true;
 
-    // Disable asm.js output when filter is set.
+    // Disable wasm output when filter is set.
     if (!function)
         return false;
 
     const char* filename = function->filename();
     const size_t line = function->lineno();
     const size_t filelen = strlen(filename);
     const char* index = strstr(filter, filename);
     while (index) {
--- a/js/src/jit/JitSpewer.h
+++ b/js/src/jit/JitSpewer.h
@@ -117,17 +117,17 @@ class MIRGraph;
 class TempAllocator;
 
 // The JitSpewer is only available on debug builds.
 // None of the global functions have effect on non-debug builds.
 static const int NULL_ID = -1;
 
 #ifdef JS_JITSPEW
 
-// Class made to hold the MIR and LIR graphs of an AsmJS / Ion compilation.
+// Class made to hold the MIR and LIR graphs of a wasm / Ion compilation.
 class GraphSpewer
 {
   private:
     MIRGraph* graph_;
     LSprinter c1Printer_;
     LSprinter jsonPrinter_;
     C1Spewer c1Spewer_;
     JSONSpewer jsonSpewer_;
--- a/js/src/jit/Linker.cpp
+++ b/js/src/jit/Linker.cpp
@@ -10,17 +10,17 @@
 
 namespace js {
 namespace jit {
 
 template <AllowGC allowGC>
 JitCode*
 Linker::newCode(JSContext* cx, CodeKind kind, bool hasPatchableBackedges /* = false */)
 {
-    MOZ_ASSERT(masm.numAsmJSAbsoluteAddresses() == 0);
+    MOZ_ASSERT(masm.numWasmAbsoluteAddresses() == 0);
     MOZ_ASSERT_IF(hasPatchableBackedges, kind == ION_CODE);
 
     gc::AutoSuppressGC suppressGC(cx);
     if (masm.oom())
         return fail(cx);
 
     ExecutablePool* pool;
     size_t bytesNeeded = masm.bytesNeeded() + sizeof(JitCode*) + CodeAlignment;
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -88,17 +88,17 @@ void
 LIRGenerator::visitIsConstructing(MIsConstructing* ins)
 {
     define(new(alloc()) LIsConstructing(), ins);
 }
 
 static void
 TryToUseImplicitInterruptCheck(MIRGraph& graph, MBasicBlock* backedge)
 {
-    // Implicit interrupt checks require asm.js signal handlers to be installed.
+    // Implicit interrupt checks require wasm signal handlers to be installed.
     if (!wasm::HaveSignalHandlers() || JitOptions.ionInterruptWithoutSignals)
         return;
 
     // To avoid triggering expensive interrupts (backedge patching) in
     // requestMajorGC and requestMinorGC, use an implicit interrupt check only
     // if the loop body can not trigger GC or affect GC state like the store
     // buffer. We do this by checking there are no safepoints attached to LIR
     // instructions inside the loop.
@@ -4244,17 +4244,17 @@ LIRGenerator::visitWasmReturn(MWasmRetur
         lir->setOperand(0, useFixed(rval, ReturnFloat32Reg));
     else if (rval->type() == MIRType::Double)
         lir->setOperand(0, useFixed(rval, ReturnDoubleReg));
     else if (IsSimdType(rval->type()))
         lir->setOperand(0, useFixed(rval, ReturnSimd128Reg));
     else if (rval->type() == MIRType::Int32)
         lir->setOperand(0, useFixed(rval, ReturnReg));
     else
-        MOZ_CRASH("Unexpected asm.js return type");
+        MOZ_CRASH("Unexpected wasm return type");
 
     // Preserve the TLS pointer we were passed in `WasmTlsReg`.
     MDefinition* tlsPtr = ins->getOperand(1);
     lir->setOperand(1, useFixed(tlsPtr, WasmTlsReg));
 
     add(lir);
 }
 
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -412,18 +412,18 @@ AliasSet::Name(size_t flag)
     switch(flag) {
       case 0: return "ObjectFields";
       case 1: return "Element";
       case 2: return "UnboxedElement";
       case 3: return "DynamicSlot";
       case 4: return "FixedSlot";
       case 5: return "DOMProperty";
       case 6: return "FrameArgument";
-      case 7: return "AsmJSGlobalVar";
-      case 8: return "AsmJSHeap";
+      case 7: return "WasmGlobalVar";
+      case 8: return "WasmHeap";
       case 9: return "TypedArrayLength";
       default:
         MOZ_CRASH("Unknown flag");
     }
 }
 
 void
 MTest::cacheOperandMightEmulateUndefined(CompilerConstraintList* constraints)
@@ -1141,17 +1141,17 @@ MConstant::canProduceFloat32() const
     return true;
 }
 
 Value
 MConstant::toJSValue() const
 {
     // Wasm has types like int64 that cannot be stored as js::Value. It also
     // doesn't want the NaN canonicalization enforced by js::Value.
-    MOZ_ASSERT(!IsCompilingAsmJS());
+    MOZ_ASSERT(!IsCompilingWasm());
 
     switch (type()) {
       case MIRType::Undefined:
         return UndefinedValue();
       case MIRType::Null:
         return NullValue();
       case MIRType::Boolean:
         return BooleanValue(toBoolean());
@@ -3936,17 +3936,17 @@ MUrsh::New(TempAllocator& alloc, MDefini
 {
     MUrsh* ins = new(alloc) MUrsh(left, right, type);
     ins->specializeAs(type);
 
     // Since Ion has no UInt32 type, we use Int32 and we have a special
     // exception to the type rules: we can return values in
     // (INT32_MIN,UINT32_MAX] and still claim that we have an Int32 type
     // without bailing out. This is necessary because Ion has no UInt32
-    // type and we can't have bailouts in asm.js code.
+    // type and we can't have bailouts in wasm code.
     ins->bailoutsDisabled_ = true;
 
     return ins;
 }
 
 MResumePoint*
 MResumePoint::New(TempAllocator& alloc, MBasicBlock* block, jsbytecode* pc,
                   Mode mode)
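A quick check on the flag arithmetic in the AliasSet hunk that follows: with Last = TypedArrayLength = 1 << 9, the Any mask covers all ten categories, including the renamed WasmGlobalVar (1 << 7) and WasmHeap (1 << 8) bits.

    #include <cstdint>
    // Last | (Last - 1) sets every bit up to and including Last's own bit:
    //   Last = 1 << 9 = 0x200; Last - 1 = 0x1FF; Any = 0x3FF.
    static constexpr uint32_t Last = 1u << 9;
    static constexpr uint32_t Any  = Last | (Last - 1);
    static_assert(Any == 0x3FF, "all ten alias categories are covered");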
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -377,18 +377,18 @@ class AliasSet {
                                     // a typed object.
         UnboxedElement    = 1 << 2, // An unboxed scalar or reference member of
                                     // a typed array, typed object, or unboxed
                                     // object.
         DynamicSlot       = 1 << 3, // A Value member of obj->slots.
         FixedSlot         = 1 << 4, // A Value member of obj->fixedSlots().
         DOMProperty       = 1 << 5, // A DOM property
         FrameArgument     = 1 << 6, // An argument kept on the stack frame
-        AsmJSGlobalVar    = 1 << 7, // An asm.js global var
-        AsmJSHeap         = 1 << 8, // An asm.js heap load
+        WasmGlobalVar     = 1 << 7, // An asm.js/wasm global var
+        WasmHeap          = 1 << 8, // An asm.js/wasm heap load
         TypedArrayLength  = 1 << 9, // A typed array's length
         Last              = TypedArrayLength,
         Any               = Last | (Last - 1),
 
         NumCategories     = 10,
 
         // Indicates load or store.
         Store_            = 1 << 31
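
// [Editor's note] An illustrative sketch (not part of this patch) of how the
// category bits combine: two accesses interfere only if they share a
// category and at least one carries the Store_ bit.
//
//     uint32_t heapLoad    = AliasSet::WasmHeap;                      // load
//     uint32_t globalStore = AliasSet::WasmGlobalVar | AliasSet::Store_;
//     bool mayAlias = (heapLoad & globalStore & AliasSet::Any) != 0;  // false
//     // so a WasmHeap load may be reordered past a WasmGlobalVar store.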
@@ -1926,17 +1926,17 @@ class MSimdExtractElement
     {
         MIRType vecType = obj->type();
         MOZ_ASSERT(IsSimdType(vecType));
         MOZ_ASSERT(lane < SimdTypeToLength(vecType));
         MOZ_ASSERT(!IsSimdType(laneType));
         MOZ_ASSERT((sign != SimdSign::NotApplicable) == IsIntegerSimdType(vecType),
                    "Signedness must be specified for integer SIMD extractLanes");
         // The resulting type should match the lane type.
-        // Allow extracting boolean lanes directly into an Int32 (for asm.js).
+        // Allow extracting boolean lanes directly into an Int32 (for wasm).
         // Allow extracting Uint32 lanes into a double.
         //
         // We also allow extracting Uint32 lanes into a MIRType::Int32. This is
         // equivalent to extracting the Uint32 lane to a double and then
         // applying MTruncateToInt32, but it bypasses the conversion to/from
         // double.
         MOZ_ASSERT(SimdTypeToLaneType(vecType) == laneType ||
                    (IsBooleanSimdType(vecType) && laneType == MIRType::Int32) ||
@@ -5306,17 +5306,17 @@ class MToFloat32
     MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
     bool canRecoverOnBailout() const override {
         return true;
     }
 
     ALLOW_CLONE(MToFloat32)
 };
 
-// Converts a uint32 to a double (coming from asm.js).
+// Converts a uint32 to a double (coming from wasm).
 class MWasmUnsignedToDouble
   : public MUnaryInstruction,
     public NoTypePolicy::Data
 {
     explicit MWasmUnsignedToDouble(MDefinition* def)
       : MUnaryInstruction(def)
     {
         setResultType(MIRType::Double);
@@ -5331,17 +5331,17 @@ class MWasmUnsignedToDouble
     bool congruentTo(const MDefinition* ins) const override {
         return congruentIfOperandsEqual(ins);
     }
     AliasSet getAliasSet() const override {
         return AliasSet::None();
     }
 };
 
-// Converts a uint32 to a float32 (coming from asm.js).
+// Converts a uint32 to a float32 (coming from wasm).
 class MWasmUnsignedToFloat32
   : public MUnaryInstruction,
     public NoTypePolicy::Data
 {
     explicit MWasmUnsignedToFloat32(MDefinition* def)
       : MUnaryInstruction(def)
     {
         setResultType(MIRType::Float32);
@@ -6120,17 +6120,17 @@ class MSignExtend
     ALLOW_CLONE(MSignExtend)
 };
 
 class MBinaryArithInstruction
   : public MBinaryInstruction,
     public ArithPolicy::Data
 {
     // The implicit truncate flag is set by the truncate backward range analysis
-    // optimization phase, and by asm.js pre-processing. It is used in
+    // optimization phase, and by wasm pre-processing. It is used in
     // NeedNegativeZeroCheck to check if the result of a multiplication needs to
     // produce a -0 double value, and for avoiding overflow checks.
 
     // This optimization happens when the multiplication cannot be truncated
     // even if all uses are truncating its result, such as when the range
     // analysis detects a precision loss in the multiplication.
     TruncateKind implicitTruncate_;
 
@@ -13568,18 +13568,18 @@ class MWasmLoad
     const wasm::MemoryAccessDesc& access() const {
         return access_;
     }
 
     AliasSet getAliasSet() const override {
         // When a barrier is needed, make the instruction effectful by giving
         // it a "store" effect.
         if (access_.isAtomic())
-            return AliasSet::Store(AliasSet::AsmJSHeap);
-        return AliasSet::Load(AliasSet::AsmJSHeap);
+            return AliasSet::Store(AliasSet::WasmHeap);
+        return AliasSet::Load(AliasSet::WasmHeap);
     }
 };
 
 class MWasmStore
   : public MBinaryInstruction,
     public NoTypePolicy::Data
 {
     wasm::MemoryAccessDesc access_;
@@ -13596,17 +13596,17 @@ class MWasmStore
     TRIVIAL_NEW_WRAPPERS
     NAMED_OPERANDS((0, base), (1, value))
 
     const wasm::MemoryAccessDesc& access() const {
         return access_;
     }
 
     AliasSet getAliasSet() const override {
-        return AliasSet::Store(AliasSet::AsmJSHeap);
+        return AliasSet::Store(AliasSet::WasmHeap);
     }
 };
 
 class MAsmJSMemoryAccess
 {
     uint32_t offset_;
     Scalar::Type accessType_;
     bool needsBoundsCheck_;
@@ -13652,17 +13652,17 @@ class MAsmJSLoadHeap
     INSTRUCTION_HEADER(AsmJSLoadHeap)
     TRIVIAL_NEW_WRAPPERS
 
     MDefinition* base() const { return getOperand(0); }
     void replaceBase(MDefinition* newBase) { replaceOperand(0, newBase); }
 
     bool congruentTo(const MDefinition* ins) const override;
     AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::AsmJSHeap);
+        return AliasSet::Load(AliasSet::WasmHeap);
     }
     AliasType mightAlias(const MDefinition* def) const override;
 };
 
 class MAsmJSStoreHeap
   : public MBinaryInstruction,
     public MAsmJSMemoryAccess,
     public NoTypePolicy::Data
@@ -13676,17 +13676,17 @@ class MAsmJSStoreHeap
     INSTRUCTION_HEADER(AsmJSStoreHeap)
     TRIVIAL_NEW_WRAPPERS
 
     MDefinition* base() const { return getOperand(0); }
     void replaceBase(MDefinition* newBase) { replaceOperand(0, newBase); }
     MDefinition* value() const { return getOperand(1); }
 
     AliasSet getAliasSet() const override {
-        return AliasSet::Store(AliasSet::AsmJSHeap);
+        return AliasSet::Store(AliasSet::WasmHeap);
     }
 };
 
 class MAsmJSCompareExchangeHeap
   : public MQuaternaryInstruction,
     public NoTypePolicy::Data
 {
     wasm::MemoryAccessDesc access_;
@@ -13707,17 +13707,17 @@ class MAsmJSCompareExchangeHeap
     const wasm::MemoryAccessDesc& access() const { return access_; }
 
     MDefinition* base() const { return getOperand(0); }
     MDefinition* oldValue() const { return getOperand(1); }
     MDefinition* newValue() const { return getOperand(2); }
     MDefinition* tls() const { return getOperand(3); }
 
     AliasSet getAliasSet() const override {
-        return AliasSet::Store(AliasSet::AsmJSHeap);
+        return AliasSet::Store(AliasSet::WasmHeap);
     }
 };
 
 class MAsmJSAtomicExchangeHeap
   : public MTernaryInstruction,
     public NoTypePolicy::Data
 {
     wasm::MemoryAccessDesc access_;
@@ -13737,17 +13737,17 @@ class MAsmJSAtomicExchangeHeap
 
     const wasm::MemoryAccessDesc& access() const { return access_; }
 
     MDefinition* base() const { return getOperand(0); }
     MDefinition* value() const { return getOperand(1); }
     MDefinition* tls() const { return getOperand(2); }
 
     AliasSet getAliasSet() const override {
-        return AliasSet::Store(AliasSet::AsmJSHeap);
+        return AliasSet::Store(AliasSet::WasmHeap);
     }
 };
 
 class MAsmJSAtomicBinopHeap
   : public MTernaryInstruction,
     public NoTypePolicy::Data
 {
     AtomicOp op_;
@@ -13770,17 +13770,17 @@ class MAsmJSAtomicBinopHeap
     AtomicOp operation() const { return op_; }
     const wasm::MemoryAccessDesc& access() const { return access_; }
 
     MDefinition* base() const { return getOperand(0); }
     MDefinition* value() const { return getOperand(1); }
     MDefinition* tls() const { return getOperand(2); }
 
     AliasSet getAliasSet() const override {
-        return AliasSet::Store(AliasSet::AsmJSHeap);
+        return AliasSet::Store(AliasSet::WasmHeap);
     }
 };
 
 class MWasmLoadGlobalVar : public MNullaryInstruction
 {
     MWasmLoadGlobalVar(MIRType type, unsigned globalDataOffset, bool isConstant)
       : globalDataOffset_(globalDataOffset), isConstant_(isConstant)
     {
@@ -13798,17 +13798,17 @@ class MWasmLoadGlobalVar : public MNulla
 
     unsigned globalDataOffset() const { return globalDataOffset_; }
 
     HashNumber valueHash() const override;
     bool congruentTo(const MDefinition* ins) const override;
     MDefinition* foldsTo(TempAllocator& alloc) override;
 
     AliasSet getAliasSet() const override {
-        return isConstant_ ? AliasSet::None() : AliasSet::Load(AliasSet::AsmJSGlobalVar);
+        return isConstant_ ? AliasSet::None() : AliasSet::Load(AliasSet::WasmGlobalVar);
     }
 
     AliasType mightAlias(const MDefinition* def) const override;
 };
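
// [Editor's note] Why the isConstant_ split above matters, shown with
// illustrative wasm text: reads of an immutable global report
// AliasSet::None() and are folded by GVN, while reads of a mutable global
// stay ordered against MWasmStoreGlobalVar.
//
//     (global $g i32 (i32.const 7))        ;; immutable: reads are congruent
//     (global $m (mut i32) (i32.const 7))  ;; mutable: each read must observe
//                                          ;; the latest set_global $m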
 
 class MWasmStoreGlobalVar
   : public MUnaryInstruction,
     public NoTypePolicy::Data
@@ -13822,17 +13822,17 @@ class MWasmStoreGlobalVar
   public:
     INSTRUCTION_HEADER(WasmStoreGlobalVar)
     TRIVIAL_NEW_WRAPPERS
 
     unsigned globalDataOffset() const { return globalDataOffset_; }
     MDefinition* value() const { return getOperand(0); }
 
     AliasSet getAliasSet() const override {
-        return AliasSet::Store(AliasSet::AsmJSGlobalVar);
+        return AliasSet::Store(AliasSet::WasmGlobalVar);
     }
 };
 
 class MWasmParameter : public MNullaryInstruction
 {
     ABIArg abi_;
 
     MWasmParameter(ABIArg abi, MIRType mirType)
--- a/js/src/jit/MIRGenerator.h
+++ b/js/src/jit/MIRGenerator.h
@@ -34,18 +34,18 @@ class OptimizationInfo;
 
 class MIRGenerator
 {
   public:
     MIRGenerator(CompileCompartment* compartment, const JitCompileOptions& options,
                  TempAllocator* alloc, MIRGraph* graph,
                  const CompileInfo* info, const OptimizationInfo* optimizationInfo);
 
-    void initMinAsmJSHeapLength(uint32_t init) {
-        minAsmJSHeapLength_ = init;
+    void initMinWasmHeapLength(uint32_t init) {
+        minWasmHeapLength_ = init;
     }
 
     TempAllocator& alloc() {
         return *alloc_;
     }
     MIRGraph& graph() {
         return *graph_;
     }
@@ -134,18 +134,18 @@ class MIRGenerator
         MOZ_ASSERT(compilingWasm());
         return wasmMaxStackArgBytes_;
     }
     void initWasmMaxStackArgBytes(uint32_t n) {
         MOZ_ASSERT(compilingWasm());
         MOZ_ASSERT(wasmMaxStackArgBytes_ == 0);
         wasmMaxStackArgBytes_ = n;
     }
-    uint32_t minAsmJSHeapLength() const {
-        return minAsmJSHeapLength_;
+    uint32_t minWasmHeapLength() const {
+        return minWasmHeapLength_;
     }
     void setPerformsCall() {
         performsCall_ = true;
     }
     bool performsCall() const {
         return performsCall_;
     }
     // Traverses the graph to find if there's any SIMD instruction. Costly but
@@ -190,30 +190,30 @@ class MIRGenerator
     bool modifiesFrameArguments_;
 
     bool instrumentedProfiling_;
     bool instrumentedProfilingIsCached_;
     bool safeForMinorGC_;
 
     void addAbortedPreliminaryGroup(ObjectGroup* group);
 
-    uint32_t minAsmJSHeapLength_;
+    uint32_t minWasmHeapLength_;
 
     void setForceAbort() {
         shouldForceAbort_ = true;
     }
     bool shouldForceAbort() {
         return shouldForceAbort_;
     }
 
 #if defined(JS_ION_PERF)
-    AsmJSPerfSpewer asmJSPerfSpewer_;
+    WasmPerfSpewer wasmPerfSpewer_;
 
   public:
-    AsmJSPerfSpewer& perfSpewer() { return asmJSPerfSpewer_; }
+    WasmPerfSpewer& perfSpewer() { return wasmPerfSpewer_; }
 #endif
 
   public:
     const JitCompileOptions options;
 
   private:
     GraphSpewer gs_;
 
--- a/js/src/jit/MIRGraph.cpp
+++ b/js/src/jit/MIRGraph.cpp
@@ -34,17 +34,17 @@ MIRGenerator::MIRGenerator(CompileCompar
     wasmMaxStackArgBytes_(0),
     performsCall_(false),
     usesSimd_(false),
     cachedUsesSimd_(false),
     modifiesFrameArguments_(false),
     instrumentedProfiling_(false),
     instrumentedProfilingIsCached_(false),
     safeForMinorGC_(true),
-    minAsmJSHeapLength_(0),
+    minWasmHeapLength_(0),
     options(options),
     gs_(alloc)
 { }
 
 bool
 MIRGenerator::usesSimd()
 {
     if (cachedUsesSimd_)
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -1590,17 +1590,17 @@ AssumeUnreachable_(const char* output) {
     MOZ_ReportAssertionFailure(output, __FILE__, __LINE__);
 }
 #endif
 
 void
 MacroAssembler::assumeUnreachable(const char* output)
 {
 #ifdef DEBUG
-    if (!IsCompilingAsmJS()) {
+    if (!IsCompilingWasm()) {
         AllocatableRegisterSet regs(RegisterSet::Volatile());
         LiveRegisterSet save(regs.asLiveSet());
         PushRegsInMask(save);
         Register temp = regs.takeAnyGeneral();
 
         setupUnalignedABICall(temp);
         movePtr(ImmPtr(output), temp);
         passABIArg(temp);
@@ -2655,17 +2655,17 @@ MacroAssembler::callWithABINoProfiler(vo
     call(ImmPtr(fun));
     callWithABIPost(stackAdjust, result);
 }
 
 void
 MacroAssembler::callWithABINoProfiler(wasm::SymbolicAddress imm, MoveOp::Type result)
 {
     uint32_t stackAdjust;
-    callWithABIPre(&stackAdjust, /* callFromAsmJS = */ true);
+    callWithABIPre(&stackAdjust, /* callFromWasm = */ true);
     call(imm);
     callWithABIPost(stackAdjust, result);
 }
 
 // ===============================================================
 // Exit frame footer.
 
 void
@@ -2878,17 +2878,17 @@ MacroAssembler::wasmEmitTrapOutOfLineCod
             // current pre-prologue stack/register state.
             append(wasm::TrapFarJump(site.trap, farJumpWithPatch()));
         } else {
             // Inherit the frame depth of the trap site. This value is captured
             // by the wasm::CallSite to allow unwinding this frame.
             setFramePushed(site.framePushed);
 
             // Align the stack for a nullary call.
-            size_t alreadyPushed = sizeof(AsmJSFrame) + framePushed();
+            size_t alreadyPushed = sizeof(WasmFrame) + framePushed();
             size_t toPush = ABIArgGenerator().stackBytesConsumedSoFar();
             if (size_t dec = StackDecrementForCall(ABIStackAlignment, alreadyPushed, toPush))
                 reserveStack(dec);
 
             // Call the trap's exit, using the bytecode offset of the trap site.
             // Note that this code is inside the same CodeRange::Function as the
             // trap site so it's as if the trapping instruction called the
             // trap-handling function. The frame iterator knows to skip the trap
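
// [Editor's note] A worked instance of the alignment math above, assuming
// sizeof(WasmFrame) == 8 and ABIStackAlignment == 16 (typical x64 values):
//
//     size_t alreadyPushed = 8 + 20;  // WasmFrame + framePushed() == 20
//     size_t toPush = 0;              // nullary call, no outgoing args
//     // StackDecrementForCall rounds 28 up to the next multiple of 16, so
//     // dec == 4 and reserveStack(4) realigns the stack; had framePushed()
//     // been 24, the total would already be aligned and dec == 0.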
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -348,19 +348,19 @@ class MacroAssembler : public MacroAssem
 #endif
     }
 
     // This constructor should only be used when there is no JitContext active
     // (for example, Trampoline-$(ARCH).cpp and IonCaches.cpp).
     explicit MacroAssembler(JSContext* cx, IonScript* ion = nullptr,
                             JSScript* script = nullptr, jsbytecode* pc = nullptr);
 
-    // asm.js compilation handles its own JitContext-pushing
-    struct AsmJSToken {};
-    explicit MacroAssembler(AsmJSToken, TempAllocator& alloc)
+    // wasm compilation handles its own JitContext-pushing
+    struct WasmToken {};
+    explicit MacroAssembler(WasmToken, TempAllocator& alloc)
       : framePushed_(0),
 #ifdef DEBUG
         inCall_(false),
 #endif
         emitProfilingInstrumentation_(false)
     {
         moveResolver_.setAllocator(alloc);
 
@@ -545,17 +545,17 @@ class MacroAssembler : public MacroAssem
     inline void callWithABI(const T& fun, MoveOp::Type result = MoveOp::GENERAL);
 
   private:
     // Reinitialize the variables which have to be cleared before making a call
     // with callWithABI.
     void setupABICall();
 
     // Reserve the stack and resolve the arguments move.
-    void callWithABIPre(uint32_t* stackAdjust, bool callFromAsmJS = false) PER_ARCH;
+    void callWithABIPre(uint32_t* stackAdjust, bool callFromWasm = false) PER_ARCH;
 
     // Emits a call to a C/C++ function, resolving all argument moves.
     void callWithABINoProfiler(void* fun, MoveOp::Type result);
     void callWithABINoProfiler(wasm::SymbolicAddress imm, MoveOp::Type result);
     void callWithABINoProfiler(Register fun, MoveOp::Type result) PER_ARCH;
     void callWithABINoProfiler(const Address& fun, MoveOp::Type result) PER_ARCH;
 
     // Restore the stack to its state before the setup function call.
--- a/js/src/jit/PerfSpewer.cpp
+++ b/js/src/jit/PerfSpewer.cpp
@@ -314,17 +314,17 @@ js::jit::writePerfSpewerJitCodeProfile(J
                 reinterpret_cast<uintptr_t>(code->raw()),
                 size, msg, code->raw(), size);
     }
 
     unlockPerfMap();
 }
 
 void
-js::jit::writePerfSpewerAsmJSFunctionMap(uintptr_t base, uintptr_t size,
-                                         const char* filename, unsigned lineno, unsigned colIndex,
-                                         const char* funcName)
+js::jit::writePerfSpewerWasmFunctionMap(uintptr_t base, uintptr_t size,
+                                        const char* filename, unsigned lineno, unsigned colIndex,
+                                        const char* funcName)
 {
     if (!PerfFuncEnabled() || size == 0U)
         return;
 
     if (!lockPerfMap())
         return;
--- a/js/src/jit/PerfSpewer.h
+++ b/js/src/jit/PerfSpewer.h
@@ -71,25 +71,25 @@ class PerfSpewer
     MOZ_MUST_USE bool noteEndInlineCode(MacroAssembler& masm);
 
     void writeProfile(JSScript* script, JitCode* code, MacroAssembler& masm);
 };
 
 void writePerfSpewerBaselineProfile(JSScript* script, JitCode* code);
 void writePerfSpewerJitCodeProfile(JitCode* code, const char* msg);
 
-// AsmJS doesn't support block annotations.
-class AsmJSPerfSpewer : public PerfSpewer
+// wasm doesn't support block annotations.
+class WasmPerfSpewer : public PerfSpewer
 {
   public:
     MOZ_MUST_USE bool startBasicBlock(MBasicBlock* blk, MacroAssembler& masm) { return true; }
     MOZ_MUST_USE bool endBasicBlock(MacroAssembler& masm) { return true; }
 };
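
// [Editor's note] For context: when PerfFuncEnabled() is set, these writers
// append to the /tmp/perf-<pid>.map file that `perf report` reads. Each line
// is a hex start address, a hex code size, and a free-form symbol name; a
// wasm entry might look roughly like this (illustrative only, not the exact
// format string used by the .cpp):
//
//     7f3c24a01000 1c4 module.wasm:3:17: Function foo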
 
-void writePerfSpewerAsmJSFunctionMap(uintptr_t base, uintptr_t size, const char* filename,
-                                     unsigned lineno, unsigned colIndex, const char* funcName);
+void writePerfSpewerWasmFunctionMap(uintptr_t base, uintptr_t size, const char* filename,
+                                    unsigned lineno, unsigned colIndex, const char* funcName);
 
 #endif // JS_ION_PERF
 
 } // namespace jit
 } // namespace js
 
 #endif /* jit_PerfSpewer_h */
--- a/js/src/jit/RangeAnalysis.cpp
+++ b/js/src/jit/RangeAnalysis.cpp
@@ -2674,18 +2674,18 @@ MLimitedTruncate::needTruncation(Truncat
     if (kind >= IndirectTruncate && range())
         range()->wrapAroundToInt32();
     return false;
 }
 
 bool
 MCompare::needTruncation(TruncateKind kind)
 {
-    // If we're compiling AsmJS, don't try to optimize the comparison type, as
-    // the code presumably is already using the type it wants. Also, AsmJS
+    // If we're compiling wasm, don't try to optimize the comparison type, as
+    // the code presumably is already using the type it wants. Also, wasm
     // doesn't support bailouts, so we wouldn't be able to rely on
     // TruncateAfterBailouts to convert our inputs.
     if (block()->info().compilingWasm())
        return false;
 
     if (!isDoubleComparison())
         return false;
 
@@ -3086,19 +3086,19 @@ AdjustTruncatedInputs(TempAllocator& all
 //
 // We iterate backward because it is likely that a truncated operation truncates
 // some of its operands.
 bool
 RangeAnalysis::truncate()
 {
     JitSpew(JitSpew_Range, "Do range-based truncation (backward loop)");
 
-    // Automatic truncation is disabled for AsmJS because the truncation logic
+    // Automatic truncation is disabled for wasm because the truncation logic
     // is based on IonMonkey which assumes that we can bailout if the truncation
-    // logic fails. As AsmJS code has no bailout mechanism, it is safer to avoid
+    // logic fails. As wasm code has no bailout mechanism, it is safer to avoid
     // any automatic truncations.
     MOZ_ASSERT(!mir->compilingWasm());
 
     Vector<MDefinition*, 16, SystemAllocPolicy> worklist;
 
     for (PostorderIterator block(graph_.poBegin()); block != graph_.poEnd(); block++) {
         for (MInstructionReverseIterator iter(block->rbegin()); iter != block->rend(); iter++) {
             if (iter->isRecoveredOnBailout())
--- a/js/src/jit/arm/Assembler-arm.cpp
+++ b/js/src/jit/arm/Assembler-arm.cpp
@@ -24,18 +24,18 @@
 
 using namespace js;
 using namespace js::jit;
 
 using mozilla::CountLeadingZeroes32;
 
 void dbg_break() {}
 
-// The ABIArgGenerator is used for making system ABI calls and for inter-AsmJS
-// calls. The system ABI can either be SoftFp or HardFp, and inter-AsmJS calls
+// The ABIArgGenerator is used for making system ABI calls and for inter-wasm
+// calls. The system ABI can either be SoftFp or HardFp, and inter-wasm calls
 // are always HardFp calls. The initialization defaults to HardFp, and the ABI
 // choice is made before any system ABI calls with the method "setUseHardFp".
 ABIArgGenerator::ABIArgGenerator()
   : intRegIndex_(0),
     floatRegIndex_(0),
     stackOffset_(0),
     current_(),
     useHardFp_(true)
--- a/js/src/jit/arm/Assembler-arm.h
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -83,17 +83,17 @@ class ABIArgGenerator
 {
     unsigned intRegIndex_;
     unsigned floatRegIndex_;
     uint32_t stackOffset_;
     ABIArg current_;
 
     // ARM can either use HardFp (use float registers for float arguments), or
     // SoftFp (use general registers for float arguments) ABI.  We keep this
-    // switch as a runtime switch because AsmJS always use the HardFp back-end
+    // switch as a runtime switch because wasm always uses the HardFp back-end
     // while the calls to native functions have to use the one provided by the
     // system.
     bool useHardFp_;
 
     ABIArg softNext(MIRType argType);
     ABIArg hardNext(MIRType argType);
 
   public:
@@ -114,17 +114,17 @@ static constexpr Register ABINonArgReg2 
 static constexpr Register ABINonArgReturnReg0 = r4;
 static constexpr Register ABINonArgReturnReg1 = r5;
 
 // TLS pointer argument register for WebAssembly functions. This must not alias
 // any other register used for passing function arguments or return values.
 // Preserved by WebAssembly functions.
 static constexpr Register WasmTlsReg = r9;
 
-// Registers used for asm.js/wasm table calls. These registers must be disjoint
+// Registers used for wasm table calls. These registers must be disjoint
 // from the ABI argument registers, WasmTlsReg and each other.
 static constexpr Register WasmTableCallScratchReg = ABINonArgReg0;
 static constexpr Register WasmTableCallSigReg = ABINonArgReg1;
 static constexpr Register WasmTableCallIndexReg = ABINonArgReg2;
 
 static constexpr Register PreBarrierReg = r1;
 
 static constexpr Register InvalidReg = { Registers::invalid_reg };
@@ -156,30 +156,30 @@ struct ScratchDoubleScope : public AutoF
     explicit ScratchDoubleScope(MacroAssembler& masm)
       : AutoFloatRegisterScope(masm, ScratchDoubleReg)
     { }
 };
 
 // A bias applied to the GlobalReg to allow the use of instructions with small
 // negative immediate offsets, which doubles the range of global data that can be
 // accessed with a single instruction.
-static const int32_t AsmJSGlobalRegBias = 1024;
+static const int32_t WasmGlobalRegBias = 1024;
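
// [Editor's note] A worked example of the bias, assuming ARM's ldr/str
// immediate range of [-4095, +4095]. GlobalReg is set to
// globalData + WasmGlobalRegBias (see loadWasmPinnedRegsFromTls), so each
// access encodes the immediate
//
//     int32_t imm = globalDataOffset - WasmGlobalRegBias;  // may be negative
//
// which makes offsets 0..5119 reachable in one instruction instead of
// only 0..4095.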
 
 // Registers used in the GenerateFFIIonExit Enable Activation block.
-static constexpr Register AsmJSIonExitRegCallee = r4;
-static constexpr Register AsmJSIonExitRegE0 = r0;
-static constexpr Register AsmJSIonExitRegE1 = r1;
+static constexpr Register WasmIonExitRegCallee = r4;
+static constexpr Register WasmIonExitRegE0 = r0;
+static constexpr Register WasmIonExitRegE1 = r1;
 
 // Registers used in the GenerateFFIIonExit Disable Activation block.
 // None of these may be the second scratch register (lr).
-static constexpr Register AsmJSIonExitRegReturnData = r2;
-static constexpr Register AsmJSIonExitRegReturnType = r3;
-static constexpr Register AsmJSIonExitRegD0 = r0;
-static constexpr Register AsmJSIonExitRegD1 = r1;
-static constexpr Register AsmJSIonExitRegD2 = r4;
+static constexpr Register WasmIonExitRegReturnData = r2;
+static constexpr Register WasmIonExitRegReturnType = r3;
+static constexpr Register WasmIonExitRegD0 = r0;
+static constexpr Register WasmIonExitRegD1 = r1;
+static constexpr Register WasmIonExitRegD2 = r4;
 
 // Registers used in RegExpMatcher instruction (do not use JSReturnOperand).
 static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
 static constexpr Register RegExpMatcherStringReg = CallTempReg1;
 static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
 
 // Registers used in RegExpTester instruction (do not use ReturnReg).
 static constexpr Register RegExpTesterRegExpReg = CallTempReg0;
@@ -227,17 +227,17 @@ static_assert(CodeAlignment % SimdMemory
   "Code alignment should be larger than any of the alignments which are used for "
   "the constant sections of the code buffer.  Thus it should be larger than the "
   "alignment for SIMD constants.");
 
 static_assert(JitStackAlignment % SimdMemoryAlignment == 0,
   "Stack alignment should be larger than any of the alignments which are used for "
   "spilled values.  Thus it should be larger than the alignment for SIMD accesses.");
 
-static const uint32_t AsmJSStackAlignment = SimdMemoryAlignment;
+static const uint32_t WasmStackAlignment = SimdMemoryAlignment;
 
 // Does this architecture support SIMD conversions between Uint32x4 and Float32x4?
 static constexpr bool SupportsUint32x4FloatConversions = false;
 
 // Does this architecture support comparisons of unsigned integer vectors?
 static constexpr bool SupportsUint8x16Compares = false;
 static constexpr bool SupportsUint16x8Compares = false;
 static constexpr bool SupportsUint32x4Compares = false;
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -2329,17 +2329,17 @@ CodeGeneratorARM::visitAsmJSLoadHeap(LAs
                 output = output.singleOverlay();
 
             Assembler::Condition cond = Assembler::Always;
             if (mir->needsBoundsCheck()) {
                 BufferOffset cmp = masm.as_cmp(ptrReg, Imm8(0));
                 masm.append(wasm::BoundsCheck(cmp.getOffset()));
 
                 size_t nanOffset = size == 32 ? wasm::NaN32GlobalDataOffset : wasm::NaN64GlobalDataOffset;
-                masm.ma_vldr(Address(GlobalReg, nanOffset - AsmJSGlobalRegBias), output, scratch,
+                masm.ma_vldr(Address(GlobalReg, nanOffset - WasmGlobalRegBias), output, scratch,
                              Assembler::AboveOrEqual);
                 cond = Assembler::Below;
             }
 
             masm.ma_vldr(output, HeapReg, ptrReg, scratch, 0, cond);
         } else {
             Register output = ToRegister(ins->output());
 
@@ -3083,17 +3083,17 @@ CodeGeneratorARM::visitEffectiveAddress(
     masm.as_add(output, base, lsl(index, mir->scale()));
     masm.ma_add(Imm32(mir->displacement()), output, scratch);
 }
 
 void
 CodeGeneratorARM::visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins)
 {
     const MWasmLoadGlobalVar* mir = ins->mir();
-    unsigned addr = mir->globalDataOffset() - AsmJSGlobalRegBias;
+    unsigned addr = mir->globalDataOffset() - WasmGlobalRegBias;
 
     ScratchRegisterScope scratch(masm);
 
     if (mir->type() == MIRType::Int32) {
         masm.ma_dtr(IsLoad, GlobalReg, Imm32(addr), ToRegister(ins->output()), scratch);
     } else if (mir->type() == MIRType::Float32) {
         VFPRegister vd(ToFloatRegister(ins->output()));
         masm.ma_vldr(Address(GlobalReg, addr), vd.singleOverlay(), scratch);
@@ -3102,50 +3102,50 @@ CodeGeneratorARM::visitWasmLoadGlobalVar
         masm.ma_vldr(Address(GlobalReg, addr), ToFloatRegister(ins->output()), scratch);
     }
 }
 
 void
 CodeGeneratorARM::visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins)
 {
     const MWasmLoadGlobalVar* mir = ins->mir();
-    unsigned addr = mir->globalDataOffset() - AsmJSGlobalRegBias;
+    unsigned addr = mir->globalDataOffset() - WasmGlobalRegBias;
     MOZ_ASSERT(mir->type() == MIRType::Int64);
     Register64 output = ToOutRegister64(ins);
 
     ScratchRegisterScope scratch(masm);
     masm.ma_dtr(IsLoad, GlobalReg, Imm32(addr + INT64LOW_OFFSET), output.low, scratch);
     masm.ma_dtr(IsLoad, GlobalReg, Imm32(addr + INT64HIGH_OFFSET), output.high, scratch);
 }
 
 void
 CodeGeneratorARM::visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins)
 {
     const MWasmStoreGlobalVar* mir = ins->mir();
     MIRType type = mir->value()->type();
 
     ScratchRegisterScope scratch(masm);
 
-    unsigned addr = mir->globalDataOffset() - AsmJSGlobalRegBias;
+    unsigned addr = mir->globalDataOffset() - WasmGlobalRegBias;
     if (type == MIRType::Int32) {
         masm.ma_dtr(IsStore, GlobalReg, Imm32(addr), ToRegister(ins->value()), scratch);
     } else if (type == MIRType::Float32) {
         VFPRegister vd(ToFloatRegister(ins->value()));
         masm.ma_vstr(vd.singleOverlay(), Address(GlobalReg, addr), scratch);
     } else {
         MOZ_ASSERT(type == MIRType::Double);
         masm.ma_vstr(ToFloatRegister(ins->value()), Address(GlobalReg, addr), scratch);
     }
 }
 
 void
 CodeGeneratorARM::visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins)
 {
     const MWasmStoreGlobalVar* mir = ins->mir();
-    unsigned addr = mir->globalDataOffset() - AsmJSGlobalRegBias;
+    unsigned addr = mir->globalDataOffset() - WasmGlobalRegBias;
     MOZ_ASSERT(mir->value()->type() == MIRType::Int64);
     Register64 input = ToRegister64(ins->value());
 
     ScratchRegisterScope scratch(masm);
     masm.ma_dtr(IsStore, GlobalReg, Imm32(addr + INT64LOW_OFFSET), input.low, scratch);
     masm.ma_dtr(IsStore, GlobalReg, Imm32(addr + INT64HIGH_OFFSET), input.high, scratch);
 }
 
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -1872,17 +1872,17 @@ void
 MacroAssemblerARMCompat::movePtr(ImmPtr imm, Register dest)
 {
     movePtr(ImmWord(uintptr_t(imm.value)), dest);
 }
 
 void
 MacroAssemblerARMCompat::movePtr(wasm::SymbolicAddress imm, Register dest)
 {
-    append(AsmJSAbsoluteAddress(CodeOffset(currentOffset()), imm));
+    append(WasmAbsoluteAddress(CodeOffset(currentOffset()), imm));
     ma_movPatchable(Imm32(-1), dest, Always);
 }
 
 void
 MacroAssemblerARMCompat::load8ZeroExtend(const Address& address, Register dest)
 {
     ScratchRegisterScope scratch(asMasm());
     ma_dataTransferN(IsLoad, 8, false, address.base, Imm32(address.offset), dest, scratch);
@@ -5103,28 +5103,28 @@ MacroAssembler::setupUnalignedABICall(Re
 
     ma_mov(sp, scratch);
     // Force sp to be aligned.
     as_bic(sp, sp, Imm8(ABIStackAlignment - 1));
     ma_push(scratch);
 }
 
 void
-MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromAsmJS)
+MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm)
 {
     MOZ_ASSERT(inCall_);
     uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
 
     if (dynamicAlignment_) {
         // sizeof(intptr_t) accounts for the saved stack pointer pushed by
         // setupUnalignedABICall.
         stackForCall += ComputeByteAlignment(stackForCall + sizeof(intptr_t),
                                              ABIStackAlignment);
     } else {
-        uint32_t alignmentAtPrologue = callFromAsmJS ? sizeof(AsmJSFrame) : 0;
+        uint32_t alignmentAtPrologue = callFromWasm ? sizeof(WasmFrame) : 0;
         stackForCall += ComputeByteAlignment(stackForCall + framePushed() + alignmentAtPrologue,
                                              ABIStackAlignment);
     }
 
     *stackAdjust = stackForCall;
     reserveStack(stackForCall);
 
     // Position all arguments.
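
// [Editor's note] A worked instance of the non-dynamic path above, assuming
// ABIStackAlignment == 8 on ARM and a call made from wasm code, so that
// alignmentAtPrologue == sizeof(WasmFrame) == 8:
//
//     uint32_t stackForCall = 4;  // one outgoing stack argument word
//     // ComputeByteAlignment(4 + framePushed() + 8, 8):
//     //   framePushed() == 12 -> 24 % 8 == 0 -> no padding needed;
//     //   framePushed() == 16 -> 28 % 8 == 4 -> 4 bytes of padding keep
//     //   the callee's view of sp 8-byte aligned.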
--- a/js/src/jit/arm/MacroAssembler-arm.h
+++ b/js/src/jit/arm/MacroAssembler-arm.h
@@ -1532,23 +1532,23 @@ class MacroAssemblerARMCompat : public M
         as_bx(lr);
     }
 
     void moveFloat32(FloatRegister src, FloatRegister dest, Condition cc = Always) {
         as_vmov(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(), cc);
     }
 
     void loadWasmGlobalPtr(uint32_t globalDataOffset, Register dest) {
-        loadPtr(Address(GlobalReg, globalDataOffset - AsmJSGlobalRegBias), dest);
+        loadPtr(Address(GlobalReg, globalDataOffset - WasmGlobalRegBias), dest);
     }
     void loadWasmPinnedRegsFromTls() {
         ScratchRegisterScope scratch(asMasm());
         ma_ldr(Address(WasmTlsReg, offsetof(wasm::TlsData, memoryBase)), HeapReg, scratch);
         ma_ldr(Address(WasmTlsReg, offsetof(wasm::TlsData, globalData)), GlobalReg, scratch);
-        ma_add(Imm32(AsmJSGlobalRegBias), GlobalReg, scratch);
+        ma_add(Imm32(WasmGlobalRegBias), GlobalReg, scratch);
     }
 
     // Instrumentation for entering and leaving the profiler.
     void profilerEnterFrame(Register framePtr, Register scratch);
     void profilerExitFrame();
 };
 
 typedef MacroAssemblerARMCompat MacroAssemblerSpecific;
--- a/js/src/jit/arm/Simulator-arm.cpp
+++ b/js/src/jit/arm/Simulator-arm.cpp
@@ -4732,17 +4732,17 @@ Simulator::execute()
             if (single_stepping_)
                 single_step_callback_(single_step_callback_arg_, this, (void*)program_counter);
             SimInstruction* instr = reinterpret_cast<SimInstruction*>(program_counter);
             instructionDecode(instr);
             icount_++;
 
             int32_t rpc = resume_pc_;
             if (MOZ_UNLIKELY(rpc != 0)) {
-                // AsmJS signal handler ran and we have to adjust the pc.
+                // The wasm signal handler ran and we have to adjust the pc.
                 JSRuntime::innermostWasmActivation()->setResumePC((void*)get_pc());
                 set_pc(rpc);
                 resume_pc_ = 0;
             }
         }
         program_counter = get_pc();
     }
 
--- a/js/src/jit/arm64/Assembler-arm64.cpp
+++ b/js/src/jit/arm64/Assembler-arm64.cpp
@@ -22,17 +22,17 @@
 #include "gc/StoreBuffer-inl.h"
 
 using namespace js;
 using namespace js::jit;
 
 using mozilla::CountLeadingZeroes32;
 using mozilla::DebugOnly;
 
-// Note this is used for inter-AsmJS calls and may pass arguments and results
+// Note this is used for inter-wasm calls and may pass arguments and results
 // in floating point registers even if the system ABI does not.
 
 ABIArg
 ABIArgGenerator::next(MIRType type)
 {
     switch (type) {
       case MIRType::Int32:
       case MIRType::Pointer:
--- a/js/src/jit/arm64/Assembler-arm64.h
+++ b/js/src/jit/arm64/Assembler-arm64.h
@@ -18,17 +18,16 @@ namespace jit {
 typedef vixl::Register ARMRegister;
 typedef vixl::FPRegister ARMFPRegister;
 using vixl::ARMBuffer;
 using vixl::Instruction;
 
 static const uint32_t AlignmentAtPrologue = 0;
 static const uint32_t AlignmentMidPrologue = 8;
 static const Scale ScalePointer = TimesEight;
-static const uint32_t AlignmentAtAsmJSPrologue = sizeof(void*);
 
 // The MacroAssembler uses scratch registers extensively and unexpectedly.
 // For safety, scratch registers should always be acquired using
 // vixl::UseScratchRegisterScope.
 static constexpr Register ScratchReg = { Registers::ip0 };
 static constexpr ARMRegister ScratchReg64 = { ScratchReg, 64 };
 
 static constexpr Register ScratchReg2 = { Registers::ip1 };
@@ -115,27 +114,27 @@ static constexpr ARMRegister sp = vixl::
 static constexpr ARMFPRegister s##N = vixl::s##N;  \
 static constexpr ARMFPRegister d##N = vixl::d##N;
 REGISTER_CODE_LIST(IMPORT_VIXL_VREGISTERS)
 #undef IMPORT_VIXL_VREGISTERS
 
 static constexpr ValueOperand JSReturnOperand = ValueOperand(JSReturnReg);
 
 // Registers used in the GenerateFFIIonExit Enable Activation block.
-static constexpr Register AsmJSIonExitRegCallee = r8;
-static constexpr Register AsmJSIonExitRegE0 = r0;
-static constexpr Register AsmJSIonExitRegE1 = r1;
+static constexpr Register WasmIonExitRegCallee = r8;
+static constexpr Register WasmIonExitRegE0 = r0;
+static constexpr Register WasmIonExitRegE1 = r1;
 
 // Registers used in the GenerateFFIIonExit Disable Activation block.
 // None of these may be the second scratch register.
-static constexpr Register AsmJSIonExitRegReturnData = r2;
-static constexpr Register AsmJSIonExitRegReturnType = r3;
-static constexpr Register AsmJSIonExitRegD0 = r0;
-static constexpr Register AsmJSIonExitRegD1 = r1;
-static constexpr Register AsmJSIonExitRegD2 = r4;
+static constexpr Register WasmIonExitRegReturnData = r2;
+static constexpr Register WasmIonExitRegReturnType = r3;
+static constexpr Register WasmIonExitRegD0 = r0;
+static constexpr Register WasmIonExitRegD1 = r1;
+static constexpr Register WasmIonExitRegD2 = r4;
 
 // Registers used in RegExpMatcher instruction (do not use JSReturnOperand).
 static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
 static constexpr Register RegExpMatcherStringReg = CallTempReg1;
 static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
 
 // Registers used in RegExpTester instruction (do not use ReturnReg).
 static constexpr Register RegExpTesterRegExpReg = CallTempReg0;
@@ -165,18 +164,18 @@ static_assert(JitStackAlignment % sizeof
 static constexpr bool SupportsSimd = false;
 static constexpr uint32_t SimdMemoryAlignment = 16;
 
 static_assert(CodeAlignment % SimdMemoryAlignment == 0,
   "Code alignment should be larger than any of the alignments which are used for "
   "the constant sections of the code buffer.  Thus it should be larger than the "
   "alignment for SIMD constants.");
 
-static const uint32_t AsmJSStackAlignment = SimdMemoryAlignment;
-static const int32_t AsmJSGlobalRegBias = 1024;
+static const uint32_t WasmStackAlignment = SimdMemoryAlignment;
+static const int32_t WasmGlobalRegBias = 1024;
 
 // Does this architecture support SIMD conversions between Uint32x4 and Float32x4?
 static constexpr bool SupportsUint32x4FloatConversions = false;
 
 // Does this architecture support comparisons of unsigned integer vectors?
 static constexpr bool SupportsUint8x16Compares = false;
 static constexpr bool SupportsUint16x8Compares = false;
 static constexpr bool SupportsUint32x4Compares = false;
@@ -465,17 +464,17 @@ static constexpr Register ABINonArgReg2 
 static constexpr Register ABINonArgReturnReg0 = r8;
 static constexpr Register ABINonArgReturnReg1 = r9;
 
 // TLS pointer argument register for WebAssembly functions. This must not alias
 // any other register used for passing function arguments or return values.
 // Preserved by WebAssembly functions.
 static constexpr Register WasmTlsReg = { Registers::x17 };
 
-// Registers used for asm.js/wasm table calls. These registers must be disjoint
+// Registers used for wasm table calls. These registers must be disjoint
 // from the ABI argument registers, WasmTlsReg and each other.
 static constexpr Register WasmTableCallScratchReg = ABINonArgReg0;
 static constexpr Register WasmTableCallSigReg = ABINonArgReg1;
 static constexpr Register WasmTableCallIndexReg = ABINonArgReg2;
 
 static inline bool
 GetIntArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out)
 {
--- a/js/src/jit/arm64/MacroAssembler-arm64.cpp
+++ b/js/src/jit/arm64/MacroAssembler-arm64.cpp
@@ -621,17 +621,17 @@ MacroAssembler::setupUnalignedABICall(Re
     // If the PseudoStackPointer is used, sp must be <= psp before a write is valid.
     syncStackPtr();
 
     // Store previous sp to the top of the stack, aligned.
     Str(scratch64, MemOperand(GetStackPointer64(), 0));
 }
 
 void
-MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromAsmJS)
+MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm)
 {
     MOZ_ASSERT(inCall_);
     uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
 
     // ARM64 /really/ wants the stack to always be aligned. Since we're
     // already tracking it, getting it aligned for an ABI call is pretty easy.
     MOZ_ASSERT(dynamicAlignment_);
     stackForCall += ComputeByteAlignment(stackForCall, StackAlignment);
--- a/js/src/jit/arm64/MacroAssembler-arm64.h
+++ b/js/src/jit/arm64/MacroAssembler-arm64.h
@@ -707,17 +707,17 @@ class MacroAssemblerCompat : public vixl
     void movePtr(ImmWord imm, Register dest) {
         Mov(ARMRegister(dest, 64), int64_t(imm.value));
     }
     void movePtr(ImmPtr imm, Register dest) {
         Mov(ARMRegister(dest, 64), int64_t(imm.value));
     }
     void movePtr(wasm::SymbolicAddress imm, Register dest) {
         BufferOffset off = movePatchablePtr(ImmWord(0xffffffffffffffffULL), dest);
-        append(AsmJSAbsoluteAddress(CodeOffset(off.getOffset()), imm));
+        append(WasmAbsoluteAddress(CodeOffset(off.getOffset()), imm));
     }
     void movePtr(ImmGCPtr imm, Register dest) {
         BufferOffset load = movePatchablePtr(ImmPtr(imm.value), dest);
         writeDataRelocation(imm, load);
     }
 
     void mov(ImmWord imm, Register dest) {
         movePtr(imm, dest);
@@ -2300,22 +2300,22 @@ class MacroAssemblerCompat : public vixl
     // and assert that the value is equal to the current sp.
     void simulatorCheckSP() {
 #ifdef JS_SIMULATOR_ARM64
         svc(vixl::kCheckStackPointer);
 #endif
     }
 
     void loadWasmGlobalPtr(uint32_t globalDataOffset, Register dest) {
-        loadPtr(Address(GlobalReg, globalDataOffset - AsmJSGlobalRegBias), dest);
+        loadPtr(Address(GlobalReg, globalDataOffset - WasmGlobalRegBias), dest);
     }
     void loadWasmPinnedRegsFromTls() {
         loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, memoryBase)), HeapReg);
         loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, globalData)), GlobalReg);
-        adds32(Imm32(AsmJSGlobalRegBias), GlobalReg);
+        adds32(Imm32(WasmGlobalRegBias), GlobalReg);
     }
 
     // Overwrites the payload bits of a dest register containing a Value.
     void movePayload(Register src, Register dest) {
         // Bfxil cannot be used with the zero register as a source.
         if (src == rzr)
             And(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(~int64_t(JSVAL_PAYLOAD_MASK)));
         else
--- a/js/src/jit/mips-shared/Assembler-mips-shared.h
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.h
@@ -105,28 +105,28 @@ static constexpr Register StackPointer =
 static constexpr Register FramePointer = InvalidReg;
 static constexpr Register ReturnReg = v0;
 static constexpr FloatRegister ReturnSimd128Reg = InvalidFloatReg;
 static constexpr FloatRegister ScratchSimd128Reg = InvalidFloatReg;
 
 // A bias applied to the GlobalReg to allow the use of instructions with small
 // negative immediate offsets, which doubles the range of global data that can be
 // accessed with a single instruction.
-static const int32_t AsmJSGlobalRegBias = 32768;
+static const int32_t WasmGlobalRegBias = 32768;
 
 // Registers used in the GenerateFFIIonExit Enable Activation block.
-static constexpr Register AsmJSIonExitRegCallee = t0;
-static constexpr Register AsmJSIonExitRegE0 = a0;
-static constexpr Register AsmJSIonExitRegE1 = a1;
+static constexpr Register WasmIonExitRegCallee = t0;
+static constexpr Register WasmIonExitRegE0 = a0;
+static constexpr Register WasmIonExitRegE1 = a1;
 
 // Registers used in the GenerateFFIIonExit Disable Activation block.
 // None of these may be the second scratch register (t8).
-static constexpr Register AsmJSIonExitRegD0 = a0;
-static constexpr Register AsmJSIonExitRegD1 = a1;
-static constexpr Register AsmJSIonExitRegD2 = t0;
+static constexpr Register WasmIonExitRegD0 = a0;
+static constexpr Register WasmIonExitRegD1 = a1;
+static constexpr Register WasmIonExitRegD2 = t0;
 
 // Registers used in RegExpMatcher instruction (do not use JSReturnOperand).
 static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
 static constexpr Register RegExpMatcherStringReg = CallTempReg1;
 static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
 
 // Registers used in RegExpTester instruction (do not use ReturnReg).
 static constexpr Register RegExpTesterRegExpReg = CallTempReg0;
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
@@ -2113,20 +2113,20 @@ CodeGeneratorMIPSShared::visitAsmJSLoadH
         masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
                      static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
     }
     masm.ma_b(&done, ShortJump);
     masm.bind(&outOfRange);
     // Offset is out of range. Load default values.
     if (isFloat) {
         if (size == 32)
-            masm.loadFloat32(Address(GlobalReg, wasm::NaN32GlobalDataOffset - AsmJSGlobalRegBias),
+            masm.loadFloat32(Address(GlobalReg, wasm::NaN32GlobalDataOffset - WasmGlobalRegBias),
                              ToFloatRegister(out));
         else
-            masm.loadDouble(Address(GlobalReg, wasm::NaN64GlobalDataOffset - AsmJSGlobalRegBias),
+            masm.loadDouble(Address(GlobalReg, wasm::NaN64GlobalDataOffset - WasmGlobalRegBias),
                             ToFloatRegister(out));
     } else {
         masm.move32(Imm32(0), ToRegister(out));
     }
     masm.bind(&done);
 
     masm.append(wasm::BoundsCheck(bo.getOffset()));
 }
@@ -2454,32 +2454,32 @@ CodeGeneratorMIPSShared::visitEffectiveA
     BaseIndex address(base, index, mir->scale(), mir->displacement());
     masm.computeEffectiveAddress(address, output);
 }
 
 void
 CodeGeneratorMIPSShared::visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins)
 {
     const MWasmLoadGlobalVar* mir = ins->mir();
-    unsigned addr = mir->globalDataOffset() - AsmJSGlobalRegBias;
+    unsigned addr = mir->globalDataOffset() - WasmGlobalRegBias;
     if (mir->type() == MIRType::Int32)
         masm.load32(Address(GlobalReg, addr), ToRegister(ins->output()));
     else if (mir->type() == MIRType::Float32)
         masm.loadFloat32(Address(GlobalReg, addr), ToFloatRegister(ins->output()));
     else
         masm.loadDouble(Address(GlobalReg, addr), ToFloatRegister(ins->output()));
 }
 
 void
 CodeGeneratorMIPSShared::visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins)
 {
     const MWasmStoreGlobalVar* mir = ins->mir();
 
     MOZ_ASSERT(IsNumberType(mir->value()->type()));
-    unsigned addr = mir->globalDataOffset() - AsmJSGlobalRegBias;
+    unsigned addr = mir->globalDataOffset() - WasmGlobalRegBias;
     if (mir->value()->type() == MIRType::Int32)
         masm.store32(ToRegister(ins->value()), Address(GlobalReg, addr));
     else if (mir->value()->type() == MIRType::Float32)
         masm.storeFloat32(ToFloatRegister(ins->value()), Address(GlobalReg, addr));
     else
         masm.storeDouble(ToFloatRegister(ins->value()), Address(GlobalReg, addr));
 }
 
--- a/js/src/jit/mips32/Assembler-mips32.h
+++ b/js/src/jit/mips32/Assembler-mips32.h
@@ -70,18 +70,18 @@ static constexpr FloatRegister ReturnFlo
 static constexpr FloatRegister ReturnDoubleReg = { FloatRegisters::f0, FloatRegister::Double };
 static constexpr FloatRegister ScratchFloat32Reg = { FloatRegisters::f18, FloatRegister::Single };
 static constexpr FloatRegister ScratchDoubleReg = { FloatRegisters::f18, FloatRegister::Double };
 static constexpr FloatRegister SecondScratchFloat32Reg = { FloatRegisters::f16, FloatRegister::Single };
 static constexpr FloatRegister SecondScratchDoubleReg = { FloatRegisters::f16, FloatRegister::Double };
 
 // Registers used in the GenerateFFIIonExit Disable Activation block.
 // None of these may be the second scratch register (t8).
-static constexpr Register AsmJSIonExitRegReturnData = JSReturnReg_Data;
-static constexpr Register AsmJSIonExitRegReturnType = JSReturnReg_Type;
+static constexpr Register WasmIonExitRegReturnData = JSReturnReg_Data;
+static constexpr Register WasmIonExitRegReturnType = JSReturnReg_Type;
 
 static constexpr FloatRegister f0  = { FloatRegisters::f0, FloatRegister::Double };
 static constexpr FloatRegister f2  = { FloatRegisters::f2, FloatRegister::Double };
 static constexpr FloatRegister f4  = { FloatRegisters::f4, FloatRegister::Double };
 static constexpr FloatRegister f6  = { FloatRegisters::f6, FloatRegister::Double };
 static constexpr FloatRegister f8  = { FloatRegisters::f8, FloatRegister::Double };
 static constexpr FloatRegister f10 = { FloatRegisters::f10, FloatRegister::Double };
 static constexpr FloatRegister f12 = { FloatRegisters::f12, FloatRegister::Double };
@@ -103,17 +103,17 @@ static constexpr uint32_t JitStackAlignm
 static constexpr uint32_t JitStackValueAlignment = JitStackAlignment / sizeof(Value);
 static_assert(JitStackAlignment % sizeof(Value) == 0 && JitStackValueAlignment >= 1,
   "Stack alignment should be a non-zero multiple of sizeof(Value)");
 
 // TODO this is just a filler to prevent a build failure. The MIPS SIMD
 // alignment requirements still need to be explored.
 // TODO Copy the static_asserts from x64/x86 assembler files.
 static constexpr uint32_t SimdMemoryAlignment = 8;
-static constexpr uint32_t AsmJSStackAlignment = SimdMemoryAlignment;
+static constexpr uint32_t WasmStackAlignment = SimdMemoryAlignment;
 
 // Does this architecture support SIMD conversions between Uint32x4 and Float32x4?
 static constexpr bool SupportsUint32x4FloatConversions = false;
 
 // Does this architecture support comparisons of unsigned integer vectors?
 static constexpr bool SupportsUint8x16Compares = false;
 static constexpr bool SupportsUint16x8Compares = false;
 static constexpr bool SupportsUint32x4Compares = false;
--- a/js/src/jit/mips32/CodeGenerator-mips32.cpp
+++ b/js/src/jit/mips32/CodeGenerator-mips32.cpp
@@ -619,29 +619,29 @@ CodeGeneratorMIPS::visitWasmUnalignedSto
 {
     emitWasmStoreI64(lir);
 }
 
 void
 CodeGeneratorMIPS::visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins)
 {
     const MWasmLoadGlobalVar* mir = ins->mir();
-    unsigned addr = mir->globalDataOffset() - AsmJSGlobalRegBias;
+    unsigned addr = mir->globalDataOffset() - WasmGlobalRegBias;
     MOZ_ASSERT(mir->type() == MIRType::Int64);
     Register64 output = ToOutRegister64(ins);
 
     masm.load32(Address(GlobalReg, addr + INT64LOW_OFFSET), output.low);
     masm.load32(Address(GlobalReg, addr + INT64HIGH_OFFSET), output.high);
 }
 
 void
 CodeGeneratorMIPS::visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins)
 {
     const MWasmStoreGlobalVar* mir = ins->mir();
-    unsigned addr = mir->globalDataOffset() - AsmJSGlobalRegBias;
+    unsigned addr = mir->globalDataOffset() - WasmGlobalRegBias;
     MOZ_ASSERT(mir->value()->type() == MIRType::Int64);
     Register64 input = ToRegister64(ins->value());
 
     masm.store32(input.low, Address(GlobalReg, addr + INT64LOW_OFFSET));
     masm.store32(input.high, Address(GlobalReg, addr + INT64HIGH_OFFSET));
 }
 
 void
--- a/js/src/jit/mips32/MacroAssembler-mips32.cpp
+++ b/js/src/jit/mips32/MacroAssembler-mips32.cpp
@@ -799,17 +799,17 @@ MacroAssemblerMIPSCompat::movePtr(ImmGCP
 void
 MacroAssemblerMIPSCompat::movePtr(ImmPtr imm, Register dest)
 {
     movePtr(ImmWord(uintptr_t(imm.value)), dest);
 }
 void
 MacroAssemblerMIPSCompat::movePtr(wasm::SymbolicAddress imm, Register dest)
 {
-    append(AsmJSAbsoluteAddress(CodeOffset(nextOffset().getOffset()), imm));
+    append(WasmAbsoluteAddress(CodeOffset(nextOffset().getOffset()), imm));
     ma_liPatchable(dest, ImmWord(-1));
 }
 
 void
 MacroAssemblerMIPSCompat::load8ZeroExtend(const Address& address, Register dest)
 {
     ma_load(dest, address, SizeByte, ZeroExtend);
 }
@@ -2190,28 +2190,28 @@ MacroAssembler::setupUnalignedABICall(Re
 
     // Force sp to be aligned
     asMasm().subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
     ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
     storePtr(scratch, Address(StackPointer, 0));
 }
 
 void
-MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromAsmJS)
+MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm)
 {
     MOZ_ASSERT(inCall_);
     uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
 
     // Reserve space for $ra.
     stackForCall += sizeof(intptr_t);
 
     if (dynamicAlignment_) {
         stackForCall += ComputeByteAlignment(stackForCall, ABIStackAlignment);
     } else {
-        uint32_t alignmentAtPrologue = callFromAsmJS ? sizeof(AsmJSFrame) : 0;
+        uint32_t alignmentAtPrologue = callFromWasm ? sizeof(WasmFrame) : 0;
         stackForCall += ComputeByteAlignment(stackForCall + framePushed() + alignmentAtPrologue,
                                              ABIStackAlignment);
     }
 
     *stackAdjust = stackForCall;
     reserveStack(stackForCall);
 
     // Save $ra because call is going to clobber it. Restore it in
--- a/js/src/jit/mips32/MacroAssembler-mips32.h
+++ b/js/src/jit/mips32/MacroAssembler-mips32.h
@@ -1007,22 +1007,22 @@ class MacroAssemblerMIPSCompat : public 
         ma_liPatchable(bounded, ImmWord(0));
         return bo;
     }
 
     void moveFloat32(FloatRegister src, FloatRegister dest) {
         as_movs(dest, src);
     }
     void loadWasmGlobalPtr(uint32_t globalDataOffset, Register dest) {
-        loadPtr(Address(GlobalReg, globalDataOffset - AsmJSGlobalRegBias), dest);
+        loadPtr(Address(GlobalReg, globalDataOffset - WasmGlobalRegBias), dest);
     }
     void loadWasmPinnedRegsFromTls() {
         loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, memoryBase)), HeapReg);
         loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, globalData)), GlobalReg);
-        ma_addu(GlobalReg, Imm32(AsmJSGlobalRegBias));
+        ma_addu(GlobalReg, Imm32(WasmGlobalRegBias));
     }
 
     // Instrumentation for entering and leaving the profiler.
     void profilerEnterFrame(Register framePtr, Register scratch);
     void profilerExitFrame();
 };
 
 typedef MacroAssemblerMIPSCompat MacroAssemblerSpecific;
--- a/js/src/jit/mips32/Simulator-mips32.cpp
+++ b/js/src/jit/mips32/Simulator-mips32.cpp
@@ -3351,17 +3351,17 @@ Simulator::execute()
             dbg.debug();
         } else {
             SimInstruction* instr = reinterpret_cast<SimInstruction*>(program_counter);
             instructionDecode(instr);
             icount_++;
 
             int32_t rpc = resume_pc_;
             if (MOZ_UNLIKELY(rpc != 0)) {
-                // AsmJS signal handler ran and we have to adjust the pc.
+                // The wasm signal handler ran and we have to adjust the pc.
                 activation->setResumePC((void*)get_pc());
                 set_pc(rpc);
                 resume_pc_ = 0;
             }
         }
         program_counter = get_pc();
     }
 }
--- a/js/src/jit/mips64/Assembler-mips64.h
+++ b/js/src/jit/mips64/Assembler-mips64.h
@@ -45,17 +45,17 @@ static constexpr Register ABINonArgReg2 
 static constexpr Register ABINonArgReturnReg0 = t0;
 static constexpr Register ABINonArgReturnReg1 = t1;
 
 // TLS pointer argument register for WebAssembly functions. This must not alias
 // any other register used for passing function arguments or return values.
 // Preserved by WebAssembly functions.
 static constexpr Register WasmTlsReg = s5;
 
-// Registers used for asm.js/wasm table calls. These registers must be disjoint
+// Registers used for wasm table calls. These registers must be disjoint
 // from the ABI argument registers, WasmTlsReg and each other.
 static constexpr Register WasmTableCallScratchReg = ABINonArgReg0;
 static constexpr Register WasmTableCallSigReg = ABINonArgReg1;
 static constexpr Register WasmTableCallIndexReg = ABINonArgReg2;
 
 static constexpr Register JSReturnReg = v1;
 static constexpr Register JSReturnReg_Type = JSReturnReg;
 static constexpr Register JSReturnReg_Data = JSReturnReg;
@@ -64,18 +64,18 @@ static constexpr FloatRegister ReturnFlo
 static constexpr FloatRegister ReturnDoubleReg = { FloatRegisters::f0, FloatRegisters::Double };
 static constexpr FloatRegister ScratchFloat32Reg = { FloatRegisters::f23, FloatRegisters::Single };
 static constexpr FloatRegister ScratchDoubleReg = { FloatRegisters::f23, FloatRegisters::Double };
 static constexpr FloatRegister SecondScratchFloat32Reg = { FloatRegisters::f21, FloatRegisters::Single };
 static constexpr FloatRegister SecondScratchDoubleReg = { FloatRegisters::f21, FloatRegisters::Double };
 
 // Registers used in the GenerateFFIIonExit Disable Activation block.
 // None of these may be the second scratch register (t8).
-static constexpr Register AsmJSIonExitRegReturnData = JSReturnReg_Data;
-static constexpr Register AsmJSIonExitRegReturnType = JSReturnReg_Type;
+static constexpr Register WasmIonExitRegReturnData = JSReturnReg_Data;
+static constexpr Register WasmIonExitRegReturnType = JSReturnReg_Type;
 
 static constexpr FloatRegister f0  = { FloatRegisters::f0, FloatRegisters::Double };
 static constexpr FloatRegister f1  = { FloatRegisters::f1, FloatRegisters::Double };
 static constexpr FloatRegister f2  = { FloatRegisters::f2, FloatRegisters::Double };
 static constexpr FloatRegister f3  = { FloatRegisters::f3, FloatRegisters::Double };
 static constexpr FloatRegister f4  = { FloatRegisters::f4, FloatRegisters::Double };
 static constexpr FloatRegister f5  = { FloatRegisters::f5, FloatRegisters::Double };
 static constexpr FloatRegister f6  = { FloatRegisters::f6, FloatRegisters::Double };
@@ -114,17 +114,17 @@ static constexpr uint32_t JitStackValueA
 static_assert(JitStackAlignment % sizeof(Value) == 0 && JitStackValueAlignment >= 1,
   "Stack alignment should be a non-zero multiple of sizeof(Value)");
 
 // TODO this is just a filler to prevent a build failure. The MIPS SIMD
 // alignment requirements still need to be explored.
 // TODO Copy the static_asserts from x64/x86 assembler files.
 static constexpr uint32_t SimdMemoryAlignment = 16;
 
-static constexpr uint32_t AsmJSStackAlignment = SimdMemoryAlignment;
+static constexpr uint32_t WasmStackAlignment = SimdMemoryAlignment;
 
 // Does this architecture support SIMD conversions between Uint32x4 and Float32x4?
 static constexpr bool SupportsUint32x4FloatConversions = false;
 
 // Does this architecture support comparisons of unsigned integer vectors?
 static constexpr bool SupportsUint8x16Compares = false;
 static constexpr bool SupportsUint16x8Compares = false;
 static constexpr bool SupportsUint32x4Compares = false;
--- a/js/src/jit/mips64/CodeGenerator-mips64.cpp
+++ b/js/src/jit/mips64/CodeGenerator-mips64.cpp
@@ -539,26 +539,26 @@ CodeGeneratorMIPS64::visitWasmUnalignedS
 {
     emitWasmStoreI64(lir);
 }
 
 void
 CodeGeneratorMIPS64::visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins)
 {
     const MWasmLoadGlobalVar* mir = ins->mir();
-    unsigned addr = mir->globalDataOffset() - AsmJSGlobalRegBias;
+    unsigned addr = mir->globalDataOffset() - WasmGlobalRegBias;
     MOZ_ASSERT(mir->type() == MIRType::Int64);
     masm.load64(Address(GlobalReg, addr), ToOutRegister64(ins));
 }
 
 void
 CodeGeneratorMIPS64::visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins)
 {
     const MWasmStoreGlobalVar* mir = ins->mir();
-    unsigned addr = mir->globalDataOffset() - AsmJSGlobalRegBias;
+    unsigned addr = mir->globalDataOffset() - WasmGlobalRegBias;
     MOZ_ASSERT(mir->value()->type() == MIRType::Int64);
     masm.store64(ToRegister64(ins->value()), Address(GlobalReg, addr));
 }
 
 void
 CodeGeneratorMIPS64::visitWasmSelectI64(LWasmSelectI64* lir)
 {
     MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
--- a/js/src/jit/mips64/MacroAssembler-mips64.cpp
+++ b/js/src/jit/mips64/MacroAssembler-mips64.cpp
@@ -1019,17 +1019,17 @@ MacroAssemblerMIPS64Compat::movePtr(ImmG
 void
 MacroAssemblerMIPS64Compat::movePtr(ImmPtr imm, Register dest)
 {
     movePtr(ImmWord(uintptr_t(imm.value)), dest);
 }
 void
 MacroAssemblerMIPS64Compat::movePtr(wasm::SymbolicAddress imm, Register dest)
 {
-    append(AsmJSAbsoluteAddress(CodeOffset(nextOffset().getOffset()), imm));
+    append(WasmAbsoluteAddress(CodeOffset(nextOffset().getOffset()), imm));
     ma_liPatchable(dest, ImmWord(-1));
 }
 
 void
 MacroAssemblerMIPS64Compat::load8ZeroExtend(const Address& address, Register dest)
 {
     ma_load(dest, address, SizeByte, ZeroExtend);
 }
@@ -2334,28 +2334,28 @@ MacroAssembler::setupUnalignedABICall(Re
 
     // Force sp to be aligned
     asMasm().subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
     ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
     storePtr(scratch, Address(StackPointer, 0));
 }
 
 void
-MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromAsmJS)
+MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm)
 {
     MOZ_ASSERT(inCall_);
     uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
 
     // Reserve place for $ra.
     stackForCall += sizeof(intptr_t);
 
     if (dynamicAlignment_) {
         stackForCall += ComputeByteAlignment(stackForCall, ABIStackAlignment);
     } else {
-        uint32_t alignmentAtPrologue = callFromAsmJS ? sizeof(AsmJSFrame) : 0;
+        uint32_t alignmentAtPrologue = callFromWasm ? sizeof(WasmFrame) : 0;
         stackForCall += ComputeByteAlignment(stackForCall + framePushed() + alignmentAtPrologue,
                                              ABIStackAlignment);
     }
 
     *stackAdjust = stackForCall;
     reserveStack(stackForCall);
 
     // Save $ra because call is going to clobber it. Restore it in
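The alignment computation above is the same on every port; a minimal sketch of the padding helper, assuming jit::ComputeByteAlignment returns the number of bytes needed to round its input up to the next multiple of a power-of-two alignment:

#include <cassert>
#include <cstdint>

// Illustrative stand-in for jit::ComputeByteAlignment.
static uint32_t PaddingToAlign(uint32_t bytes, uint32_t alignment) {
    assert(alignment != 0 && (alignment & (alignment - 1)) == 0);
    return (alignment - bytes % alignment) % alignment;
}

// In the wasm branch above, the bytes already on the stack are framePushed()
// plus the frame header, so the call-site padding is:
//   PaddingToAlign(stackForCall + framePushed + sizeof(WasmFrame),
//                  ABIStackAlignment)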
--- a/js/src/jit/mips64/MacroAssembler-mips64.h
+++ b/js/src/jit/mips64/MacroAssembler-mips64.h
@@ -1025,22 +1025,22 @@ class MacroAssemblerMIPS64Compat : publi
         return bo;
     }
 
     void moveFloat32(FloatRegister src, FloatRegister dest) {
         as_movs(dest, src);
     }
 
     void loadWasmGlobalPtr(uint32_t globalDataOffset, Register dest) {
-        loadPtr(Address(GlobalReg, globalDataOffset - AsmJSGlobalRegBias), dest);
+        loadPtr(Address(GlobalReg, globalDataOffset - WasmGlobalRegBias), dest);
     }
     void loadWasmPinnedRegsFromTls() {
         loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, memoryBase)), HeapReg);
         loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, globalData)), GlobalReg);
-        ma_daddu(GlobalReg, Imm32(AsmJSGlobalRegBias));
+        ma_daddu(GlobalReg, Imm32(WasmGlobalRegBias));
     }
 
     // Instrumentation for entering and leaving the profiler.
     void profilerEnterFrame(Register framePtr, Register scratch);
     void profilerExitFrame();
 };
 
 typedef MacroAssemblerMIPS64Compat MacroAssemblerSpecific;
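The bias arithmetic in loadWasmGlobalPtr and loadWasmPinnedRegsFromTls cancels out, leaving a plain globalData + offset address. A sketch of why the bias exists, assuming the MIPS value of 32768: it shifts the signed 16-bit load/store displacement range [-32768, 32767] onto [0, 65535], doubling the global area reachable without extra instructions.

#include <cstdint>

static const int32_t kWasmGlobalRegBias = 32768;  // assumed MIPS value

uint8_t* effectiveGlobalAddress(uint8_t* globalData, uint32_t globalDataOffset) {
    // Prologue (loadWasmPinnedRegsFromTls): bias the pinned register once.
    uint8_t* globalReg = globalData + kWasmGlobalRegBias;
    // Access (loadWasmGlobalPtr): subtract the bias again, so the result is
    // simply globalData + globalDataOffset.
    return globalReg + (int32_t(globalDataOffset) - kWasmGlobalRegBias);
}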
--- a/js/src/jit/mips64/Simulator-mips64.cpp
+++ b/js/src/jit/mips64/Simulator-mips64.cpp
@@ -3702,17 +3702,17 @@ Simulator::execute()
             if (single_stepping_)
                 single_step_callback_(single_step_callback_arg_, this, (void*)program_counter);
             SimInstruction* instr = reinterpret_cast<SimInstruction *>(program_counter);
             instructionDecode(instr);
             icount_++;
 
             int64_t rpc = resume_pc_;
             if (MOZ_UNLIKELY(rpc != 0)) {
-                // AsmJS signal handler ran and we have to adjust the pc.
+                // The wasm signal handler ran and we have to adjust the pc.
                 activation->setResumePC((void*)get_pc());
                 set_pc(rpc);
                 resume_pc_ = 0;
             }
         }
         program_counter = get_pc();
     }
 
--- a/js/src/jit/none/Architecture-none.h
+++ b/js/src/jit/none/Architecture-none.h
@@ -11,17 +11,17 @@
 // platforms, so include it here to avoid inadvertent build bustage.
 #include "jit/JitSpewer.h"
 
 namespace js {
 namespace jit {
 
 static const bool SupportsSimd = false;
 static const uint32_t SimdMemoryAlignment = 4; // Make it 4 to avoid a bunch of div-by-zero warnings
-static const uint32_t AsmJSStackAlignment = 8;
+static const uint32_t WasmStackAlignment = 8;
 
 // Does this architecture support SIMD conversions between Uint32x4 and Float32x4?
 static constexpr bool SupportsUint32x4FloatConversions = false;
 
 // Does this architecture support comparisons of unsigned integer vectors?
 static constexpr bool SupportsUint8x16Compares = false;
 static constexpr bool SupportsUint16x8Compares = false;
 static constexpr bool SupportsUint32x4Compares = false;
--- a/js/src/jit/none/MacroAssembler-none.h
+++ b/js/src/jit/none/MacroAssembler-none.h
@@ -38,25 +38,25 @@ static constexpr Register InvalidReg = {
 
 static constexpr Register IntArgReg0 = { Registers::invalid_reg };
 static constexpr Register IntArgReg1 = { Registers::invalid_reg };
 static constexpr Register IntArgReg2 = { Registers::invalid_reg };
 static constexpr Register IntArgReg3 = { Registers::invalid_reg };
 static constexpr Register GlobalReg = { Registers::invalid_reg };
 static constexpr Register HeapReg = { Registers::invalid_reg };
 
-static constexpr Register AsmJSIonExitRegCallee = { Registers::invalid_reg };
-static constexpr Register AsmJSIonExitRegE0 = { Registers::invalid_reg };
-static constexpr Register AsmJSIonExitRegE1 = { Registers::invalid_reg };
+static constexpr Register WasmIonExitRegCallee = { Registers::invalid_reg };
+static constexpr Register WasmIonExitRegE0 = { Registers::invalid_reg };
+static constexpr Register WasmIonExitRegE1 = { Registers::invalid_reg };
 
-static constexpr Register AsmJSIonExitRegReturnData = { Registers::invalid_reg };
-static constexpr Register AsmJSIonExitRegReturnType = { Registers::invalid_reg };
-static constexpr Register AsmJSIonExitRegD0 = { Registers::invalid_reg };
-static constexpr Register AsmJSIonExitRegD1 = { Registers::invalid_reg };
-static constexpr Register AsmJSIonExitRegD2 = { Registers::invalid_reg };
+static constexpr Register WasmIonExitRegReturnData = { Registers::invalid_reg };
+static constexpr Register WasmIonExitRegReturnType = { Registers::invalid_reg };
+static constexpr Register WasmIonExitRegD0 = { Registers::invalid_reg };
+static constexpr Register WasmIonExitRegD1 = { Registers::invalid_reg };
+static constexpr Register WasmIonExitRegD2 = { Registers::invalid_reg };
 
 static constexpr Register RegExpTesterRegExpReg = { Registers::invalid_reg };
 static constexpr Register RegExpTesterStringReg = { Registers::invalid_reg };
 static constexpr Register RegExpTesterLastIndexReg = { Registers::invalid_reg };
 static constexpr Register RegExpTesterStickyReg = { Registers::invalid_reg };
 
 static constexpr Register RegExpMatcherRegExpReg = { Registers::invalid_reg };
 static constexpr Register RegExpMatcherStringReg = { Registers::invalid_reg };
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -144,68 +144,68 @@ struct Imm64
     }
 
     inline Imm32 firstHalf() const;
     inline Imm32 secondHalf() const;
 };
 
 #ifdef DEBUG
 static inline bool
-IsCompilingAsmJS()
+IsCompilingWasm()
 {
-    // asm.js compilation pushes a JitContext with a null JSCompartment.
+    // wasm compilation pushes a JitContext with a null JSCompartment.
     return GetJitContext()->compartment == nullptr;
 }
 #endif
 
 // Pointer to be embedded as an immediate in an instruction.
 struct ImmPtr
 {
     void* value;
 
     explicit ImmPtr(const void* value) : value(const_cast<void*>(value))
     {
         // To make code serialization-safe, wasm compilation should only
         // compile pointer immediates using a SymbolicAddress.
-        MOZ_ASSERT(!IsCompilingAsmJS());
+        MOZ_ASSERT(!IsCompilingWasm());
     }
 
     template <class R>
     explicit ImmPtr(R (*pf)())
       : value(JS_FUNC_TO_DATA_PTR(void*, pf))
     {
-        MOZ_ASSERT(!IsCompilingAsmJS());
+        MOZ_ASSERT(!IsCompilingWasm());
     }
 
     template <class R, class A1>
     explicit ImmPtr(R (*pf)(A1))
       : value(JS_FUNC_TO_DATA_PTR(void*, pf))
     {
-        MOZ_ASSERT(!IsCompilingAsmJS());
+        MOZ_ASSERT(!IsCompilingWasm());
     }
 
     template <class R, class A1, class A2>
     explicit ImmPtr(R (*pf)(A1, A2))
       : value(JS_FUNC_TO_DATA_PTR(void*, pf))
     {
-        MOZ_ASSERT(!IsCompilingAsmJS());
+        MOZ_ASSERT(!IsCompilingWasm());
     }
 
     template <class R, class A1, class A2, class A3>
     explicit ImmPtr(R (*pf)(A1, A2, A3))
       : value(JS_FUNC_TO_DATA_PTR(void*, pf))
     {
-        MOZ_ASSERT(!IsCompilingAsmJS());
+        MOZ_ASSERT(!IsCompilingWasm());
     }
 
     template <class R, class A1, class A2, class A3, class A4>
     explicit ImmPtr(R (*pf)(A1, A2, A3, A4))
       : value(JS_FUNC_TO_DATA_PTR(void*, pf))
     {
-        MOZ_ASSERT(!IsCompilingAsmJS());
+        MOZ_ASSERT(!IsCompilingWasm());
     }
 
 };
 
 // The same as ImmPtr except that the intention is to patch this
 // instruction. The initial value of the immediate is 'addr' and this value is
 // either clobbered or used in the patching process.
 struct PatchedImmPtr {
@@ -230,34 +230,34 @@ class ImmGCPtr
 
     explicit ImmGCPtr(const gc::Cell* ptr) : value(ptr)
     {
         // Nursery pointers can't be used if the main thread might be currently
         // performing a minor GC.
         MOZ_ASSERT_IF(ptr && !ptr->isTenured(),
                       !CurrentThreadIsIonCompilingSafeForMinorGC());
 
-        // asm.js shouldn't be creating GC things
-        MOZ_ASSERT(!IsCompilingAsmJS());
+        // wasm shouldn't be creating GC things
+        MOZ_ASSERT(!IsCompilingWasm());
     }
 
   private:
     ImmGCPtr() : value(0) {}
 };
 
 // Pointer to be embedded as an immediate that is loaded/stored from by an
 // instruction.
 struct AbsoluteAddress
 {
     void* addr;
 
     explicit AbsoluteAddress(const void* addr)
       : addr(const_cast<void*>(addr))
     {
-        MOZ_ASSERT(!IsCompilingAsmJS());
+        MOZ_ASSERT(!IsCompilingWasm());
     }
 
     AbsoluteAddress offset(ptrdiff_t delta) {
         return AbsoluteAddress(((uint8_t*) addr) + delta);
     }
 };
 
 // The same as AbsoluteAddress except that the intention is to patch this
@@ -656,42 +656,42 @@ class CodeLocationLabel
         return raw_;
     }
     uint8_t* offset() const {
         MOZ_ASSERT(state_ == Relative);
         return raw_;
     }
 };
 
-// As an invariant across architectures, within asm.js code:
-//   $sp % AsmJSStackAlignment = (sizeof(AsmJSFrame) + masm.framePushed) % AsmJSStackAlignment
-// Thus, AsmJSFrame represents the bytes pushed after the call (which occurred
-// with a AsmJSStackAlignment-aligned StackPointer) that are not included in
+// As an invariant across architectures, within wasm code:
+//   $sp % WasmStackAlignment = (sizeof(WasmFrame) + masm.framePushed) % WasmStackAlignment
+// Thus, WasmFrame represents the bytes pushed after the call (which occurred
+// with a WasmStackAlignment-aligned StackPointer) that are not included in
 // masm.framePushed.
-struct AsmJSFrame
+struct WasmFrame
 {
     // The caller's saved frame pointer. In non-profiling mode, internal
-    // asm.js-to-asm.js calls don't update fp and thus don't save the caller's
+    // wasm-to-wasm calls don't update fp and thus don't save the caller's
     // frame pointer; the space is reserved, however, so that profiling mode can
     // reuse the same function body without recompiling.
     uint8_t* callerFP;
 
     // The return address pushed by the call (in the case of ARM/MIPS the return
     // address is pushed by the first instruction of the prologue).
     void* returnAddress;
 };
-static_assert(sizeof(AsmJSFrame) == 2 * sizeof(void*), "?!");
-static const uint32_t AsmJSFrameBytesAfterReturnAddress = sizeof(void*);
+static_assert(sizeof(WasmFrame) == 2 * sizeof(void*), "WasmFrame is the saved frame pointer plus the return address");
+static const uint32_t WasmFrameBytesAfterReturnAddress = sizeof(void*);
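A worked instance of the invariant at a call site may help; this assumes a 64-bit target (so sizeof(WasmFrame) == 16) and WasmStackAlignment == 16, as on x64:

#include <cassert>
#include <cstdint>

int main() {
    const uintptr_t kAlign = 16;                 // assumed WasmStackAlignment
    const uintptr_t kFrame = 2 * sizeof(void*);  // callerFP + returnAddress

    uintptr_t entrySp = 0x7fff0000;  // aligned sp at the call into wasm
    uintptr_t framePushed = 48;      // chosen so kFrame + framePushed is a
                                     // multiple of kAlign, as at call sites
    uintptr_t sp = entrySp - kFrame - framePushed;

    // Both sides of the invariant are zero here, which is exactly what
    // emitWasmCallBase asserts before emitting a call.
    assert(sp % kAlign == (kFrame + framePushed) % kAlign);
    assert((kFrame + framePushed) % kAlign == 0);
    return 0;
}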
 
 // Represents an instruction to be patched and the intended pointee. These
 // links are accumulated in the MacroAssembler, but patching is done outside
 // the MacroAssembler (in Module::staticallyLink).
-struct AsmJSAbsoluteAddress
+struct WasmAbsoluteAddress
 {
-    AsmJSAbsoluteAddress(CodeOffset patchAt, wasm::SymbolicAddress target)
+    WasmAbsoluteAddress(CodeOffset patchAt, wasm::SymbolicAddress target)
       : patchAt(patchAt), target(target) {}
 
     CodeOffset patchAt;
     wasm::SymbolicAddress target;
 };
 
 } // namespace jit
 
@@ -745,17 +745,17 @@ class MemoryAccessDesc
     bool isSimd() const { return Scalar::isSimdType(type_); }
     bool isUnaligned() const { return align() && align() < byteSize(); }
     bool isPlainAsmJS() const { return !hasTrap(); }
 
     void clearOffset() { offset_ = 0; }
 };
 
 // Summarizes a global access for a mutable (in asm.js) or immutable value (in
-// asm.js or the MVP) that needs to get patched later.
+// asm.js or the wasm MVP) that needs to get patched later.
 
 struct GlobalAccess
 {
     GlobalAccess(jit::CodeOffset patchAt, unsigned globalDataOffset)
       : patchAt(patchAt), globalDataOffset(globalDataOffset)
     {}
 
     jit::CodeOffset patchAt;
@@ -824,17 +824,17 @@ class AssemblerShared
 {
     wasm::CallSiteAndTargetVector callSites_;
     wasm::TrapSiteVector trapSites_;
     wasm::TrapFarJumpVector trapFarJumps_;
     wasm::MemoryAccessVector memoryAccesses_;
     wasm::MemoryPatchVector memoryPatches_;
     wasm::BoundsCheckVector boundsChecks_;
     wasm::GlobalAccessVector globalAccesses_;
-    Vector<AsmJSAbsoluteAddress, 0, SystemAllocPolicy> asmJSAbsoluteAddresses_;
+    Vector<WasmAbsoluteAddress, 0, SystemAllocPolicy> wasmAbsoluteAddresses_;
 
   protected:
     Vector<CodeLabel, 0, SystemAllocPolicy> codeLabels_;
 
     bool enoughMemory_;
     bool embedsNurseryPointers_;
 
   public:
@@ -858,19 +858,19 @@ class AssemblerShared
     bool embedsNurseryPointers() const {
         return embedsNurseryPointers_;
     }
 
     template <typename... Args>
     void append(const wasm::CallSiteDesc& desc, CodeOffset retAddr, size_t framePushed,
                 Args&&... args)
     {
-        // framePushed does not include sizeof(AsmJSFrame), so add it in explicitly when
+        // framePushed does not include sizeof(WasmFrame), so add it in explicitly when
         // setting the CallSite::stackDepth.
-        wasm::CallSite cs(desc, retAddr.offset(), framePushed + sizeof(AsmJSFrame));
+        wasm::CallSite cs(desc, retAddr.offset(), framePushed + sizeof(WasmFrame));
         enoughMemory_ &= callSites_.emplaceBack(cs, mozilla::Forward<Args>(args)...);
     }
     wasm::CallSiteAndTargetVector& callSites() { return callSites_; }
 
     void append(wasm::TrapSite trapSite) {
         enoughMemory_ &= trapSites_.append(trapSite);
     }
     const wasm::TrapSiteVector& trapSites() const { return trapSites_; }
@@ -907,19 +907,19 @@ class AssemblerShared
     wasm::MemoryPatchVector&& extractMemoryPatches() { return Move(memoryPatches_); }
 
     void append(wasm::BoundsCheck check) { enoughMemory_ &= boundsChecks_.append(check); }
     wasm::BoundsCheckVector&& extractBoundsChecks() { return Move(boundsChecks_); }
 
     void append(wasm::GlobalAccess access) { enoughMemory_ &= globalAccesses_.append(access); }
     const wasm::GlobalAccessVector& globalAccesses() const { return globalAccesses_; }
 
-    void append(AsmJSAbsoluteAddress link) { enoughMemory_ &= asmJSAbsoluteAddresses_.append(link); }
-    size_t numAsmJSAbsoluteAddresses() const { return asmJSAbsoluteAddresses_.length(); }
-    AsmJSAbsoluteAddress asmJSAbsoluteAddress(size_t i) const { return asmJSAbsoluteAddresses_[i]; }
+    void append(WasmAbsoluteAddress link) { enoughMemory_ &= wasmAbsoluteAddresses_.append(link); }
+    size_t numWasmAbsoluteAddresses() const { return wasmAbsoluteAddresses_.length(); }
+    WasmAbsoluteAddress wasmAbsoluteAddress(size_t i) const { return wasmAbsoluteAddresses_[i]; }
 
     static bool canUseInSingleByteInstruction(Register reg) { return true; }
 
     void addCodeLabel(CodeLabel label) {
         propagateOOM(codeLabels_.append(label));
     }
     size_t numCodeLabels() const {
         return codeLabels_.length();
@@ -958,20 +958,20 @@ class AssemblerShared
         for (; i < boundsChecks_.length(); i++)
             boundsChecks_[i].offsetBy(delta);
 
         i = globalAccesses_.length();
         enoughMemory_ &= globalAccesses_.appendAll(other.globalAccesses_);
         for (; i < globalAccesses_.length(); i++)
             globalAccesses_[i].patchAt.offsetBy(delta);
 
-        i = asmJSAbsoluteAddresses_.length();
-        enoughMemory_ &= asmJSAbsoluteAddresses_.appendAll(other.asmJSAbsoluteAddresses_);
-        for (; i < asmJSAbsoluteAddresses_.length(); i++)
-            asmJSAbsoluteAddresses_[i].patchAt.offsetBy(delta);
+        i = wasmAbsoluteAddresses_.length();
+        enoughMemory_ &= wasmAbsoluteAddresses_.appendAll(other.wasmAbsoluteAddresses_);
+        for (; i < wasmAbsoluteAddresses_.length(); i++)
+            wasmAbsoluteAddresses_[i].patchAt.offsetBy(delta);
 
         i = codeLabels_.length();
         enoughMemory_ &= codeLabels_.appendAll(other.codeLabels_);
         for (; i < codeLabels_.length(); i++)
             codeLabels_[i].offsetBy(delta);
 
         return !oom();
     }
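The merge loop keeps using the fallible-append idiom seen throughout this class; a small model of the enoughMemory_ pattern, with illustrative names rather than SpiderMonkey's Vector API:

#include <vector>

// Every fallible append folds its result into one sticky flag, so emission
// paths stay branch-free and callers test oom() once at the end.
class OomLatch {
    bool enoughMemory_ = true;
    std::vector<int> items_;

    bool fallibleAppend(int v) {
        try { items_.push_back(v); return true; } catch (...) { return false; }
    }

  public:
    void append(int v) { enoughMemory_ &= fallibleAppend(v); }
    bool oom() const { return !enoughMemory_; }
};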
--- a/js/src/jit/shared/CodeGenerator-shared-inl.h
+++ b/js/src/jit/shared/CodeGenerator-shared-inl.h
@@ -217,17 +217,17 @@ GetTempValue(Register type, Register pay
 #error "Unknown"
 #endif
 }
 
 int32_t
 CodeGeneratorShared::ArgToStackOffset(int32_t slot) const
 {
     return masm.framePushed() +
-           (gen->compilingWasm() ? sizeof(AsmJSFrame) : sizeof(JitFrameLayout)) +
+           (gen->compilingWasm() ? sizeof(WasmFrame) : sizeof(JitFrameLayout)) +
            slot;
 }
 
 int32_t
 CodeGeneratorShared::CalleeStackOffset() const
 {
     return masm.framePushed() + JitFrameLayout::offsetOfCalleeToken();
 }
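The offset computed by ArgToStackOffset follows from the frame layout: incoming stack arguments sit above the frame header, which sits above everything the function has pushed since entry. A sketch, with the layout spelled out:

#include <cstdint>

// Distance from the current sp to an incoming stack argument. The header is
// sizeof(WasmFrame) when compiling wasm and sizeof(JitFrameLayout) otherwise.
//
//   higher addresses
//     | incoming args ...  |  <- `slot` is measured from here
//     | frame header       |
//     | pushed bytes ...   |  framePushed
//     | current sp         |
//   lower addresses
int32_t argToStackOffset(int32_t slot, int32_t framePushed,
                         int32_t frameHeaderSize) {
    return framePushed + frameHeaderSize + slot;
}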
--- a/js/src/jit/shared/CodeGenerator-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -75,42 +75,42 @@ CodeGeneratorShared::CodeGeneratorShared
 #endif
     frameDepth_(graph->paddedLocalSlotsSize() + graph->argumentsSize()),
     frameInitialAdjustment_(0)
 {
     if (gen->isProfilerInstrumentationEnabled())
         masm.enableProfilingInstrumentation();
 
     if (gen->compilingWasm()) {
-        // Since asm.js uses the system ABI which does not necessarily use a
+        // Since wasm uses the system ABI, which does not necessarily use a
         // regular array where all slots are sizeof(Value), it maintains the max
         // argument stack depth separately.
         MOZ_ASSERT(graph->argumentSlotCount() == 0);
         frameDepth_ += gen->wasmMaxStackArgBytes();
 
         if (gen->usesSimd()) {
             // If the function uses any SIMD then we may need to insert padding
             // so that local slots are aligned for SIMD.
-            frameInitialAdjustment_ = ComputeByteAlignment(sizeof(AsmJSFrame),
-                                                           AsmJSStackAlignment);
+            frameInitialAdjustment_ = ComputeByteAlignment(sizeof(WasmFrame),
+                                                           WasmStackAlignment);
             frameDepth_ += frameInitialAdjustment_;
             // Keep the stack aligned. Some SIMD sequences build values on the
             // stack and need the stack aligned.
-            frameDepth_ += ComputeByteAlignment(sizeof(AsmJSFrame) + frameDepth_,
-                                                AsmJSStackAlignment);
+            frameDepth_ += ComputeByteAlignment(sizeof(WasmFrame) + frameDepth_,
+                                                WasmStackAlignment);
         } else if (gen->performsCall()) {
             // An MWasmCall does not align the stack pointer at calls sites but
             // instead relies on the a priori stack adjustment. This must be the
             // last adjustment of frameDepth_.
-            frameDepth_ += ComputeByteAlignment(sizeof(AsmJSFrame) + frameDepth_,
-                                                AsmJSStackAlignment);
+            frameDepth_ += ComputeByteAlignment(sizeof(WasmFrame) + frameDepth_,
+                                                WasmStackAlignment);
         }
 
         // FrameSizeClass is only used for bailing, which cannot happen in
-        // asm.js code.
+        // wasm code.
         frameClass_ = FrameSizeClass::None();
     } else {
         frameClass_ = FrameSizeClass::FromDepth(frameDepth_);
     }
 }
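The two adjustments above are clearest with concrete numbers; a worked example assuming the x86 values sizeof(WasmFrame) == 8 and WasmStackAlignment == 16:

#include <cassert>
#include <cstdint>

int main() {
    const uint32_t kFrame = 8, kAlign = 16;  // assumed x86 values
    auto pad = [](uint32_t bytes, uint32_t align) {
        return (align - bytes % align) % align;
    };

    uint32_t frameDepth = 36;                          // locals + arg area
    uint32_t initialAdjustment = pad(kFrame, kAlign);  // 8, aligns the slots
    frameDepth += initialAdjustment;                   // 44
    frameDepth += pad(kFrame + frameDepth, kAlign);    // +12, total 56

    // The header plus the final frame depth is again 16-byte aligned.
    assert((kFrame + frameDepth) % kAlign == 0);       // 64 % 16 == 0
    return 0;
}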
 
 bool
 CodeGeneratorShared::generatePrologue()
@@ -160,17 +160,17 @@ CodeGeneratorShared::generateEpilogue()
     return true;
 }
 
 bool
 CodeGeneratorShared::generateOutOfLineCode()
 {
     for (size_t i = 0; i < outOfLineCode_.length(); i++) {
         // Add native => bytecode mapping entries for OOL sites.
-        // Not enabled on asm.js yet since asm doesn't contain bytecode mappings.
+        // Not enabled on wasm yet since wasm code doesn't contain bytecode mappings.
         if (!gen->compilingWasm()) {
             if (!addNativeToBytecodeEntry(outOfLineCode_[i]->bytecodeSite()))
                 return false;
         }
 
         if (!gen->alloc().ensureBallast())
             return false;
 
@@ -1482,24 +1482,24 @@ CodeGeneratorShared::omitOverRecursedChe
 void
 CodeGeneratorShared::emitWasmCallBase(LWasmCallBase* ins)
 {
     MWasmCall* mir = ins->mir();
 
     if (mir->spIncrement())
         masm.freeStack(mir->spIncrement());
 
-    MOZ_ASSERT((sizeof(AsmJSFrame) + masm.framePushed()) % AsmJSStackAlignment == 0);
-    static_assert(AsmJSStackAlignment >= ABIStackAlignment &&
-                  AsmJSStackAlignment % ABIStackAlignment == 0,
-                  "The asm.js stack alignment should subsume the ABI-required alignment");
+    MOZ_ASSERT((sizeof(WasmFrame) + masm.framePushed()) % WasmStackAlignment == 0);
+    static_assert(WasmStackAlignment >= ABIStackAlignment &&
+                  WasmStackAlignment % ABIStackAlignment == 0,
+                  "The wasm stack alignment should subsume the ABI-required alignment");
 
 #ifdef DEBUG
     Label ok;
-    masm.branchTestStackPtr(Assembler::Zero, Imm32(AsmJSStackAlignment - 1), &ok);
+    masm.branchTestStackPtr(Assembler::Zero, Imm32(WasmStackAlignment - 1), &ok);
     masm.breakpoint();
     masm.bind(&ok);
 #endif
 
     // Save the caller's TLS register in a reserved stack slot (below the
     // call's stack arguments) for retrieval after the call.
     if (mir->saveTls())
         masm.storePtr(WasmTlsReg, Address(masm.getStackPointer(), mir->tlsStackOffset()));
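The DEBUG check relies on the standard power-of-two trick: a pointer is N-aligned exactly when its low bits under the mask N - 1 are zero, which is what branchTestStackPtr tests. A standalone sketch:

#include <cassert>
#include <cstdint>

bool isAligned(uintptr_t sp, uintptr_t alignment) {
    assert(alignment != 0 && (alignment & (alignment - 1)) == 0);
    return (sp & (alignment - 1)) == 0;  // same as sp % alignment == 0
}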
@@ -1553,17 +1553,17 @@ CodeGeneratorShared::emitPreBarrier(Addr
     masm.patchableCallPreBarrier(address, MIRType::Value);
 }
 
 Label*
 CodeGeneratorShared::labelForBackedgeWithImplicitCheck(MBasicBlock* mir)
 {
     // If this is a loop backedge to a loop header with an implicit interrupt
     // check, use a patchable jump. Skip this search if compiling without a
-    // script for asm.js, as there will be no interrupt check instruction.
+    // script for wasm, as there will be no interrupt check instruction.
     // Due to critical edge unsplitting there may no longer be unique loop
     // backedges, so just look for any edge going to an earlier block in RPO.
     if (!gen->compilingWasm() && mir->isLoopHeader() && mir->id() <= current->mir()->id()) {
         for (LInstructionIterator iter = mir->lir()->begin(); iter != mir->lir()->end(); iter++) {
             if (iter->isMoveGroup()) {
                 // Continue searching for an interrupt check.
             } else {
                 // The interrupt check should be the first instruction in the
--- a/js/src/jit/x64/Assembler-x64.cpp
+++ b/js/src/jit/x64/Assembler-x64.cpp
@@ -26,17 +26,17 @@ ABIArgGenerator::ABIArgGenerator()
 
 ABIArg
 ABIArgGenerator::next(MIRType type)
 {
 #if defined(XP_WIN)
     JS_STATIC_ASSERT(NumIntArgRegs == NumFloatArgRegs);
     if (regIndex_ == NumIntArgRegs) {
         if (IsSimdType(type)) {
-            // On Win64, >64 bit args need to be passed by reference, but asm.js
+            // On Win64, >64 bit args need to be passed by reference, but wasm
             // doesn't allow passing SIMD values to FFIs. The only way to reach
             // here is asm to asm calls, so we can break the ABI here.
             stackOffset_ = AlignBytes(stackOffset_, SimdMemoryAlignment);
             current_ = ABIArg(stackOffset_);
             stackOffset_ += Simd128DataSize;
         } else {
             current_ = ABIArg(stackOffset_);
             stackOffset_ += sizeof(uint64_t);
@@ -57,17 +57,17 @@ ABIArgGenerator::next(MIRType type)
         break;
       case MIRType::Int8x16:
       case MIRType::Int16x8:
       case MIRType::Int32x4:
       case MIRType::Float32x4:
       case MIRType::Bool8x16:
       case MIRType::Bool16x8:
       case MIRType::Bool32x4:
-        // On Win64, >64 bit args need to be passed by reference, but asm.js
+        // On Win64, >64 bit args need to be passed by reference, but wasm
         // doesn't allow passing SIMD values to FFIs. The only way to reach
         // here is asm to asm calls, so we can break the ABI here.
         current_ = ABIArg(FloatArgRegs[regIndex_++].asSimd128());
         break;
       default:
         MOZ_CRASH("Unexpected argument type");
     }
     return current_;
--- a/js/src/jit/x64/Assembler-x64.h
+++ b/js/src/jit/x64/Assembler-x64.h
@@ -145,26 +145,26 @@ static constexpr FloatRegister FloatArgR
 static constexpr FloatRegister FloatArgReg5 = xmm5;
 static constexpr FloatRegister FloatArgReg6 = xmm6;
 static constexpr FloatRegister FloatArgReg7 = xmm7;
 static constexpr uint32_t NumFloatArgRegs = 8;
 static constexpr FloatRegister FloatArgRegs[NumFloatArgRegs] = { xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7 };
 #endif
 
 // Registers used in the GenerateFFIIonExit Enable Activation block.
-static constexpr Register AsmJSIonExitRegCallee = r10;
-static constexpr Register AsmJSIonExitRegE0 = rax;
-static constexpr Register AsmJSIonExitRegE1 = rdi;
+static constexpr Register WasmIonExitRegCallee = r10;
+static constexpr Register WasmIonExitRegE0 = rax;
+static constexpr Register WasmIonExitRegE1 = rdi;
 
 // Registers used in the GenerateFFIIonExit Disable Activation block.
-static constexpr Register AsmJSIonExitRegReturnData = ecx;
-static constexpr Register AsmJSIonExitRegReturnType = ecx;
-static constexpr Register AsmJSIonExitRegD0 = rax;
-static constexpr Register AsmJSIonExitRegD1 = rdi;
-static constexpr Register AsmJSIonExitRegD2 = rbx;
+static constexpr Register WasmIonExitRegReturnData = ecx;
+static constexpr Register WasmIonExitRegReturnType = ecx;
+static constexpr Register WasmIonExitRegD0 = rax;
+static constexpr Register WasmIonExitRegD1 = rdi;
+static constexpr Register WasmIonExitRegD2 = rbx;
 
 // Registers used in the RegExpMatcher instruction (do not use JSReturnOperand).
 static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
 static constexpr Register RegExpMatcherStringReg = CallTempReg1;
 static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
 
 // Registers used in the RegExpTester instruction (do not use ReturnReg).
 static constexpr Register RegExpTesterRegExpReg = CallTempReg1;
@@ -233,17 +233,17 @@ static_assert(CodeAlignment % SimdMemory
   "Code alignment should be larger than any of the alignments which are used for "
   "the constant sections of the code buffer.  Thus it should be larger than the "
   "alignment for SIMD constants.");
 
 static_assert(JitStackAlignment % SimdMemoryAlignment == 0,
   "Stack alignment should be larger than any of the alignments which are used for "
   "spilled values.  Thus it should be larger than the alignment for SIMD accesses.");
 
-static const uint32_t AsmJSStackAlignment = SimdMemoryAlignment;
+static const uint32_t WasmStackAlignment = SimdMemoryAlignment;
 
 static const Scale ScalePointer = TimesEight;
 
 } // namespace jit
 } // namespace js
 
 #include "jit/x86-shared/Assembler-x86-shared.h"
 
@@ -752,17 +752,17 @@ class Assembler : public AssemblerX86Sha
         else
             movq(word, dest);
     }
     void mov(ImmPtr imm, Register dest) {
         movq(imm, dest);
     }
     void mov(wasm::SymbolicAddress imm, Register dest) {
         masm.movq_i64r(-1, dest.encoding());
-        append(AsmJSAbsoluteAddress(CodeOffset(masm.currentOffset()), imm));
+        append(WasmAbsoluteAddress(CodeOffset(masm.currentOffset()), imm));
     }
     void mov(const Operand& src, Register dest) {
         movq(src, dest);
     }
     void mov(Register src, const Operand& dest) {
         movq(src, dest);
     }
     void mov(Imm32 imm32, const Operand& dest) {
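Both the x64 and x86 assemblers follow the same emit-placeholder-then-patch protocol for wasm::SymbolicAddress: emit the instruction with a -1 immediate, record where it was emitted together with the symbol, and let Module::staticallyLink write the real pointer. A hedged sketch with illustrative types standing in for WasmAbsoluteAddress:

#include <cstdint>
#include <cstring>
#include <vector>

enum class Symbol { HandleTrap, ReportError };  // stands in for SymbolicAddress

struct PatchSite {
    size_t codeOffset;  // where the -1 placeholder immediate was emitted
    Symbol target;      // which runtime address belongs there
};

void staticallyLink(uint8_t* code, const std::vector<PatchSite>& sites,
                    void* (*resolve)(Symbol)) {
    for (const PatchSite& site : sites) {
        // Overwrite the placeholder with the real address of the symbol.
        void* addr = resolve(site.target);
        memcpy(code + site.codeOffset, &addr, sizeof(addr));
    }
}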
--- a/js/src/jit/x64/MacroAssembler-x64.cpp
+++ b/js/src/jit/x64/MacroAssembler-x64.cpp
@@ -447,29 +447,29 @@ MacroAssembler::setupUnalignedABICall(Re
     dynamicAlignment_ = true;
 
     movq(rsp, scratch);
     andq(Imm32(~(ABIStackAlignment - 1)), rsp);
     push(scratch);
 }
 
 void
-MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromAsmJS)
+MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm)
 {
     MOZ_ASSERT(inCall_);
     uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
 
     if (dynamicAlignment_) {
         // sizeof(intptr_t) accounts for the saved stack pointer pushed by
         // setupUnalignedABICall.
         stackForCall += ComputeByteAlignment(stackForCall + sizeof(intptr_t),
                                              ABIStackAlignment);
     } else {
-        static_assert(sizeof(AsmJSFrame) % ABIStackAlignment == 0,
-                      "AsmJSFrame should be part of the stack alignment.");
+        static_assert(sizeof(WasmFrame) % ABIStackAlignment == 0,
+                      "WasmFrame should be part of the stack alignment.");
         stackForCall += ComputeByteAlignment(stackForCall + framePushed(),
                                              ABIStackAlignment);
     }
 
     *stackAdjust = stackForCall;
     reserveStack(stackForCall);
 
     // Position all arguments.
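Unlike the MIPS and x86 versions of this function, the x64 variant needs no alignmentAtPrologue term: the static_assert above guarantees the frame header is itself ABI-aligned, so it drops out of the modulus. In numbers, assuming the x64 values:

#include <cstdint>

constexpr uint32_t kAbiStackAlignment = 16;  // x64 ABI requirement
constexpr uint32_t kSizeofWasmFrame = 16;    // two 8-byte words
static_assert(kSizeofWasmFrame % kAbiStackAlignment == 0,
              "the wasm frame header preserves ABI alignment by itself");
// Hence (stackForCall + framePushed + kSizeofWasmFrame) % 16
//    == (stackForCall + framePushed) % 16.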
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -2843,17 +2843,17 @@ CodeGeneratorX86Shared::visitSimdExtract
     } else if (lane == 2) {
         masm.moveHighPairToLowPairFloat32(input, output);
     } else {
         uint32_t mask = MacroAssembler::ComputeShuffleMask(lane);
         masm.shuffleFloat32(mask, input, output);
     }
     // NaNs contained within SIMD values are not enforced to be canonical, so
     // when we extract an element into a "regular" scalar JS value, we have to
-    // canonicalize. In asm.js code, we can skip this, as asm.js only has to
+    // canonicalize. In wasm code, we can skip this, as wasm only has to
     // canonicalize NaNs at FFI boundaries.
     if (!gen->compilingWasm())
         masm.canonicalizeFloat(output);
 }
 
 void
 CodeGeneratorX86Shared::visitSimdInsertElementI(LSimdInsertElementI* ins)
 {
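Canonicalization itself is cheap; a hedged scalar model of what masm.canonicalizeFloat amounts to, using a generic quiet NaN rather than SpiderMonkey's exact canonical bit pattern:

#include <cmath>
#include <limits>

// Any NaN extracted from a SIMD lane is replaced by a single canonical NaN
// before it can escape into a JS value.
float canonicalizeFloat(float f) {
    return std::isnan(f) ? std::numeric_limits<float>::quiet_NaN() : f;
}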
--- a/js/src/jit/x86/Assembler-x86.cpp
+++ b/js/src/jit/x86/Assembler-x86.cpp
@@ -37,17 +37,17 @@ ABIArgGenerator::next(MIRType type)
       case MIRType::Int8x16:
       case MIRType::Int16x8:
       case MIRType::Int32x4:
       case MIRType::Float32x4:
       case MIRType::Bool8x16:
       case MIRType::Bool16x8:
       case MIRType::Bool32x4:
         // SIMD values aren't passed in or out of C++, so we can make up
-        // whatever internal ABI we like. visitAsmJSPassArg assumes
+        // whatever internal ABI we like. visitWasmStackArg assumes
         // SimdMemoryAlignment.
         stackOffset_ = AlignBytes(stackOffset_, SimdMemoryAlignment);
         current_ = ABIArg(stackOffset_);
         stackOffset_ += Simd128DataSize;
         break;
       default:
         MOZ_CRASH("Unexpected argument type");
     }
--- a/js/src/jit/x86/Assembler-x86.h
+++ b/js/src/jit/x86/Assembler-x86.h
@@ -100,39 +100,39 @@ static constexpr Register WasmTlsReg = e
 static constexpr Register WasmTableCallScratchReg = ABINonArgReg0;
 static constexpr Register WasmTableCallSigReg = ABINonArgReg1;
 static constexpr Register WasmTableCallIndexReg = ABINonArgReg2;
 
 static constexpr Register OsrFrameReg = edx;
 static constexpr Register PreBarrierReg = edx;
 
 // Registers used in the GenerateFFIIonExit Enable Activation block.
-static constexpr Register AsmJSIonExitRegCallee = ecx;
-static constexpr Register AsmJSIonExitRegE0 = edi;
-static constexpr Register AsmJSIonExitRegE1 = eax;
+static constexpr Register WasmIonExitRegCallee = ecx;
+static constexpr Register WasmIonExitRegE0 = edi;
+static constexpr Register WasmIonExitRegE1 = eax;
 
 // Registers used in the GenerateFFIIonExit Disable Activation block.
-static constexpr Register AsmJSIonExitRegReturnData = edx;
-static constexpr Register AsmJSIonExitRegReturnType = ecx;
-static constexpr Register AsmJSIonExitRegD0 = edi;
-static constexpr Register AsmJSIonExitRegD1 = eax;
-static constexpr Register AsmJSIonExitRegD2 = esi;
+static constexpr Register WasmIonExitRegReturnData = edx;
+static constexpr Register WasmIonExitRegReturnType = ecx;
+static constexpr Register WasmIonExitRegD0 = edi;
+static constexpr Register WasmIonExitRegD1 = eax;
+static constexpr Register WasmIonExitRegD2 = esi;
 
 // Registers used in the RegExpMatcher instruction (do not use JSReturnOperand).
 static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
 static constexpr Register RegExpMatcherStringReg = CallTempReg1;
 static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
 
 // Registers used in the RegExpTester instruction (do not use ReturnReg).
 static constexpr Register RegExpTesterRegExpReg = CallTempReg0;
 static constexpr Register RegExpTesterStringReg = CallTempReg2;
 static constexpr Register RegExpTesterLastIndexReg = CallTempReg3;
 
 // GCC stack is aligned on 16 bytes. Ion does not maintain this for internal
-// calls. asm.js code does.
+// calls. wasm code does.
 #if defined(__GNUC__)
 static constexpr uint32_t ABIStackAlignment = 16;
 #else
 static constexpr uint32_t ABIStackAlignment = 4;
 #endif
 static constexpr uint32_t CodeAlignment = 16;
 static constexpr uint32_t JitStackAlignment = 16;
 
@@ -151,17 +151,17 @@ static_assert(CodeAlignment % SimdMemory
   "Code alignment should be larger than any of the alignments which are used for "
   "the constant sections of the code buffer.  Thus it should be larger than the "
   "alignment for SIMD constants.");
 
 static_assert(JitStackAlignment % SimdMemoryAlignment == 0,
   "Stack alignment should be larger than any of the alignments which are used for "
   "spilled values.  Thus it should be larger than the alignment for SIMD accesses.");
 
-static const uint32_t AsmJSStackAlignment = SimdMemoryAlignment;
+static const uint32_t WasmStackAlignment = SimdMemoryAlignment;
 
 struct ImmTag : public Imm32
 {
     ImmTag(JSValueTag mask)
       : Imm32(int32_t(mask))
     { }
 };
 
@@ -306,17 +306,17 @@ class Assembler : public AssemblerX86Sha
         else
             movl(imm, dest);
     }
     void mov(ImmPtr imm, Register dest) {
         mov(ImmWord(uintptr_t(imm.value)), dest);
     }
     void mov(wasm::SymbolicAddress imm, Register dest) {
         masm.movl_i32r(-1, dest.encoding());
-        append(AsmJSAbsoluteAddress(CodeOffset(masm.currentOffset()), imm));
+        append(WasmAbsoluteAddress(CodeOffset(masm.currentOffset()), imm));
     }
     void mov(const Operand& src, Register dest) {
         movl(src, dest);
     }
     void mov(Register src, const Operand& dest) {
         movl(src, dest);
     }
     void mov(Imm32 imm, const Operand& dest) {
@@ -378,21 +378,21 @@ class Assembler : public AssemblerX86Sha
             writeDataRelocation(rhs);
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
     }
     void cmpl(Register rhs, wasm::SymbolicAddress lhs) {
         masm.cmpl_rm_disp32(rhs.encoding(), (void*)-1);
-        append(AsmJSAbsoluteAddress(CodeOffset(masm.currentOffset()), lhs));
+        append(WasmAbsoluteAddress(CodeOffset(masm.currentOffset()), lhs));
     }
     void cmpl(Imm32 rhs, wasm::SymbolicAddress lhs) {
         JmpSrc src = masm.cmpl_im_disp32(rhs.value, (void*)-1);
-        append(AsmJSAbsoluteAddress(CodeOffset(src.offset()), lhs));
+        append(WasmAbsoluteAddress(CodeOffset(src.offset()), lhs));
     }
 
     void adcl(Imm32 imm, Register dest) {
         masm.adcl_ir(imm.value, dest.encoding());
     }
     void adcl(Register src, Register dest) {
         masm.adcl_rr(src.encoding(), dest.encoding());
     }
--- a/js/src/jit/x86/MacroAssembler-x86.cpp
+++ b/js/src/jit/x86/MacroAssembler-x86.cpp
@@ -354,28 +354,28 @@ MacroAssembler::setupUnalignedABICall(Re
     dynamicAlignment_ = true;
 
     movl(esp, scratch);
     andl(Imm32(~(ABIStackAlignment - 1)), esp);
     push(scratch);
 }
 
 void
-MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromAsmJS)
+MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm)
 {
     MOZ_ASSERT(inCall_);
     uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
 
     if (dynamicAlignment_) {
         // sizeof(intptr_t) accounts for the saved stack pointer pushed by
         // setupUnalignedABICall.
         stackForCall += ComputeByteAlignment(stackForCall + sizeof(intptr_t),
                                              ABIStackAlignment);
     } else {
-        uint32_t alignmentAtPrologue = callFromAsmJS ? sizeof(AsmJSFrame) : 0;
+        uint32_t alignmentAtPrologue = callFromWasm ? sizeof(WasmFrame) : 0;
         stackForCall += ComputeByteAlignment(stackForCall + framePushed() + alignmentAtPrologue,
                                              ABIStackAlignment);
     }
 
     *stackAdjust = stackForCall;
     reserveStack(stackForCall);
 
     // Position all arguments.