Bug 1304672: Rename isCompilingAsmJS into isCompilingWasm; r?luke draft
author: Benjamin Bouvier <benj@benj.me>
Wed, 02 Nov 2016 17:20:55 +0100
changeset 432763 acfe4bfd5593fa7289583574ac90deba55ac24fe
parent 432762 09280b420dcd9509d9b4ca1ddde5206d3d589972
child 432764 74ba6afedbe5cc224348d8cbac3bdc6cdb0ee191
push id: 34416
push user: bbouvier@mozilla.com
push date: Wed, 02 Nov 2016 18:00:01 +0000
reviewers: luke
bugs: 1304672
milestone: 52.0a1
Bug 1304672: Rename isCompilingAsmJS into isCompilingWasm; r?luke MozReview-Commit-ID: 3yFHEI1CoWO
js/src/jit/BacktrackingAllocator.cpp
js/src/jit/BacktrackingAllocator.h
js/src/jit/CodeGenerator.cpp
js/src/jit/CompileInfo.h
js/src/jit/Ion.cpp
js/src/jit/IonAnalysis.cpp
js/src/jit/Lowering.cpp
js/src/jit/MIRGenerator.h
js/src/jit/MacroAssembler.h
js/src/jit/RangeAnalysis.cpp
js/src/jit/RegisterAllocator.h
js/src/jit/arm/CodeGenerator-arm.cpp
js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
js/src/jit/mips32/CodeGenerator-mips32.cpp
js/src/jit/shared/CodeGenerator-shared-inl.h
js/src/jit/shared/CodeGenerator-shared.cpp
js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
js/src/jit/x86/CodeGenerator-x86.cpp
--- a/js/src/jit/BacktrackingAllocator.cpp
+++ b/js/src/jit/BacktrackingAllocator.cpp
@@ -2592,17 +2592,17 @@ BacktrackingAllocator::trySplitAcrossHot
     }
 
     JitSpew(JitSpew_RegAlloc, "  split across hot range %s", hotRange->toString().get());
 
     // Tweak the splitting method when compiling asm.js code to look at actual
     // uses within the hot/cold code. This heuristic is in place as the below
     // mechanism regresses several asm.js tests. Hopefully this will be fixed
     // soon and this special case removed. See bug 948838.
-    if (compilingAsmJS()) {
+    if (compilingWasm()) {
         SplitPositionVector splitPositions;
         if (!splitPositions.append(hotRange->from()) || !splitPositions.append(hotRange->to()))
             return false;
         *success = true;
         return splitAt(bundle, splitPositions);
     }
 
     LiveBundle* hotBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
--- a/js/src/jit/BacktrackingAllocator.h
+++ b/js/src/jit/BacktrackingAllocator.h
@@ -798,18 +798,18 @@ class BacktrackingAllocator : protected 
     MOZ_MUST_USE bool splitAt(LiveBundle* bundle, const SplitPositionVector& splitPositions);
     MOZ_MUST_USE bool trySplitAcrossHotcode(LiveBundle* bundle, bool* success);
     MOZ_MUST_USE bool trySplitAfterLastRegisterUse(LiveBundle* bundle, LiveBundle* conflict,
                                                    bool* success);
     MOZ_MUST_USE bool trySplitBeforeFirstRegisterUse(LiveBundle* bundle, LiveBundle* conflict,
                                                      bool* success);
     MOZ_MUST_USE bool splitAcrossCalls(LiveBundle* bundle);
 
-    bool compilingAsmJS() {
-        return mir->info().compilingAsmJS();
+    bool compilingWasm() {
+        return mir->info().compilingWasm();
     }
 
     void dumpVregs();
 };
 
 } // namespace jit
 } // namespace js
 
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -163,17 +163,17 @@ CodeGenerator::CodeGenerator(MIRGenerato
   , ionScriptLabels_(gen->alloc())
   , scriptCounts_(nullptr)
   , simdRefreshTemplatesDuringLink_(0)
 {
 }
 
 CodeGenerator::~CodeGenerator()
 {
-    MOZ_ASSERT_IF(!gen->compilingAsmJS(), masm.numAsmJSAbsoluteAddresses() == 0);
+    MOZ_ASSERT_IF(!gen->compilingWasm(), masm.numAsmJSAbsoluteAddresses() == 0);
     js_delete(scriptCounts_);
 }
 
 typedef bool (*StringToNumberFn)(ExclusiveContext*, JSString*, double*);
 static const VMFunction StringToNumberInfo =
     FunctionInfo<StringToNumberFn>(StringToNumber, "StringToNumber");
 
 void
@@ -5062,17 +5062,17 @@ CodeGenerator::emitDebugForceBailing(LIn
 
 bool
 CodeGenerator::generateBody()
 {
     IonScriptCounts* counts = maybeCreateScriptCounts();
 
 #if defined(JS_ION_PERF)
     PerfSpewer* perfSpewer = &perfSpewer_;
-    if (gen->compilingAsmJS())
+    if (gen->compilingWasm())
         perfSpewer = &gen->perfSpewer();
 #endif
 
     for (size_t i = 0; i < graph.numBlocks(); i++) {
         current = graph.getBlock(i);
 
         // Don't emit any code for trivial blocks, containing just a goto. Such
         // blocks are created to split critical edges, and if we didn't end up
@@ -6669,17 +6669,17 @@ CodeGenerator::visitModD(LModD* ins)
     Register temp = ToRegister(ins->temp());
 
     MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
 
     masm.setupUnalignedABICall(temp);
     masm.passABIArg(lhs, MoveOp::DOUBLE);
     masm.passABIArg(rhs, MoveOp::DOUBLE);
 
-    if (gen->compilingAsmJS())
+    if (gen->compilingWasm())
         masm.callWithABI(wasm::SymbolicAddress::ModD, MoveOp::DOUBLE);
     else
         masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, NumberMod), MoveOp::DOUBLE);
 }
 
 typedef bool (*BinaryFn)(JSContext*, MutableHandleValue, MutableHandleValue, MutableHandleValue);
 
 static const VMFunction AddInfo = FunctionInfo<BinaryFn>(js::AddValues, "AddValues");
@@ -11658,17 +11658,17 @@ CodeGenerator::visitInterruptCheck(LInte
     AbsoluteAddress interruptAddr(GetJitContext()->runtime->addressOfInterruptUint32());
     masm.branch32(Assembler::NotEqual, interruptAddr, Imm32(0), ool->entry());
     masm.bind(ool->rejoin());
 }
 
 void
 CodeGenerator::visitWasmTrap(LWasmTrap* lir)
 {
-    MOZ_ASSERT(gen->compilingAsmJS());
+    MOZ_ASSERT(gen->compilingWasm());
     const MWasmTrap* mir = lir->mir();
 
     masm.jump(trap(mir, mir->trap()));
 }
 
 void
 CodeGenerator::visitWasmBoundsCheck(LWasmBoundsCheck* ins)
 {
--- a/js/src/jit/CompileInfo.h
+++ b/js/src/jit/CompileInfo.h
@@ -233,17 +233,17 @@ class CompileInfo
         nlocals_ = nlocals;
         nstack_ = 1;  /* For FunctionCompiler::pushPhiInput/popPhiOutput */
         nslots_ = nlocals_ + nstack_;
     }
 
     JSScript* script() const {
         return script_;
     }
-    bool compilingAsmJS() const {
+    bool compilingWasm() const {
         return script() == nullptr;
     }
     JSFunction* funMaybeLazy() const {
         return fun_;
     }
     ModuleObject* module() const {
         return script_->module();
     }
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -1494,28 +1494,28 @@ OptimizeMIR(MIRGenerator* mir)
     if (GetJitContext()->onMainThread())
         logger = TraceLoggerForMainThread(GetJitContext()->runtime);
     else
         logger = TraceLoggerForCurrentThread();
 
     if (mir->shouldCancel("Start"))
         return false;
 
-    if (!mir->compilingAsmJS()) {
+    if (!mir->compilingWasm()) {
         if (!MakeMRegExpHoistable(mir, graph))
             return false;
 
         if (mir->shouldCancel("Make MRegExp Hoistable"))
             return false;
     }
 
     gs.spewPass("BuildSSA");
     AssertBasicGraphCoherency(graph);
 
-    if (!JitOptions.disablePgo && !mir->compilingAsmJS()) {
+    if (!JitOptions.disablePgo && !mir->compilingWasm()) {
         AutoTraceLog log(logger, TraceLogger_PruneUnusedBranches);
         if (!PruneUnusedBranches(mir, graph))
             return false;
         gs.spewPass("Prune Unused Branches");
         AssertBasicGraphCoherency(graph);
 
         if (mir->shouldCancel("Prune Unused Branches"))
             return false;
@@ -1595,17 +1595,17 @@ OptimizeMIR(MIRGenerator* mir)
             return false;
         gs.spewPass("Scalar Replacement");
         AssertGraphCoherency(graph);
 
         if (mir->shouldCancel("Scalar Replacement"))
             return false;
     }
 
-    if (!mir->compilingAsmJS()) {
+    if (!mir->compilingWasm()) {
         AutoTraceLog log(logger, TraceLogger_ApplyTypes);
         if (!ApplyTypeInformation(mir, graph))
             return false;
         gs.spewPass("Apply types");
         AssertExtendedGraphCoherency(graph);
 
         if (mir->shouldCancel("Apply types"))
             return false;
@@ -1657,17 +1657,17 @@ OptimizeMIR(MIRGenerator* mir)
 
             gs.spewPass("Alias analysis");
             AssertExtendedGraphCoherency(graph);
 
             if (mir->shouldCancel("Alias analysis"))
                 return false;
         }
 
-        if (!mir->compilingAsmJS()) {
+        if (!mir->compilingWasm()) {
             // Eliminating dead resume point operands requires basic block
             // instructions to be numbered. Reuse the numbering computed during
             // alias analysis.
             if (!EliminateDeadResumePointOperands(mir, graph))
                 return false;
 
             if (mir->shouldCancel("Eliminate dead resume point operands"))
                 return false;
@@ -1884,25 +1884,25 @@ OptimizeMIR(MIRGenerator* mir)
         // code motion after this pass could incorrectly move a load or store
         // before its bounds check.
         if (!EliminateRedundantChecks(graph))
             return false;
         gs.spewPass("Bounds Check Elimination");
         AssertGraphCoherency(graph);
     }
 
-    if (!mir->compilingAsmJS()) {
+    if (!mir->compilingWasm()) {
         AutoTraceLog log(logger, TraceLogger_AddKeepAliveInstructions);
         if (!AddKeepAliveInstructions(graph))
             return false;
         gs.spewPass("Add KeepAlive Instructions");
         AssertGraphCoherency(graph);
     }
 
-    if (mir->compilingAsmJS()) {
+    if (mir->compilingWasm()) {
         if (!EliminateBoundsChecks(mir, graph))
             return false;
         gs.spewPass("Redundant Bounds Check Elimination");
         AssertGraphCoherency(graph);
     }
 
     return true;
 }
--- a/js/src/jit/IonAnalysis.cpp
+++ b/js/src/jit/IonAnalysis.cpp
@@ -278,17 +278,17 @@ ConvertToBailingBlock(TempAllocator& all
 
     // And replace the last instruction by the unreachable control instruction.
     block->end(MUnreachable::New(alloc));
 }
 
 bool
 jit::PruneUnusedBranches(MIRGenerator* mir, MIRGraph& graph)
 {
-    MOZ_ASSERT(!mir->compilingAsmJS(), "AsmJS compilation have no code coverage support.");
+    MOZ_ASSERT(!mir->compilingWasm(), "AsmJS compilation have no code coverage support.");
 
     // We do a reverse-post-order traversal, marking basic blocks when the block
     // have to be converted into bailing blocks, and flagging block as
     // unreachable if all predecessors are flagged as bailing or unreachable.
     bool someUnreachable = false;
     for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
         if (mir->shouldCancel("Prune unused branches (main loop)"))
             return false;
@@ -1890,17 +1890,17 @@ TypeAnalyzer::graphContainsFloat32()
     return false;
 }
 
 bool
 TypeAnalyzer::tryEmitFloatOperations()
 {
     // Asm.js uses the ahead of time type checks to specialize operations, no need to check
     // them again at this point.
-    if (mir->compilingAsmJS())
+    if (mir->compilingWasm())
         return true;
 
     // Check ahead of time that there is at least one definition typed as Float32, otherwise we
     // don't need this pass.
     if (!graphContainsFloat32())
         return true;
 
     if (!markPhiConsumers())
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -128,17 +128,17 @@ TryToUseImplicitInterruptCheck(MIRGraph&
     }
 
     check->setImplicit();
 }
 
 void
 LIRGenerator::visitGoto(MGoto* ins)
 {
-    if (!gen->compilingAsmJS() && ins->block()->isLoopBackedge())
+    if (!gen->compilingWasm() && ins->block()->isLoopBackedge())
         TryToUseImplicitInterruptCheck(graph, ins->block());
 
     add(new(alloc()) LGoto(ins->target()));
 }
 
 void
 LIRGenerator::visitTableSwitch(MTableSwitch* tableswitch)
 {
@@ -4438,25 +4438,25 @@ LIRGenerator::visitSimdConvert(MSimdConv
     MOZ_ASSERT(IsSimdType(ins->type()));
     MDefinition* input = ins->input();
     LUse use = useRegister(input);
     if (ins->type() == MIRType::Int32x4) {
         MOZ_ASSERT(input->type() == MIRType::Float32x4);
         switch (ins->signedness()) {
           case SimdSign::Signed: {
               LFloat32x4ToInt32x4* lir = new(alloc()) LFloat32x4ToInt32x4(use, temp());
-              if (!gen->compilingAsmJS())
+              if (!gen->compilingWasm())
                   assignSnapshot(lir, Bailout_BoundsCheck);
               define(lir, ins);
               break;
           }
           case SimdSign::Unsigned: {
               LFloat32x4ToUint32x4* lir =
                 new (alloc()) LFloat32x4ToUint32x4(use, temp(), temp(LDefinition::SIMD128INT));
-              if (!gen->compilingAsmJS())
+              if (!gen->compilingWasm())
                   assignSnapshot(lir, Bailout_BoundsCheck);
               define(lir, ins);
               break;
           }
           default:
             MOZ_CRASH("Unexpected SimdConvert sign");
         }
     } else if (ins->type() == MIRType::Float32x4) {
@@ -4781,17 +4781,17 @@ LIRGenerator::updateResumeState(MBasicBl
     // As Value Numbering phase can remove edges from the entry basic block to a
     // code paths reachable from the OSR entry point, we have to add fixup
     // blocks to keep the dominator tree organized the same way. These fixup
     // blocks are flaged as unreachable, and should only exist iff the graph has
     // an OSR block.
     //
     // Note: RangeAnalysis can flag blocks as unreachable, but they are only
     // removed iff GVN (including UCE) is enabled.
-    MOZ_ASSERT_IF(!mir()->compilingAsmJS() && !block->unreachable(), block->entryResumePoint());
+    MOZ_ASSERT_IF(!mir()->compilingWasm() && !block->unreachable(), block->entryResumePoint());
     MOZ_ASSERT_IF(block->unreachable(), block->graph().osrBlock() ||
                   !mir()->optimizationInfo().gvnEnabled());
     lastResumePoint_ = block->entryResumePoint();
     if (JitSpewEnabled(JitSpew_IonSnapshots) && lastResumePoint_)
         SpewResumePoint(block, nullptr, lastResumePoint_);
 }
 
 bool
--- a/js/src/jit/MIRGenerator.h
+++ b/js/src/jit/MIRGenerator.h
@@ -83,17 +83,17 @@ class MIRGenerator
         if (!instrumentedProfilingIsCached_) {
             instrumentedProfiling_ = GetJitContext()->runtime->spsProfiler().enabled();
             instrumentedProfilingIsCached_ = true;
         }
         return instrumentedProfiling_;
     }
 
     bool isProfilerInstrumentationEnabled() {
-        return !compilingAsmJS() && instrumentedProfiling();
+        return !compilingWasm() && instrumentedProfiling();
     }
 
     bool isOptimizationTrackingEnabled() {
         return isProfilerInstrumentationEnabled() && !info().isAnalysis();
     }
 
     bool safeForMinorGC() const {
         return safeForMinorGC_;
@@ -121,26 +121,26 @@ class MIRGenerator
 
     void disable() {
         abortReason_ = AbortReason_Disable;
     }
     AbortReason abortReason() {
         return abortReason_;
     }
 
-    bool compilingAsmJS() const {
-        return info_->compilingAsmJS();
+    bool compilingWasm() const {
+        return info_->compilingWasm();
     }
 
     uint32_t wasmMaxStackArgBytes() const {
-        MOZ_ASSERT(compilingAsmJS());
+        MOZ_ASSERT(compilingWasm());
         return wasmMaxStackArgBytes_;
     }
     void initWasmMaxStackArgBytes(uint32_t n) {
-        MOZ_ASSERT(compilingAsmJS());
+        MOZ_ASSERT(compilingWasm());
         MOZ_ASSERT(wasmMaxStackArgBytes_ == 0);
         wasmMaxStackArgBytes_ = n;
     }
     uint32_t minAsmJSHeapLength() const {
         return minAsmJSHeapLength_;
     }
     void setPerformsCall() {
         performsCall_ = true;
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -1872,17 +1872,17 @@ class MacroAssembler : public MacroAssem
     MOZ_MUST_USE bool convertConstantOrRegisterToFloatingPoint(JSContext* cx,
                                                                const ConstantOrRegister& src,
                                                                FloatRegister output, Label* fail,
                                                                MIRType outputType);
     void convertTypedOrValueToFloatingPoint(TypedOrValueRegister src, FloatRegister output,
                                             Label* fail, MIRType outputType);
 
     void outOfLineTruncateSlow(FloatRegister src, Register dest, bool widenFloatToDouble,
-                               bool compilingAsmJS);
+                               bool compilingWasm);
 
     void convertInt32ValueToDouble(const Address& address, Register scratch, Label* done);
     void convertValueToDouble(ValueOperand value, FloatRegister output, Label* fail) {
         convertValueToFloatingPoint(value, output, fail, MIRType::Double);
     }
     MOZ_MUST_USE bool convertValueToDouble(JSContext* cx, const Value& v, FloatRegister output,
                                            Label* fail) {
         return convertValueToFloatingPoint(cx, v, output, fail, MIRType::Double);
--- a/js/src/jit/RangeAnalysis.cpp
+++ b/js/src/jit/RangeAnalysis.cpp
@@ -1962,17 +1962,17 @@ RangeAnalysis::analyzeLoop(MBasicBlock* 
 #endif
 
     // Try to compute symbolic bounds for the phi nodes at the head of this
     // loop, expressed in terms of the iteration bound just computed.
 
     for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++)
         analyzeLoopPhi(header, iterationBound, *iter);
 
-    if (!mir->compilingAsmJS()) {
+    if (!mir->compilingWasm()) {
         // Try to hoist any bounds checks from the loop using symbolic bounds.
 
         Vector<MBoundsCheck*, 0, JitAllocPolicy> hoistedChecks(alloc());
 
         for (ReversePostorderIterator iter(graph_.rpoBegin(header)); iter != graph_.rpoEnd(); iter++) {
             MBasicBlock* block = *iter;
             if (!block->isMarked())
                 continue;
@@ -2678,17 +2678,17 @@ MLimitedTruncate::needTruncation(Truncat
 
 bool
 MCompare::needTruncation(TruncateKind kind)
 {
     // If we're compiling AsmJS, don't try to optimize the comparison type, as
     // the code presumably is already using the type it wants. Also, AsmJS
     // doesn't support bailouts, so we woudn't be able to rely on
     // TruncateAfterBailouts to convert our inputs.
-    if (block()->info().compilingAsmJS())
+    if (block()->info().compilingWasm())
        return false;
 
     if (!isDoubleComparison())
         return false;
 
     // If both operands are naturally in the int32 range, we can convert from
     // a double comparison to being an int32 comparison.
     if (!Range(lhs()).isInt32() || !Range(rhs()).isInt32())
@@ -3090,17 +3090,17 @@ bool
 RangeAnalysis::truncate()
 {
     JitSpew(JitSpew_Range, "Do range-base truncation (backward loop)");
 
     // Automatic truncation is disabled for AsmJS because the truncation logic
     // is based on IonMonkey which assumes that we can bailout if the truncation
     // logic fails. As AsmJS code has no bailout mechanism, it is safer to avoid
     // any automatic truncations.
-    MOZ_ASSERT(!mir->compilingAsmJS());
+    MOZ_ASSERT(!mir->compilingWasm());
 
     Vector<MDefinition*, 16, SystemAllocPolicy> worklist;
 
     for (PostorderIterator block(graph_.poBegin()); block != graph_.poEnd(); block++) {
         for (MInstructionReverseIterator iter(block->rbegin()); iter != block->rend(); iter++) {
             if (iter->isRecoveredOnBailout())
                 continue;
 
--- a/js/src/jit/RegisterAllocator.h
+++ b/js/src/jit/RegisterAllocator.h
@@ -275,17 +275,17 @@ class RegisterAllocator
     Vector<CodePosition, 12, SystemAllocPolicy> exitPositions;
 
     RegisterAllocator(MIRGenerator* mir, LIRGenerator* lir, LIRGraph& graph)
       : mir(mir),
         lir(lir),
         graph(graph),
         allRegisters_(RegisterSet::All())
     {
-        if (mir->compilingAsmJS()) {
+        if (mir->compilingWasm()) {
 #if defined(JS_CODEGEN_X64)
             allRegisters_.take(AnyRegister(HeapReg));
 #elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
             allRegisters_.take(AnyRegister(HeapReg));
             allRegisters_.take(AnyRegister(GlobalReg));
 #elif defined(JS_CODEGEN_ARM64)
             allRegisters_.take(AnyRegister(HeapReg));
             allRegisters_.take(AnyRegister(HeapLenReg));
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -619,17 +619,17 @@ CodeGeneratorARM::visitSoftDivI(LSoftDiv
     MDiv* mir = ins->mir();
 
     Label done;
     divICommon(mir, lhs, rhs, output, ins->snapshot(), done);
 
     masm.setupAlignedABICall();
     masm.passABIArg(lhs);
     masm.passABIArg(rhs);
-    if (gen->compilingAsmJS())
+    if (gen->compilingWasm())
         masm.callWithABI(wasm::SymbolicAddress::aeabi_idivmod);
     else
         masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, __aeabi_idivmod));
 
     // idivmod returns the quotient in r0, and the remainder in r1.
     if (!mir->canTruncateRemainder()) {
         MOZ_ASSERT(mir->fallible());
         masm.as_cmp(r1, Imm8(0));
@@ -800,17 +800,17 @@ CodeGeneratorARM::visitSoftModI(LSoftMod
         }
     }
 
     modICommon(mir, lhs, rhs, output, ins->snapshot(), done);
 
     masm.setupAlignedABICall();
     masm.passABIArg(lhs);
     masm.passABIArg(rhs);
-    if (gen->compilingAsmJS())
+    if (gen->compilingWasm())
         masm.callWithABI(wasm::SymbolicAddress::aeabi_idivmod);
     else
         masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, __aeabi_idivmod));
 
     // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
     if (mir->canBeNegativeDividend()) {
         if (mir->isTruncated()) {
             // -0.0|0 == 0
@@ -2197,17 +2197,17 @@ CodeGeneratorARM::visitWasmSelect(LWasmS
         masm.moveFloat32(falseExpr, out, Assembler::Zero);
     else
         MOZ_CRASH("unhandled type in visitWasmSelect!");
 }
 
 void
 CodeGeneratorARM::visitWasmReinterpret(LWasmReinterpret* lir)
 {
-    MOZ_ASSERT(gen->compilingAsmJS());
+    MOZ_ASSERT(gen->compilingWasm());
     MWasmReinterpret* ins = lir->mir();
 
     MIRType to = ins->type();
     DebugOnly<MIRType> from = ins->input()->type();
 
     switch (to) {
       case MIRType::Int32:
         MOZ_ASSERT(from == MIRType::Float32);
@@ -3042,17 +3042,17 @@ CodeGeneratorARM::visitSoftUDivOrMod(LSo
     MMod* mod = !div ? ins->mir()->toMod() : nullptr;
 
     generateUDivModZeroCheck(rhs, output, &done, ins->snapshot(), div);
     generateUDivModZeroCheck(rhs, output, &done, ins->snapshot(), mod);
 
     masm.setupAlignedABICall();
     masm.passABIArg(lhs);
     masm.passABIArg(rhs);
-    if (gen->compilingAsmJS())
+    if (gen->compilingWasm())
         masm.callWithABI(wasm::SymbolicAddress::aeabi_uidivmod);
     else
         masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, __aeabi_uidivmod));
 
     // uidivmod returns the quotient in r0, and the remainder in r1.
     if (div && !div->canTruncateRemainder()) {
         MOZ_ASSERT(div->fallible());
         masm.as_cmp(r1, Imm8(0));
@@ -3401,17 +3401,17 @@ CodeGeneratorARM::visitDivOrModI64(LDivO
     }
 
     masm.setupUnalignedABICall(temp);
     masm.passABIArg(lhs.high);
     masm.passABIArg(lhs.low);
     masm.passABIArg(rhs.high);
     masm.passABIArg(rhs.low);
 
-    MOZ_ASSERT(gen->compilingAsmJS());
+    MOZ_ASSERT(gen->compilingWasm());
     if (lir->mir()->isMod())
         masm.callWithABI(wasm::SymbolicAddress::ModI64);
     else
         masm.callWithABI(wasm::SymbolicAddress::DivI64);
 
     MOZ_ASSERT(ReturnReg64 == output);
 
     masm.bind(&done);
@@ -3441,17 +3441,17 @@ CodeGeneratorARM::visitUDivOrModI64(LUDi
         masm.branchTest64(Assembler::Zero, rhs, rhs, temp, trap(lir, wasm::Trap::IntegerDivideByZero));
 
     masm.setupUnalignedABICall(temp);
     masm.passABIArg(lhs.high);
     masm.passABIArg(lhs.low);
     masm.passABIArg(rhs.high);
     masm.passABIArg(rhs.low);
 
-    MOZ_ASSERT(gen->compilingAsmJS());
+    MOZ_ASSERT(gen->compilingWasm());
     if (lir->mir()->isMod())
         masm.callWithABI(wasm::SymbolicAddress::UModI64);
     else
         masm.callWithABI(wasm::SymbolicAddress::UDivI64);
 }
 
 void
 CodeGeneratorARM::visitCompareI64(LCompareI64* lir)
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
@@ -2372,17 +2372,17 @@ CodeGeneratorMIPSShared::visitWasmSelect
 
         masm.bind(&done);
     }
 }
 
 void
 CodeGeneratorMIPSShared::visitWasmReinterpret(LWasmReinterpret* lir)
 {
-    MOZ_ASSERT(gen->compilingAsmJS());
+    MOZ_ASSERT(gen->compilingWasm());
     MWasmReinterpret* ins = lir->mir();
 
     MIRType to = ins->type();
     DebugOnly<MIRType> from = ins->input()->type();
 
     switch (to) {
       case MIRType::Int32:
         MOZ_ASSERT(from == MIRType::Float32);
--- a/js/src/jit/mips32/CodeGenerator-mips32.cpp
+++ b/js/src/jit/mips32/CodeGenerator-mips32.cpp
@@ -403,17 +403,17 @@ CodeGeneratorMIPS::visitDivOrModI64(LDiv
     }
 
     masm.setupUnalignedABICall(temp);
     masm.passABIArg(lhs.high);
     masm.passABIArg(lhs.low);
     masm.passABIArg(rhs.high);
     masm.passABIArg(rhs.low);
 
-    MOZ_ASSERT(gen->compilingAsmJS());
+    MOZ_ASSERT(gen->compilingWasm());
     if (lir->mir()->isMod())
         masm.callWithABI(wasm::SymbolicAddress::ModI64);
     else
         masm.callWithABI(wasm::SymbolicAddress::DivI64);
     MOZ_ASSERT(ReturnReg64 == output);
 
     masm.bind(&done);
 }
@@ -442,17 +442,17 @@ CodeGeneratorMIPS::visitUDivOrModI64(LUD
         masm.branchTest64(Assembler::Zero, rhs, rhs, temp, trap(lir, wasm::Trap::IntegerDivideByZero));
 
     masm.setupUnalignedABICall(temp);
     masm.passABIArg(lhs.high);
     masm.passABIArg(lhs.low);
     masm.passABIArg(rhs.high);
     masm.passABIArg(rhs.low);
 
-    MOZ_ASSERT(gen->compilingAsmJS());
+    MOZ_ASSERT(gen->compilingWasm());
     if (lir->mir()->isMod())
         masm.callWithABI(wasm::SymbolicAddress::UModI64);
     else
         masm.callWithABI(wasm::SymbolicAddress::UDivI64);
 }
 
 template <typename T>
 void
--- a/js/src/jit/shared/CodeGenerator-shared-inl.h
+++ b/js/src/jit/shared/CodeGenerator-shared-inl.h
@@ -217,17 +217,17 @@ GetTempValue(Register type, Register pay
 #error "Unknown"
 #endif
 }
 
 int32_t
 CodeGeneratorShared::ArgToStackOffset(int32_t slot) const
 {
     return masm.framePushed() +
-           (gen->compilingAsmJS() ? sizeof(AsmJSFrame) : sizeof(JitFrameLayout)) +
+           (gen->compilingWasm() ? sizeof(AsmJSFrame) : sizeof(JitFrameLayout)) +
            slot;
 }
 
 int32_t
 CodeGeneratorShared::CalleeStackOffset() const
 {
     return masm.framePushed() + JitFrameLayout::offsetOfCalleeToken();
 }
--- a/js/src/jit/shared/CodeGenerator-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -74,17 +74,17 @@ CodeGeneratorShared::CodeGeneratorShared
     checkOsiPointRegisters(JitOptions.checkOsiPointRegisters),
 #endif
     frameDepth_(graph->paddedLocalSlotsSize() + graph->argumentsSize()),
     frameInitialAdjustment_(0)
 {
     if (gen->isProfilerInstrumentationEnabled())
         masm.enableProfilingInstrumentation();
 
-    if (gen->compilingAsmJS()) {
+    if (gen->compilingWasm()) {
         // Since asm.js uses the system ABI which does not necessarily use a
         // regular array where all slots are sizeof(Value), it maintains the max
         // argument stack depth separately.
         MOZ_ASSERT(graph->argumentSlotCount() == 0);
         frameDepth_ += gen->wasmMaxStackArgBytes();
 
         if (gen->usesSimd()) {
             // If the function uses any SIMD then we may need to insert padding
@@ -111,17 +111,17 @@ CodeGeneratorShared::CodeGeneratorShared
         frameClass_ = FrameSizeClass::FromDepth(frameDepth_);
     }
 }
 
 bool
 CodeGeneratorShared::generatePrologue()
 {
     MOZ_ASSERT(masm.framePushed() == 0);
-    MOZ_ASSERT(!gen->compilingAsmJS());
+    MOZ_ASSERT(!gen->compilingWasm());
 
 #ifdef JS_USE_LINK_REGISTER
     masm.pushReturnAddress();
 #endif
 
     // If profiling, save the current frame pointer to a per-thread global field.
     if (isProfilerInstrumentationEnabled())
         masm.profilerEnterFrame(masm.getStackPointer(), CallTempReg0);
@@ -135,17 +135,17 @@ CodeGeneratorShared::generatePrologue()
 
     emitTracelogIonStart();
     return true;
 }
 
 bool
 CodeGeneratorShared::generateEpilogue()
 {
-    MOZ_ASSERT(!gen->compilingAsmJS());
+    MOZ_ASSERT(!gen->compilingWasm());
     masm.bind(&returnLabel_);
 
     emitTracelogIonStop();
 
     masm.freeStack(frameSize());
     MOZ_ASSERT(masm.framePushed() == 0);
 
     // If profiling, reset the per-thread global lastJitFrame to point to
@@ -161,17 +161,17 @@ CodeGeneratorShared::generateEpilogue()
 }
 
 bool
 CodeGeneratorShared::generateOutOfLineCode()
 {
     for (size_t i = 0; i < outOfLineCode_.length(); i++) {
         // Add native => bytecode mapping entries for OOL sites.
         // Not enabled on asm.js yet since asm doesn't contain bytecode mappings.
-        if (!gen->compilingAsmJS()) {
+        if (!gen->compilingWasm()) {
             if (!addNativeToBytecodeEntry(outOfLineCode_[i]->bytecodeSite()))
                 return false;
         }
 
         if (!gen->alloc().ensureBallast())
             return false;
 
         JitSpew(JitSpew_Codegen, "# Emitting out of line code");
@@ -193,17 +193,17 @@ CodeGeneratorShared::addOutOfLineCode(Ou
     addOutOfLineCode(code, mir->trackedSite());
 }
 
 void
 CodeGeneratorShared::addOutOfLineCode(OutOfLineCode* code, const BytecodeSite* site)
 {
     code->setFramePushed(masm.framePushed());
     code->setBytecodeSite(site);
-    MOZ_ASSERT_IF(!gen->compilingAsmJS(), code->script()->containsPC(code->pc()));
+    MOZ_ASSERT_IF(!gen->compilingWasm(), code->script()->containsPC(code->pc()));
     masm.propagateOOM(outOfLineCode_.append(code));
 }
 
 bool
 CodeGeneratorShared::addNativeToBytecodeEntry(const BytecodeSite* site)
 {
     // Skip the table entirely if profiling is not enabled.
     if (!isProfilerInstrumentationEnabled())
@@ -1457,17 +1457,17 @@ CodeGeneratorShared::emitTruncateFloat32
 
 void
 CodeGeneratorShared::visitOutOfLineTruncateSlow(OutOfLineTruncateSlow* ool)
 {
     FloatRegister src = ool->src();
     Register dest = ool->dest();
 
     saveVolatile(dest);
-    masm.outOfLineTruncateSlow(src, dest, ool->widenFloatToDouble(), gen->compilingAsmJS());
+    masm.outOfLineTruncateSlow(src, dest, ool->widenFloatToDouble(), gen->compilingWasm());
     restoreVolatile(dest);
 
     masm.jump(ool->rejoin());
 }
 
 bool
 CodeGeneratorShared::omitOverRecursedCheck() const
 {
@@ -1556,17 +1556,17 @@ CodeGeneratorShared::emitPreBarrier(Addr
 Label*
 CodeGeneratorShared::labelForBackedgeWithImplicitCheck(MBasicBlock* mir)
 {
     // If this is a loop backedge to a loop header with an implicit interrupt
     // check, use a patchable jump. Skip this search if compiling without a
     // script for asm.js, as there will be no interrupt check instruction.
     // Due to critical edge unsplitting there may no longer be unique loop
     // backedges, so just look for any edge going to an earlier block in RPO.
-    if (!gen->compilingAsmJS() && mir->isLoopHeader() && mir->id() <= current->mir()->id()) {
+    if (!gen->compilingWasm() && mir->isLoopHeader() && mir->id() <= current->mir()->id()) {
         for (LInstructionIterator iter = mir->lir()->begin(); iter != mir->lir()->end(); iter++) {
             if (iter->isMoveGroup()) {
                 // Continue searching for an interrupt check.
             } else {
                 // The interrupt check should be the first instruction in the
                 // loop header other than move groups.
                 MOZ_ASSERT(iter->isInterruptCheck());
                 if (iter->toInterruptCheck()->implicit())
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -362,17 +362,17 @@ CodeGeneratorX86Shared::visitWasmSelect(
 
     masm.bind(&done);
     return;
 }
 
 void
 CodeGeneratorX86Shared::visitWasmReinterpret(LWasmReinterpret* lir)
 {
-    MOZ_ASSERT(gen->compilingAsmJS());
+    MOZ_ASSERT(gen->compilingWasm());
     MWasmReinterpret* ins = lir->mir();
 
     MIRType to = ins->type();
 #ifdef DEBUG
     MIRType from = ins->input()->type();
 #endif
 
     switch (to) {
@@ -2495,17 +2495,17 @@ CodeGeneratorX86Shared::visitOutOfLineSi
     masm.loadConstantSimd128Float(Int32MaxX4, scratch);
     masm.vcmpleps(Operand(input), scratch, scratch);
     masm.vmovmskps(scratch, temp);
     masm.cmp32(temp, Imm32(0));
     masm.j(Assembler::NotEqual, &onConversionError);
 
     masm.jump(ool->rejoin());
 
-    if (gen->compilingAsmJS()) {
+    if (gen->compilingWasm()) {
         masm.bindLater(&onConversionError, trap(ool, wasm::Trap::ImpreciseSimdConversion));
     } else {
         masm.bind(&onConversionError);
         bailout(ool->ins()->snapshot());
     }
 }
 
 // Convert Float32x4 to Uint32x4.
@@ -2575,17 +2575,17 @@ CodeGeneratorX86Shared::visitFloat32x4To
     masm.bitwiseOrSimd128(Operand(scratch), out);
 
     // We still need to filter out the V-lanes. They would show up as 0x80000000
     // in both A and B. Since we cleared the valid A-lanes in B, the V-lanes are
     // the remaining negative lanes in B.
     masm.vmovmskps(scratch, temp);
     masm.cmp32(temp, Imm32(0));
 
-    if (gen->compilingAsmJS())
+    if (gen->compilingWasm())
         masm.j(Assembler::NotEqual, trap(mir, wasm::Trap::ImpreciseSimdConversion));
     else
         bailoutIf(Assembler::NotEqual, ins->snapshot());
 }
 
 void
 CodeGeneratorX86Shared::visitSimdValueInt32x4(LSimdValueInt32x4* ins)
 {
@@ -2845,17 +2845,17 @@ CodeGeneratorX86Shared::visitSimdExtract
     } else {
         uint32_t mask = MacroAssembler::ComputeShuffleMask(lane);
         masm.shuffleFloat32(mask, input, output);
     }
     // NaNs contained within SIMD values are not enforced to be canonical, so
     // when we extract an element into a "regular" scalar JS value, we have to
     // canonicalize. In asm.js code, we can skip this, as asm.js only has to
     // canonicalize NaNs at FFI boundaries.
-    if (!gen->compilingAsmJS())
+    if (!gen->compilingWasm())
         masm.canonicalizeFloat(output);
 }
 
 void
 CodeGeneratorX86Shared::visitSimdInsertElementI(LSimdInsertElementI* ins)
 {
     FloatRegister vector = ToFloatRegister(ins->vector());
     Register value = ToRegister(ins->value());
--- a/js/src/jit/x86/CodeGenerator-x86.cpp
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -857,17 +857,17 @@ CodeGeneratorX86::visitOutOfLineTruncate
     }
 
     masm.bind(&fail);
     {
         saveVolatile(output);
 
         masm.setupUnalignedABICall(output);
         masm.passABIArg(input, MoveOp::DOUBLE);
-        if (gen->compilingAsmJS())
+        if (gen->compilingWasm())
             masm.callWithABI(wasm::SymbolicAddress::ToInt32);
         else
             masm.callWithABI(BitwiseCast<void*, int32_t(*)(double)>(JS::ToInt32));
         masm.storeCallResult(output);
 
         restoreVolatile(output);
     }
 
@@ -941,17 +941,17 @@ CodeGeneratorX86::visitOutOfLineTruncate
     {
         saveVolatile(output);
 
         masm.push(input);
         masm.setupUnalignedABICall(output);
         masm.vcvtss2sd(input, input, input);
         masm.passABIArg(input.asDouble(), MoveOp::DOUBLE);
 
-        if (gen->compilingAsmJS())
+        if (gen->compilingWasm())
             masm.callWithABI(wasm::SymbolicAddress::ToInt32);
         else
             masm.callWithABI(BitwiseCast<void*, int32_t(*)(double)>(JS::ToInt32));
 
         masm.storeCallResult(output);
         masm.pop(input);
 
         restoreVolatile(output);
@@ -1063,17 +1063,17 @@ CodeGeneratorX86::visitDivOrModI64(LDivO
     }
 
     masm.setupUnalignedABICall(temp);
     masm.passABIArg(lhs.high);
     masm.passABIArg(lhs.low);
     masm.passABIArg(rhs.high);
     masm.passABIArg(rhs.low);
 
-    MOZ_ASSERT(gen->compilingAsmJS());
+    MOZ_ASSERT(gen->compilingWasm());
     if (lir->mir()->isMod())
         masm.callWithABI(wasm::SymbolicAddress::ModI64);
     else
         masm.callWithABI(wasm::SymbolicAddress::DivI64);
 
     // output in edx:eax, move to output register.
     masm.movl(edx, output.high);
     MOZ_ASSERT(eax == output.low);
@@ -1105,17 +1105,17 @@ CodeGeneratorX86::visitUDivOrModI64(LUDi
         masm.branchTest64(Assembler::Zero, rhs, rhs, temp, trap(lir, wasm::Trap::IntegerDivideByZero));
 
     masm.setupUnalignedABICall(temp);
     masm.passABIArg(lhs.high);
     masm.passABIArg(lhs.low);
     masm.passABIArg(rhs.high);
     masm.passABIArg(rhs.low);
 
-    MOZ_ASSERT(gen->compilingAsmJS());
+    MOZ_ASSERT(gen->compilingWasm());
     if (lir->mir()->isMod())
         masm.callWithABI(wasm::SymbolicAddress::UModI64);
     else
         masm.callWithABI(wasm::SymbolicAddress::UDivI64);
 
     // output in edx:eax, move to output register.
     masm.movl(edx, output.high);
     MOZ_ASSERT(eax == output.low);