--- a/js/public/ProfilingFrameIterator.h
+++ b/js/public/ProfilingFrameIterator.h
@@ -14,22 +14,24 @@
#include "js/TypeDecls.h"
#include "js/Utility.h"
struct JSRuntime;
class JSScript;
namespace js {
class Activation;
- class AsmJSProfilingFrameIterator;
namespace jit {
class JitActivation;
class JitProfilingFrameIterator;
class JitcodeGlobalEntry;
} // namespace jit
+ namespace wasm {
+ class ProfilingFrameIterator;
+ } // namespace wasm
} // namespace js
namespace JS {
struct ForEachTrackedOptimizationAttemptOp;
struct ForEachTrackedOptimizationTypeInfoOp;
// This iterator can be used to walk the stack of a thread suspended at an
@@ -44,25 +46,25 @@ class JS_PUBLIC_API(ProfilingFrameIterat
// When moving past a JitActivation, we need to save the prevJitTop
// from it to use as the exit-frame pointer when the next caller jit
// activation (if any) comes around.
void* savedPrevJitTop_;
static const unsigned StorageSpace = 8 * sizeof(void*);
mozilla::AlignedStorage<StorageSpace> storage_;
- js::AsmJSProfilingFrameIterator& asmJSIter() {
+ js::wasm::ProfilingFrameIterator& asmJSIter() {
MOZ_ASSERT(!done());
MOZ_ASSERT(isAsmJS());
- return *reinterpret_cast<js::AsmJSProfilingFrameIterator*>(storage_.addr());
+ return *reinterpret_cast<js::wasm::ProfilingFrameIterator*>(storage_.addr());
}
- const js::AsmJSProfilingFrameIterator& asmJSIter() const {
+ const js::wasm::ProfilingFrameIterator& asmJSIter() const {
MOZ_ASSERT(!done());
MOZ_ASSERT(isAsmJS());
- return *reinterpret_cast<const js::AsmJSProfilingFrameIterator*>(storage_.addr());
+ return *reinterpret_cast<const js::wasm::ProfilingFrameIterator*>(storage_.addr());
}
js::jit::JitProfilingFrameIterator& jitIter() {
MOZ_ASSERT(!done());
MOZ_ASSERT(isJit());
return *reinterpret_cast<js::jit::JitProfilingFrameIterator*>(storage_.addr());
}
--- a/js/src/asmjs/AsmJSLink.cpp
+++ b/js/src/asmjs/AsmJSLink.cpp
@@ -15,34 +15,27 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "asmjs/AsmJSLink.h"
#include "mozilla/PodOperations.h"
-#ifdef MOZ_VTUNE
-# include "vtune/VTuneWrapper.h"
-#endif
-
#include "jscntxt.h"
#include "jsmath.h"
#include "jsprf.h"
#include "jswrapper.h"
#include "asmjs/AsmJSModule.h"
#include "builtin/AtomicsObject.h"
#include "builtin/SIMD.h"
#include "frontend/BytecodeCompiler.h"
#include "jit/Ion.h"
#include "jit/JitCommon.h"
-#ifdef JS_ION_PERF
-# include "jit/PerfSpewer.h"
-#endif
#include "vm/ArrayBufferObject.h"
#include "vm/SharedArrayObject.h"
#include "vm/StringBuffer.h"
#include "jsobjinlines.h"
#include "vm/ArrayBufferObject-inl.h"
#include "vm/NativeObject-inl.h"
@@ -50,31 +43,16 @@
using namespace js;
using namespace js::jit;
using namespace js::wasm;
using mozilla::IsNaN;
using mozilla::PodZero;
static bool
-CloneModule(JSContext* cx, MutableHandle<AsmJSModuleObject*> moduleObj)
-{
- ScopedJSDeletePtr<AsmJSModule> module;
- if (!moduleObj->module().clone(cx, &module))
- return false;
-
- AsmJSModuleObject* newModuleObj = AsmJSModuleObject::create(cx, &module);
- if (!newModuleObj)
- return false;
-
- moduleObj.set(newModuleObj);
- return true;
-}
-
-static bool
LinkFail(JSContext* cx, const char* str)
{
JS_ReportErrorFlagsAndNumber(cx, JSREPORT_WARNING, GetErrorMessage,
nullptr, JSMSG_USE_ASM_LINK_FAIL, str);
return false;
}
static bool
@@ -122,20 +100,20 @@ HasPureCoercion(JSContext* cx, HandleVal
{
return true;
}
return false;
}
static bool
-ValidateGlobalVariable(JSContext* cx, const AsmJSModule& module, AsmJSModule::Global& global,
+ValidateGlobalVariable(JSContext* cx, const AsmJSModule::Global& global, uint8_t* globalData,
HandleValue importVal)
{
- void* datum = module.globalData() + global.varGlobalDataOffset();
+ void* datum = globalData + global.varGlobalDataOffset();
switch (global.varInitKind()) {
case AsmJSModule::Global::InitConstant: {
Val v = global.varInitVal();
switch (v.type()) {
case ValType::I32:
*(int32_t*)datum = v.i32();
break;
@@ -199,33 +177,33 @@ ValidateGlobalVariable(JSContext* cx, co
break;
}
}
return true;
}
static bool
-ValidateFFI(JSContext* cx, AsmJSModule::Global& global, HandleValue importVal,
- AutoObjectVector* ffis)
+ValidateFFI(JSContext* cx, const AsmJSModule::Global& global, HandleValue importVal,
+ AutoVectorRooter<JSFunction*>* ffis)
{
RootedPropertyName field(cx, global.ffiField());
RootedValue v(cx);
if (!GetDataProperty(cx, importVal, field, &v))
return false;
if (!v.isObject() || !v.toObject().is<JSFunction>())
return LinkFail(cx, "FFI imports must be functions");
(*ffis)[global.ffiIndex()].set(&v.toObject().as<JSFunction>());
return true;
}
static bool
-ValidateArrayView(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal)
+ValidateArrayView(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal)
{
RootedPropertyName field(cx, global.maybeViewName());
if (!field)
return true;
RootedValue v(cx);
if (!GetDataProperty(cx, globalVal, field, &v))
return false;
@@ -257,17 +235,17 @@ ValidateByteLength(JSContext* cx, Handle
RootedValue boundThis(cx, fun->getBoundFunctionThis());
if (!IsNativeFunction(boundThis, ArrayBufferObject::byteLengthGetter))
return LinkFail(cx, "bound this value must be ArrayBuffer.protototype.byteLength accessor");
return true;
}
static bool
-ValidateMathBuiltinFunction(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal)
+ValidateMathBuiltinFunction(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal)
{
RootedValue v(cx);
if (!GetDataProperty(cx, globalVal, cx->names().Math, &v))
return false;
RootedPropertyName field(cx, global.mathName());
if (!GetDataProperty(cx, v, field, &v))
return false;
@@ -317,17 +295,17 @@ AsmJSSimdTypeToTypeDescrType(AsmJSSimdTy
switch (type) {
case AsmJSSimdType_int32x4: return Int32x4::type;
case AsmJSSimdType_float32x4: return Float32x4::type;
}
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected AsmJSSimdType");
}
static bool
-ValidateSimdType(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal,
+ValidateSimdType(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal,
MutableHandleValue out)
{
RootedValue v(cx);
if (!GetDataProperty(cx, globalVal, cx->names().SIMD, &v))
return false;
AsmJSSimdType type;
if (global.which() == AsmJSModule::Global::SimdCtor)
@@ -349,24 +327,24 @@ ValidateSimdType(JSContext* cx, AsmJSMod
if (AsmJSSimdTypeToTypeDescrType(type) != simdDesc->as<SimdTypeDescr>().type())
return LinkFail(cx, "bad SIMD type");
out.set(v);
return true;
}
static bool
-ValidateSimdType(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal)
+ValidateSimdType(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal)
{
RootedValue _(cx);
return ValidateSimdType(cx, global, globalVal, &_);
}
static bool
-ValidateSimdOperation(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal)
+ValidateSimdOperation(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal)
{
// SIMD operations are loaded from the SIMD type, so the type must have been
// validated before the operation.
RootedValue v(cx);
JS_ALWAYS_TRUE(ValidateSimdType(cx, global, globalVal, &v));
RootedPropertyName opName(cx, global.simdOperationName());
if (!GetDataProperty(cx, v, opName, &v))
@@ -401,17 +379,17 @@ ValidateSimdOperation(JSContext* cx, Asm
#undef SET_NATIVE
}
if (!native || !IsNativeFunction(v, native))
return LinkFail(cx, "bad SIMD.type.* operation");
return true;
}
static bool
-ValidateAtomicsBuiltinFunction(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal)
+ValidateAtomicsBuiltinFunction(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal)
{
RootedValue v(cx);
if (!GetDataProperty(cx, globalVal, cx->names().Atomics, &v))
return false;
RootedPropertyName field(cx, global.atomicsName());
if (!GetDataProperty(cx, v, field, &v))
return false;
@@ -432,17 +410,17 @@ ValidateAtomicsBuiltinFunction(JSContext
if (!IsNativeFunction(v, native))
return LinkFail(cx, "bad Atomics.* builtin function");
return true;
}
static bool
-ValidateConstant(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal)
+ValidateConstant(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal)
{
RootedPropertyName field(cx, global.constantName());
RootedValue v(cx, globalVal);
if (global.constantKind() == AsmJSModule::Global::MathConstant) {
if (!GetDataProperty(cx, v, cx->names().Math, &v))
return false;
}
@@ -461,96 +439,92 @@ ValidateConstant(JSContext* cx, AsmJSMod
if (v.toNumber() != global.constantValue())
return LinkFail(cx, "global constant value mismatch");
}
return true;
}
static bool
-LinkModuleToHeap(JSContext* cx, AsmJSModule& module, Handle<ArrayBufferObjectMaybeShared*> heap)
+CheckBuffer(JSContext* cx, AsmJSModule& module, HandleValue bufferVal,
+ MutableHandle<ArrayBufferObjectMaybeShared*> buffer)
{
- uint32_t heapLength = heap->byteLength();
+ if (module.isSharedView() && !IsSharedArrayBuffer(bufferVal))
+ return LinkFail(cx, "shared views can only be constructed onto SharedArrayBuffer");
+
+ if (!module.isSharedView() && !IsArrayBuffer(bufferVal))
+ return LinkFail(cx, "unshared views can only be constructed onto ArrayBuffer");
+
+ buffer.set(&AsAnyArrayBuffer(bufferVal));
+ uint32_t heapLength = buffer->byteLength();
if (!IsValidAsmJSHeapLength(heapLength)) {
- ScopedJSFreePtr<char> msg(
+ UniqueChars msg(
JS_smprintf("ArrayBuffer byteLength 0x%x is not a valid heap length. The next "
"valid length is 0x%x",
heapLength,
RoundUpToNextValidAsmJSHeapLength(heapLength)));
return LinkFail(cx, msg.get());
}
// This check is sufficient without considering the size of the loaded datum because heap
// loads and stores start on an aligned boundary and the heap byteLength has larger alignment.
MOZ_ASSERT((module.minHeapLength() - 1) <= INT32_MAX);
if (heapLength < module.minHeapLength()) {
- ScopedJSFreePtr<char> msg(
+ UniqueChars msg(
JS_smprintf("ArrayBuffer byteLength of 0x%x is less than 0x%x (the size implied "
"by const heap accesses and/or change-heap minimum-length requirements).",
heapLength,
module.minHeapLength()));
return LinkFail(cx, msg.get());
}
if (heapLength > module.maxHeapLength()) {
- ScopedJSFreePtr<char> msg(
+ UniqueChars msg(
JS_smprintf("ArrayBuffer byteLength 0x%x is greater than maximum length of 0x%x",
heapLength,
module.maxHeapLength()));
return LinkFail(cx, msg.get());
}
- // If we've generated the code with signal handlers in mind (for bounds
- // checks on x64 and for interrupt callback requesting on all platforms),
- // we need to be able to use signals at runtime. In particular, a module
- // can have been created using signals and cached, and executed without
- // signals activated.
- if (module.usesSignalHandlersForInterrupt() && !cx->canUseSignalHandlers())
- return LinkFail(cx, "Code generated with signal handlers but signals are deactivated");
+ // Shell builtins may have disabled signal handlers since the module we're
+ // cloning was compiled. LookupAsmJSModuleInCache checks for signal handlers
+ // as well for the caching case.
+ if (module.wasm().compileArgs() != CompileArgs(cx))
+ return LinkFail(cx, "Signals have been toggled since compilation");
- if (heap->is<ArrayBufferObject>()) {
- Rooted<ArrayBufferObject*> abheap(cx, &heap->as<ArrayBufferObject>());
- if (!ArrayBufferObject::prepareForAsmJS(cx, abheap, module.usesSignalHandlersForOOB()))
+ if (buffer->is<ArrayBufferObject>()) {
+ Rooted<ArrayBufferObject*> abheap(cx, &buffer->as<ArrayBufferObject>());
+ bool useSignalHandlers = module.wasm().compileArgs().useSignalHandlersForOOB;
+ if (!ArrayBufferObject::prepareForAsmJS(cx, abheap, useSignalHandlers))
return LinkFail(cx, "Unable to prepare ArrayBuffer for asm.js use");
}
- module.initHeap(heap, cx);
return true;
}
static bool
DynamicallyLinkModule(JSContext* cx, const CallArgs& args, AsmJSModule& module)
{
- module.setIsDynamicallyLinked(cx->runtime());
-
HandleValue globalVal = args.get(0);
HandleValue importVal = args.get(1);
HandleValue bufferVal = args.get(2);
- Rooted<ArrayBufferObjectMaybeShared*> heap(cx);
- if (module.hasArrayView()) {
- if (module.isSharedView() && !IsSharedArrayBuffer(bufferVal))
- return LinkFail(cx, "shared views can only be constructed onto SharedArrayBuffer");
- if (!module.isSharedView() && !IsArrayBuffer(bufferVal))
- return LinkFail(cx, "unshared views can only be constructed onto ArrayBuffer");
- heap = &AsAnyArrayBuffer(bufferVal);
- if (!LinkModuleToHeap(cx, module, heap))
- return false;
- }
+ Rooted<ArrayBufferObjectMaybeShared*> buffer(cx);
+ if (module.hasArrayView() && !CheckBuffer(cx, module, bufferVal, &buffer))
+ return false;
- AutoObjectVector ffis(cx);
+ AutoVectorRooter<JSFunction*> ffis(cx);
if (!ffis.resize(module.numFFIs()))
return false;
- for (unsigned i = 0; i < module.numGlobals(); i++) {
- AsmJSModule::Global& global = module.global(i);
+ for (const AsmJSModule::Global& global : module.globals()) {
switch (global.which()) {
case AsmJSModule::Global::Variable:
- if (!ValidateGlobalVariable(cx, module, global, importVal))
+ if (!ValidateGlobalVariable(cx, global, module.wasm().globalData(), importVal))
return false;
break;
case AsmJSModule::Global::FFI:
if (!ValidateFFI(cx, global, importVal, &ffis))
return false;
break;
case AsmJSModule::Global::ArrayView:
case AsmJSModule::Global::ArrayViewCtor:
@@ -579,25 +553,23 @@ DynamicallyLinkModule(JSContext* cx, con
break;
case AsmJSModule::Global::SimdOperation:
if (!ValidateSimdOperation(cx, global, globalVal))
return false;
break;
}
}
- for (unsigned i = 0; i < module.numExits(); i++) {
- const AsmJSModule::Exit& exit = module.exit(i);
- exit.datum(module).fun = &ffis[exit.ffiIndex()]->as<JSFunction>();
+ AutoVectorRooter<JSFunction*> imports(cx);
+ for (const AsmJSModule::Import& import : module.imports()) {
+ if (!imports.append(ffis[import.ffiIndex()]))
+ return false;
}
- // See the comment in AllocateExecutableMemory.
- ExecutableAllocator::makeExecutable(module.codeBase(), module.codeBytes());
-
- return true;
+ return module.wasm().dynamicallyLink(cx, buffer, imports);
}
static bool
ChangeHeap(JSContext* cx, AsmJSModule& module, const CallArgs& args)
{
HandleValue bufferArg = args.get(0);
if (!IsArrayBuffer(bufferArg)) {
ReportIncompatible(cx, args);
@@ -616,79 +588,76 @@ ChangeHeap(JSContext* cx, AsmJSModule& m
if (!module.hasArrayView()) {
args.rval().set(BooleanValue(true));
return true;
}
MOZ_ASSERT(IsValidAsmJSHeapLength(heapLength));
- if (!ArrayBufferObject::prepareForAsmJS(cx, newBuffer, module.usesSignalHandlersForOOB()))
+ bool useSignalHandlers = module.wasm().compileArgs().useSignalHandlersForOOB;
+ if (!ArrayBufferObject::prepareForAsmJS(cx, newBuffer, useSignalHandlers))
return false;
- args.rval().set(BooleanValue(module.changeHeap(newBuffer, cx)));
+ args.rval().set(BooleanValue(module.wasm().changeHeap(newBuffer, cx)));
return true;
}
// An asm.js function stores, in its extended slots:
// - a pointer to the module from which it was returned
// - its index in the ordered list of exported functions
static const unsigned ASM_MODULE_SLOT = 0;
static const unsigned ASM_EXPORT_INDEX_SLOT = 1;
static unsigned
-FunctionToExportedFunctionIndex(HandleFunction fun)
+FunctionToExportIndex(HandleFunction fun)
{
MOZ_ASSERT(IsAsmJSFunction(fun));
Value v = fun->getExtendedSlot(ASM_EXPORT_INDEX_SLOT);
return v.toInt32();
}
-static const AsmJSModule::ExportedFunction&
-FunctionToExportedFunction(HandleFunction fun, AsmJSModule& module)
-{
- unsigned funIndex = FunctionToExportedFunctionIndex(fun);
- return module.exportedFunction(funIndex);
-}
-
static AsmJSModule&
FunctionToEnclosingModule(HandleFunction fun)
{
return fun->getExtendedSlot(ASM_MODULE_SLOT).toObject().as<AsmJSModuleObject>().module();
}
// This is the js::Native for functions exported by an asm.js module.
static bool
CallAsmJS(JSContext* cx, unsigned argc, Value* vp)
{
CallArgs callArgs = CallArgsFromVp(argc, vp);
RootedFunction callee(cx, &callArgs.callee().as<JSFunction>());
- AsmJSModule& module = FunctionToEnclosingModule(callee);
- const AsmJSModule::ExportedFunction& func = FunctionToExportedFunction(callee, module);
// The heap-changing function is a special-case and is implemented by C++.
- if (func.isChangeHeap())
- return ChangeHeap(cx, module, callArgs);
+ AsmJSModule& asmJSModule = FunctionToEnclosingModule(callee);
+ const AsmJSModule::Export& asmJSFunc = asmJSModule.exports()[FunctionToExportIndex(callee)];
+ if (asmJSFunc.isChangeHeap())
+ return ChangeHeap(cx, asmJSModule, callArgs);
+
+ Module& module = asmJSModule.wasm();
+ const Export& func = module.exports()[asmJSFunc.wasmIndex()];
// Enable/disable profiling in the asm.js module to match the current global
// profiling state. Don't do this if the module is already active on the
// stack since this would leave the module in a state where profiling is
// enabled but the stack isn't unwindable.
if (module.profilingEnabled() != cx->runtime()->spsProfiler.enabled() && !module.active())
module.setProfilingEnabled(cx->runtime()->spsProfiler.enabled(), cx);
// The calling convention for an external call into asm.js is to pass an
// array of 16-byte values where each value contains either a coerced int32
// (in the low word), a double value (in the low dword) or a SIMD vector
// value, with the coercions specified by the asm.js signature. The
// external entry point unpacks this array into the system-ABI-specified
// registers and stack memory and then calls into the internal entry point.
// The return value is stored in the first element of the array (which,
// therefore, must have length >= 1).
- js::Vector<AsmJSModule::EntryArg, 8> coercedArgs(cx);
+ Vector<Module::EntryArg, 8> coercedArgs(cx);
if (!coercedArgs.resize(Max<size_t>(1, func.sig().args().length())))
return false;
RootedValue v(cx);
for (unsigned i = 0; i < func.sig().args().length(); ++i) {
v = i < callArgs.length() ? callArgs[i] : UndefinedValue();
switch (func.sig().arg(i)) {
case ValType::I32:
@@ -734,21 +703,21 @@ CallAsmJS(JSContext* cx, unsigned argc,
}
{
// Push an AsmJSActivation to describe the asm.js frames we're about to
// push when running this module. Additionally, push a JitActivation so
// that the optimized asm.js-to-Ion FFI call path (which we want to be
// very fast) can avoid doing so. The JitActivation is marked as
// inactive so stack iteration will skip over it.
- AsmJSActivation activation(cx, module);
+ AsmJSActivation activation(cx, asmJSModule);
JitActivation jitActivation(cx, /* active */ false);
// Call the per-exported-function trampoline created by GenerateEntry.
- AsmJSModule::CodePtr enter = module.entryTrampoline(func);
+ Module::EntryFuncPtr enter = module.entryTrampoline(func);
if (!CALL_GENERATED_2(enter, coercedArgs.begin(), module.globalData()))
return false;
}
if (callArgs.isConstructing()) {
// By spec, when a function is called as a constructor and this function
// returns a primary type, which is the case for all asm.js exported
// functions, the returned value is discarded and an empty object is
@@ -787,21 +756,24 @@ CallAsmJS(JSContext* cx, unsigned argc,
callArgs.rval().set(ObjectValue(*simdObj));
break;
}
return true;
}
static JSFunction*
-NewExportedFunction(JSContext* cx, const AsmJSModule::ExportedFunction& func,
+NewExportedFunction(JSContext* cx, const AsmJSModule& module, const AsmJSModule::Export& func,
HandleObject moduleObj, unsigned exportIndex)
{
+ unsigned numArgs = func.isChangeHeap()
+ ? 1
+ : module.wasm().exports()[func.wasmIndex()].sig().args().length();
+
RootedPropertyName name(cx, func.name());
- unsigned numArgs = func.isChangeHeap() ? 1 : func.sig().args().length();
JSFunction* fun =
NewNativeConstructor(cx, CallAsmJS, numArgs, name,
gc::AllocKind::FUNCTION_EXTENDED, GenericObject,
JSFunction::ASMJS_CTOR);
if (!fun)
return nullptr;
fun->setExtendedSlot(ASM_MODULE_SLOT, ObjectValue(*moduleObj));
@@ -811,29 +783,31 @@ NewExportedFunction(JSContext* cx, const
static bool
HandleDynamicLinkFailure(JSContext* cx, const CallArgs& args, AsmJSModule& module,
HandlePropertyName name)
{
if (cx->isExceptionPending())
return false;
+ ScriptSource* source = module.scriptSource();
+
// Source discarding is allowed to affect JS semantics because it is never
// enabled for normal JS content.
- bool haveSource = module.scriptSource()->hasSourceData();
- if (!haveSource && !JSScript::loadSource(cx, module.scriptSource(), &haveSource))
+ bool haveSource = source->hasSourceData();
+ if (!haveSource && !JSScript::loadSource(cx, source, &haveSource))
return false;
if (!haveSource) {
JS_ReportError(cx, "asm.js link failure with source discarding enabled");
return false;
}
uint32_t begin = module.srcBodyStart(); // starts right after 'use asm'
uint32_t end = module.srcEndBeforeCurly();
- Rooted<JSFlatString*> src(cx, module.scriptSource()->substringDontDeflate(cx, begin, end));
+ Rooted<JSFlatString*> src(cx, source->substringDontDeflate(cx, begin, end));
if (!src)
return false;
RootedFunction fun(cx, NewScriptedFunction(cx, 0, JSFunction::INTERPRETED_NORMAL,
name, gc::AllocKind::FUNCTION,
TenuredObject));
if (!fun)
return false;
@@ -845,18 +819,18 @@ HandleDynamicLinkFailure(JSContext* cx,
if (module.globalArgumentName())
formals.infallibleAppend(module.globalArgumentName());
if (module.importArgumentName())
formals.infallibleAppend(module.importArgumentName());
if (module.bufferArgumentName())
formals.infallibleAppend(module.bufferArgumentName());
CompileOptions options(cx);
- options.setMutedErrors(module.scriptSource()->mutedErrors())
- .setFile(module.scriptSource()->filename())
+ options.setMutedErrors(source->mutedErrors())
+ .setFile(source->filename())
.setNoScriptRval(false);
// The exported function inherits an implicit strict context if the module
// also inherited it somehow.
if (module.strict())
options.strictOption = true;
AutoStableStringChars stableChars(cx);
@@ -871,122 +845,37 @@ HandleDynamicLinkFailure(JSContext* cx,
if (!frontend::CompileFunctionBody(cx, &fun, options, formals, srcBuf))
return false;
// Call the function we just recompiled.
args.setCallee(ObjectValue(*fun));
return Invoke(cx, args, args.isConstructing() ? CONSTRUCT : NO_CONSTRUCT);
}
-#ifdef MOZ_VTUNE
-static bool
-SendFunctionsToVTune(JSContext* cx, AsmJSModule& module)
+static JSObject*
+CreateExportObject(JSContext* cx, HandleAsmJSModule moduleObj)
{
- uint8_t* base = module.codeBase();
-
- for (unsigned i = 0; i < module.numProfiledFunctions(); i++) {
- const AsmJSModule::ProfiledFunction& func = module.profiledFunction(i);
-
- uint8_t* start = base + func.pod.startCodeOffset;
- uint8_t* end = base + func.pod.endCodeOffset;
- MOZ_ASSERT(end >= start);
-
- unsigned method_id = iJIT_GetNewMethodID();
- if (method_id == 0)
- return false;
+ AsmJSModule& module = moduleObj->module();
+ const AsmJSModule::ExportVector& exports = module.exports();
- JSAutoByteString bytes;
- const char* method_name = AtomToPrintableString(cx, func.name, &bytes);
- if (!method_name)
- return false;
-
- iJIT_Method_Load method;
- method.method_id = method_id;
- method.method_name = const_cast<char*>(method_name);
- method.method_load_address = (void*)start;
- method.method_size = unsigned(end - start);
- method.line_number_size = 0;
- method.line_number_table = nullptr;
- method.class_id = 0;
- method.class_file_name = nullptr;
- method.source_file_name = nullptr;
-
- iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED, (void*)&method);
+ if (exports.length() == 1) {
+ const AsmJSModule::Export& func = exports[0];
+ if (!func.maybeFieldName())
+ return NewExportedFunction(cx, module, func, moduleObj, 0);
}
- return true;
-}
-#endif
-
-#ifdef JS_ION_PERF
-static bool
-SendFunctionsToPerf(JSContext* cx, AsmJSModule& module)
-{
- if (!PerfFuncEnabled())
- return true;
-
- uintptr_t base = (uintptr_t) module.codeBase();
- const char* filename = module.scriptSource()->filename();
-
- for (unsigned i = 0; i < module.numProfiledFunctions(); i++) {
- const AsmJSModule::ProfiledFunction& func = module.profiledFunction(i);
- uintptr_t start = base + (unsigned long) func.pod.startCodeOffset;
- uintptr_t end = base + (unsigned long) func.pod.endCodeOffset;
- MOZ_ASSERT(end >= start);
- size_t size = end - start;
-
- JSAutoByteString bytes;
- const char* name = AtomToPrintableString(cx, func.name, &bytes);
- if (!name)
- return false;
-
- writePerfSpewerAsmJSFunctionMap(start, size, filename, func.pod.lineno,
- func.pod.columnIndex, name);
- }
-
- return true;
-}
-#endif
-
-static bool
-SendModuleToAttachedProfiler(JSContext* cx, AsmJSModule& module)
-{
-#if defined(MOZ_VTUNE)
- if (IsVTuneProfilingActive() && !SendFunctionsToVTune(cx, module))
- return false;
-#endif
-#if defined(JS_ION_PERF)
- if (!SendFunctionsToPerf(cx, module))
- return false;
-#endif
-
- return true;
-}
-
-
-static JSObject*
-CreateExportObject(JSContext* cx, Handle<AsmJSModuleObject*> moduleObj)
-{
- AsmJSModule& module = moduleObj->module();
-
- if (module.numExportedFunctions() == 1) {
- const AsmJSModule::ExportedFunction& func = module.exportedFunction(0);
- if (!func.maybeFieldName())
- return NewExportedFunction(cx, func, moduleObj, 0);
- }
-
- gc::AllocKind allocKind = gc::GetGCObjectKind(module.numExportedFunctions());
+ gc::AllocKind allocKind = gc::GetGCObjectKind(exports.length());
RootedPlainObject obj(cx, NewBuiltinClassInstance<PlainObject>(cx, allocKind));
if (!obj)
return nullptr;
- for (unsigned i = 0; i < module.numExportedFunctions(); i++) {
- const AsmJSModule::ExportedFunction& func = module.exportedFunction(i);
+ for (unsigned i = 0; i < exports.length(); i++) {
+ const AsmJSModule::Export& func = exports[i];
- RootedFunction fun(cx, NewExportedFunction(cx, func, moduleObj, i));
+ RootedFunction fun(cx, NewExportedFunction(cx, module, func, moduleObj, i));
if (!fun)
return nullptr;
MOZ_ASSERT(func.maybeFieldName() != nullptr);
RootedId id(cx, NameToId(func.maybeFieldName()));
RootedValue val(cx, ObjectValue(*fun));
if (!NativeDefineProperty(cx, obj, id, val, nullptr, nullptr, JSPROP_ENUMERATE))
return nullptr;
@@ -1009,45 +898,44 @@ LinkAsmJS(JSContext* cx, unsigned argc,
{
CallArgs args = CallArgsFromVp(argc, vp);
// The LinkAsmJS builtin (created by NewAsmJSModuleFunction) is an extended
// function and stores its module in an extended slot.
RootedFunction fun(cx, &args.callee().as<JSFunction>());
Rooted<AsmJSModuleObject*> moduleObj(cx, &ModuleFunctionToModuleObject(fun));
-
// When a module is linked, it is dynamically specialized to the given
// arguments (buffer, ffis). Thus, if the module is linked again (it is just
// a function so it can be called multiple times), we need to clone a new
// module.
- if (moduleObj->module().isDynamicallyLinked() && !CloneModule(cx, &moduleObj))
- return false;
+ if (moduleObj->module().wasm().dynamicallyLinked()) {
+ Rooted<AsmJSModuleObject*> clone(cx, AsmJSModuleObject::create(cx));
+ if (!clone)
+ return false;
+
+ if (!moduleObj->module().clone(cx, clone))
+ return false;
+
+ moduleObj = clone;
+ }
AsmJSModule& module = moduleObj->module();
- AutoFlushICache afc("LinkAsmJS");
- module.setAutoFlushICacheRange();
-
// Link the module by performing the link-time validation checks in the
// asm.js spec and then patching the generated module to associate it with
// the given heap (ArrayBuffer) and a new global data segment (the closure
// state shared by the inner asm.js functions).
if (!DynamicallyLinkModule(cx, args, module)) {
// Linking failed, so reparse the entire asm.js module from scratch to
// get normal interpreted bytecode which we can simply Invoke. Very slow.
RootedPropertyName name(cx, fun->name());
return HandleDynamicLinkFailure(cx, args, module, name);
}
- // Notify profilers so that asm.js generated code shows up with JS function
- // names and lines in native (i.e., not SPS) profilers.
- if (!SendModuleToAttachedProfiler(cx, module))
- return false;
-
// Link-time validation succeeded, so wrap all the exported functions with
// CallAsmJS builtins that trampoline into the generated code.
JSObject* obj = CreateExportObject(cx, moduleObj);
if (!obj)
return false;
args.rval().set(ObjectValue(*obj));
return true;
@@ -1213,17 +1101,17 @@ js::IsAsmJSModuleLoadedFromCache(JSConte
JSFunction* fun;
if (!args.hasDefined(0) || !IsMaybeWrappedNativeFunction(args[0], LinkAsmJS, &fun)) {
JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_USE_ASM_TYPE_FAIL,
"argument passed to isAsmJSModuleLoadedFromCache is not a "
"validated asm.js module");
return false;
}
- bool loadedFromCache = ModuleFunctionToModuleObject(fun).module().loadedFromCache();
+ bool loadedFromCache = ModuleFunctionToModuleObject(fun).module().wasm().loadedFromCache();
args.rval().set(BooleanValue(loadedFromCache));
return true;
}
bool
js::IsAsmJSFunction(JSContext* cx, unsigned argc, Value* vp)
{
@@ -1238,17 +1126,17 @@ js::IsAsmJSFunction(HandleFunction fun)
{
return fun->isNative() && fun->maybeNative() == CallAsmJS;
}
JSString*
js::AsmJSFunctionToString(JSContext* cx, HandleFunction fun)
{
AsmJSModule& module = FunctionToEnclosingModule(fun);
- const AsmJSModule::ExportedFunction& f = FunctionToExportedFunction(fun, module);
+ const AsmJSModule::Export& f = module.exports()[FunctionToExportIndex(fun)];
uint32_t begin = module.srcStart() + f.startOffsetInModule();
uint32_t end = module.srcStart() + f.endOffsetInModule();
ScriptSource* source = module.scriptSource();
StringBuffer out(cx);
if (!out.append("function "))
return nullptr;
--- a/js/src/asmjs/AsmJSModule.cpp
+++ b/js/src/asmjs/AsmJSModule.cpp
@@ -13,962 +13,132 @@
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "asmjs/AsmJSModule.h"
-#include "mozilla/BinarySearch.h"
#include "mozilla/Compression.h"
#include "mozilla/EnumeratedRange.h"
#include "mozilla/PodOperations.h"
-#include "mozilla/TaggedAnonymousMemory.h"
-#include "mozilla/Vector.h"
-#include "jslibmath.h"
-#include "jsmath.h"
#include "jsprf.h"
-#include "builtin/AtomicsObject.h"
+#include "asmjs/WasmSerialize.h"
#include "frontend/Parser.h"
-#include "jit/IonCode.h"
-#ifdef JS_ION_PERF
-# include "jit/PerfSpewer.h"
-#endif
#include "js/Class.h"
-#include "js/Conversions.h"
#include "js/MemoryMetrics.h"
-#include "vm/Time.h"
#include "jsobjinlines.h"
#include "frontend/ParseNode-inl.h"
-#include "jit/MacroAssembler-inl.h"
-#include "vm/ArrayBufferObject-inl.h"
-#include "vm/Stack-inl.h"
using namespace js;
+using namespace js::frontend;
using namespace js::jit;
using namespace js::wasm;
-using namespace js::frontend;
-using mozilla::BinarySearch;
-using mozilla::Compression::LZ4;
-using mozilla::MakeEnumeratedRange;
-using mozilla::MallocSizeOf;
-using mozilla::PodCopy;
+using mozilla::PodZero;
using mozilla::PodEqual;
-using mozilla::PodZero;
-using mozilla::Swap;
-using JS::GenericNaN;
-
-static uint8_t*
-AllocateExecutableMemory(ExclusiveContext* cx, size_t bytes)
-{
- // On most platforms, this will allocate RWX memory. On iOS, or when
- // --non-writable-jitcode is used, this will allocate RW memory. In this
- // case, DynamicallyLinkModule will reprotect the code as RX.
- unsigned permissions =
- ExecutableAllocator::initialProtectionFlags(ExecutableAllocator::Writable);
- void* p = AllocateExecutableMemory(nullptr, bytes, permissions, "asm-js-code", AsmJSPageSize);
- if (!p)
- ReportOutOfMemory(cx);
- return (uint8_t*)p;
-}
+using mozilla::Compression::LZ4;
AsmJSModule::AsmJSModule(ScriptSource* scriptSource, uint32_t srcStart, uint32_t srcBodyStart,
- bool strict, bool canUseSignalHandlers)
- : srcStart_(srcStart),
+ bool strict)
+ : scriptSource_(scriptSource),
+ srcStart_(srcStart),
srcBodyStart_(srcBodyStart),
- scriptSource_(scriptSource),
globalArgumentName_(nullptr),
importArgumentName_(nullptr),
- bufferArgumentName_(nullptr),
- code_(nullptr),
- interruptExit_(nullptr),
- prevLinked_(nullptr),
- nextLinked_(nullptr),
- dynamicallyLinked_(false),
- loadedFromCache_(false),
- profilingEnabled_(false),
- interrupted_(false)
+ bufferArgumentName_(nullptr)
{
mozilla::PodZero(&pod);
- pod.globalBytes_ = sInitialGlobalDataBytes;
pod.minHeapLength_ = RoundUpToNextValidAsmJSHeapLength(0);
pod.maxHeapLength_ = 0x80000000;
pod.strict_ = strict;
- pod.canUseSignalHandlers_ = canUseSignalHandlers;
// AsmJSCheckedImmediateRange should be defined to be at most the minimum
// heap length so that offsets can be folded into bounds checks.
MOZ_ASSERT(pod.minHeapLength_ - AsmJSCheckedImmediateRange <= pod.minHeapLength_);
-
- scriptSource_->incref();
-}
-
-AsmJSModule::~AsmJSModule()
-{
- MOZ_ASSERT(!interrupted_);
-
- scriptSource_->decref();
-
- if (code_) {
- for (unsigned i = 0; i < numExits(); i++) {
- AsmJSModule::ExitDatum& exitDatum = exit(i).datum(*this);
- if (!exitDatum.baselineScript)
- continue;
-
- jit::DependentAsmJSModuleExit exit(this, i);
- exitDatum.baselineScript->removeDependentAsmJSModule(exit);
- }
-
- DeallocateExecutableMemory(code_, pod.totalBytes_, AsmJSPageSize);
- }
-
- if (prevLinked_)
- *prevLinked_ = nextLinked_;
- if (nextLinked_)
- nextLinked_->prevLinked_ = prevLinked_;
}
void
AsmJSModule::trace(JSTracer* trc)
{
+ if (wasm_)
+ wasm_->trace(trc);
for (Global& global : globals_)
global.trace(trc);
- for (Exit& exit : exits_) {
- if (exit.datum(*this).fun)
- TraceEdge(trc, &exit.datum(*this).fun, "asm.js imported function");
- }
- for (ExportedFunction& exp : exports_)
+ for (Export& exp : exports_)
exp.trace(trc);
- for (Name& name : names_)
- TraceManuallyBarrieredEdge(trc, &name.name(), "asm.js module function name");
-#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
- for (ProfiledFunction& profiledFunction : profiledFunctions_)
- profiledFunction.trace(trc);
-#endif
if (globalArgumentName_)
TraceManuallyBarrieredEdge(trc, &globalArgumentName_, "asm.js global argument name");
if (importArgumentName_)
TraceManuallyBarrieredEdge(trc, &importArgumentName_, "asm.js import argument name");
if (bufferArgumentName_)
TraceManuallyBarrieredEdge(trc, &bufferArgumentName_, "asm.js buffer argument name");
- if (maybeHeap_)
- TraceEdge(trc, &maybeHeap_, "asm.js heap");
}
void
AsmJSModule::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* asmJSModuleCode,
size_t* asmJSModuleData)
{
- *asmJSModuleCode += pod.totalBytes_;
+ if (wasm_)
+ wasm_->addSizeOfMisc(mallocSizeOf, asmJSModuleCode, asmJSModuleData);
+
+ if (linkData_)
+ *asmJSModuleData += linkData_->sizeOfExcludingThis(mallocSizeOf);
+
*asmJSModuleData += mallocSizeOf(this) +
globals_.sizeOfExcludingThis(mallocSizeOf) +
- exits_.sizeOfExcludingThis(mallocSizeOf) +
- exports_.sizeOfExcludingThis(mallocSizeOf) +
- callSites_.sizeOfExcludingThis(mallocSizeOf) +
- codeRanges_.sizeOfExcludingThis(mallocSizeOf) +
- names_.sizeOfExcludingThis(mallocSizeOf) +
- heapAccesses_.sizeOfExcludingThis(mallocSizeOf) +
-#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
- profiledFunctions_.sizeOfExcludingThis(mallocSizeOf) +
-#endif
- staticLinkData_.sizeOfExcludingThis(mallocSizeOf);
-}
-
-struct CallSiteRetAddrOffset
-{
- const CallSiteVector& callSites;
- explicit CallSiteRetAddrOffset(const CallSiteVector& callSites) : callSites(callSites) {}
- uint32_t operator[](size_t index) const {
- return callSites[index].returnAddressOffset();
- }
-};
-
-const CallSite*
-AsmJSModule::lookupCallSite(void* returnAddress) const
-{
- MOZ_ASSERT(isFinished());
-
- uint32_t target = ((uint8_t*)returnAddress) - code_;
- size_t lowerBound = 0;
- size_t upperBound = callSites_.length();
-
- size_t match;
- if (!BinarySearch(CallSiteRetAddrOffset(callSites_), lowerBound, upperBound, target, &match))
- return nullptr;
-
- return &callSites_[match];
+ imports_.sizeOfExcludingThis(mallocSizeOf) +
+ exports_.sizeOfExcludingThis(mallocSizeOf);
}
-namespace js {
-
-// Create an ordering on CodeRange and pc offsets suitable for BinarySearch.
-// Stick these in the same namespace as AsmJSModule so that argument-dependent
-// lookup will find it.
-bool
-operator==(size_t pcOffset, const AsmJSModule::CodeRange& rhs)
-{
- return pcOffset >= rhs.begin() && pcOffset < rhs.end();
-}
-bool
-operator<=(const AsmJSModule::CodeRange& lhs, const AsmJSModule::CodeRange& rhs)
-{
- return lhs.begin() <= rhs.begin();
-}
-bool
-operator<(size_t pcOffset, const AsmJSModule::CodeRange& rhs)
-{
- return pcOffset < rhs.begin();
-}
-
-} // namespace js
-
-const AsmJSModule::CodeRange*
-AsmJSModule::lookupCodeRange(void* pc) const
-{
- MOZ_ASSERT(isFinished());
-
- uint32_t target = ((uint8_t*)pc) - code_;
- size_t lowerBound = 0;
- size_t upperBound = codeRanges_.length();
-
- size_t match;
- if (!BinarySearch(codeRanges_, lowerBound, upperBound, target, &match))
- return nullptr;
-
- return &codeRanges_[match];
-}
-
-struct HeapAccessOffset
-{
- const HeapAccessVector& accesses;
- explicit HeapAccessOffset(const HeapAccessVector& accesses) : accesses(accesses) {}
- uintptr_t operator[](size_t index) const {
- return accesses[index].insnOffset();
- }
-};
-
-const HeapAccess*
-AsmJSModule::lookupHeapAccess(void* pc) const
-{
- MOZ_ASSERT(isFinished());
- MOZ_ASSERT(containsFunctionPC(pc));
-
- uint32_t target = ((uint8_t*)pc) - code_;
- size_t lowerBound = 0;
- size_t upperBound = heapAccesses_.length();
-
- size_t match;
- if (!BinarySearch(HeapAccessOffset(heapAccesses_), lowerBound, upperBound, target, &match))
- return nullptr;
-
- return &heapAccesses_[match];
-}
-
-bool
-AsmJSModule::finish(ExclusiveContext* cx, TokenStream& tokenStream, MacroAssembler& masm)
+void
+AsmJSModule::finish(Module* wasm, wasm::UniqueStaticLinkData linkData,
+ uint32_t endBeforeCurly, uint32_t endAfterCurly)
{
MOZ_ASSERT(!isFinished());
- uint32_t endBeforeCurly = tokenStream.currentToken().pos.end;
- TokenPos pos;
- if (!tokenStream.peekTokenPos(&pos, TokenStream::Operand))
- return false;
- uint32_t endAfterCurly = pos.end;
+ wasm_.reset(wasm);
+ linkData_ = Move(linkData);
+
MOZ_ASSERT(endBeforeCurly >= srcBodyStart_);
MOZ_ASSERT(endAfterCurly >= srcBodyStart_);
pod.srcLength_ = endBeforeCurly - srcStart_;
pod.srcLengthWithRightBrace_ = endAfterCurly - srcStart_;
- // Start global data on a new page so JIT code may be given independent
- // protection flags.
- pod.codeBytes_ = AlignBytes(masm.bytesNeeded(), AsmJSPageSize);
- MOZ_ASSERT(pod.functionBytes_ <= pod.codeBytes_);
-
- // The entire region is allocated via mmap/VirtualAlloc which requires
- // units of pages.
- pod.totalBytes_ = AlignBytes(pod.codeBytes_ + pod.globalBytes_, AsmJSPageSize);
-
- MOZ_ASSERT(!code_);
- code_ = AllocateExecutableMemory(cx, pod.totalBytes_);
- if (!code_)
- return false;
-
- // Delay flushing until dynamic linking. The flush-inhibited range is set within
- // masm.executableCopy.
- AutoFlushICache afc("CheckModule", /* inhibit = */ true);
-
- // Copy the code from the MacroAssembler into its final resting place in the
- // AsmJSModule.
- MOZ_ASSERT(uintptr_t(code_) % AsmJSPageSize == 0);
- masm.executableCopy(code_);
-
- // c.f. JitCode::copyFrom
- MOZ_ASSERT(masm.jumpRelocationTableBytes() == 0);
- MOZ_ASSERT(masm.dataRelocationTableBytes() == 0);
- MOZ_ASSERT(masm.preBarrierTableBytes() == 0);
- MOZ_ASSERT(!masm.hasSelfReference());
-
- // Heap-access metadata used for link-time patching and fault-handling.
- heapAccesses_ = masm.extractHeapAccesses();
-
- // Call-site metadata used for stack unwinding.
- const CallSiteAndTargetVector& callSites = masm.callSites();
- if (!callSites_.appendAll(callSites))
- return false;
-
- // Absolute link metadata: absolute addresses that refer to some fixed
- // address in the address space.
- AbsoluteLinkArray& absoluteLinks = staticLinkData_.absoluteLinks;
- for (size_t i = 0; i < masm.numAsmJSAbsoluteLinks(); i++) {
- AsmJSAbsoluteLink src = masm.asmJSAbsoluteLink(i);
- if (!absoluteLinks[src.target].append(src.patchAt.offset()))
- return false;
- }
-
- // Relative link metadata: absolute addresses that refer to another point within
- // the asm.js module.
-
- // CodeLabels are used for switch cases and loads from floating-point /
- // SIMD values in the constant pool.
- for (size_t i = 0; i < masm.numCodeLabels(); i++) {
- CodeLabel cl = masm.codeLabel(i);
- RelativeLink link(RelativeLink::CodeLabel);
- link.patchAtOffset = masm.labelToPatchOffset(*cl.patchAt());
- link.targetOffset = cl.target()->offset();
- if (!staticLinkData_.relativeLinks.append(link))
- return false;
- }
-
-#if defined(JS_CODEGEN_X86)
- // Global data accesses in x86 need to be patched with the absolute
- // address of the global. Globals are allocated sequentially after the
- // code section so we can just use an RelativeLink.
- for (size_t i = 0; i < masm.numAsmJSGlobalAccesses(); i++) {
- AsmJSGlobalAccess a = masm.asmJSGlobalAccess(i);
- RelativeLink link(RelativeLink::RawPointer);
- link.patchAtOffset = masm.labelToPatchOffset(a.patchAt);
- link.targetOffset = offsetOfGlobalData() + a.globalDataOffset;
- if (!staticLinkData_.relativeLinks.append(link))
- return false;
- }
-#endif
-
-#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
- // On MIPS we need to update all the long jumps because they contain an
- // absolute adress. The values are correctly patched for the current address
- // space, but not after serialization or profiling-mode toggling.
- for (size_t i = 0; i < masm.numLongJumps(); i++) {
- size_t off = masm.longJump(i);
- RelativeLink link(RelativeLink::InstructionImmediate);
- link.patchAtOffset = off;
- link.targetOffset = Assembler::ExtractInstructionImmediate(code_ + off) - uintptr_t(code_);
- if (!staticLinkData_.relativeLinks.append(link))
- return false;
- }
-#endif
-
-#if defined(JS_CODEGEN_X64)
- // Global data accesses on x64 use rip-relative addressing and thus do
- // not need patching after deserialization.
- for (size_t i = 0; i < masm.numAsmJSGlobalAccesses(); i++) {
- AsmJSGlobalAccess a = masm.asmJSGlobalAccess(i);
- masm.patchAsmJSGlobalAccess(a.patchAt, code_, globalData(), a.globalDataOffset);
- }
-#endif
-
- return true;
-}
-
-void
-AsmJSModule::setAutoFlushICacheRange()
-{
MOZ_ASSERT(isFinished());
- AutoFlushICache::setRange(uintptr_t(code_), pod.codeBytes_);
-}
-
-static void
-AsmJSReportOverRecursed()
-{
- JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx();
- ReportOverRecursed(cx);
-}
-
-static void
-OnDetached()
-{
- // See hasDetachedHeap comment in LinkAsmJS.
- JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx();
- JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_OUT_OF_MEMORY);
-}
-
-static void
-OnOutOfBounds()
-{
- JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx();
- JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_BAD_INDEX);
-}
-
-static void
-OnImpreciseConversion()
-{
- JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx();
- JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_SIMD_FAILED_CONVERSION);
-}
-
-static bool
-AsmJSHandleExecutionInterrupt()
-{
- AsmJSActivation* act = JSRuntime::innermostAsmJSActivation();
- act->module().setInterrupted(true);
- bool ret = CheckForInterrupt(act->cx());
- act->module().setInterrupted(false);
- return ret;
-}
-
-static int32_t
-CoerceInPlace_ToInt32(MutableHandleValue val)
-{
- JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx();
-
- int32_t i32;
- if (!ToInt32(cx, val, &i32))
- return false;
- val.set(Int32Value(i32));
-
- return true;
-}
-
-static int32_t
-CoerceInPlace_ToNumber(MutableHandleValue val)
-{
- JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx();
-
- double dbl;
- if (!ToNumber(cx, val, &dbl))
- return false;
- val.set(DoubleValue(dbl));
-
- return true;
-}
-
-static bool
-TryEnablingJit(JSContext* cx, AsmJSModule& module, HandleFunction fun, uint32_t exitIndex,
- int32_t argc, Value* argv)
-{
- if (!fun->hasScript())
- return true;
-
- // Test if the function is JIT compiled.
- JSScript* script = fun->nonLazyScript();
- if (!script->hasBaselineScript()) {
- MOZ_ASSERT(!script->hasIonScript());
- return true;
- }
-
- // Don't enable jit entry when we have a pending ion builder.
- // Take the interpreter path which will link it and enable
- // the fast path on the next call.
- if (script->baselineScript()->hasPendingIonBuilder())
- return true;
-
- // Currently we can't rectify arguments. Therefore disabling if argc is too low.
- if (fun->nargs() > size_t(argc))
- return true;
-
- // Ensure the argument types are included in the argument TypeSets stored in
- // the TypeScript. This is necessary for Ion, because the FFI exit will
- // use the skip-arg-checks entry point.
- //
- // Note that the TypeScript is never discarded while the script has a
- // BaselineScript, so if those checks hold now they must hold at least until
- // the BaselineScript is discarded and when that happens the FFI exit is
- // patched back.
- if (!TypeScript::ThisTypes(script)->hasType(TypeSet::UndefinedType()))
- return true;
- for (uint32_t i = 0; i < fun->nargs(); i++) {
- StackTypeSet* typeset = TypeScript::ArgTypes(script, i);
- TypeSet::Type type = TypeSet::DoubleType();
- if (!argv[i].isDouble())
- type = TypeSet::PrimitiveType(argv[i].extractNonDoubleType());
- if (!typeset->hasType(type))
- return true;
- }
-
- // The exit may have become optimized while executing the FFI.
- AsmJSModule::Exit& exit = module.exit(exitIndex);
- if (exit.isOptimized(module))
- return true;
-
- BaselineScript* baselineScript = script->baselineScript();
- if (!baselineScript->addDependentAsmJSModule(cx, DependentAsmJSModuleExit(&module, exitIndex)))
- return false;
-
- exit.optimize(module, baselineScript);
- return true;
-}
-
-static bool
-InvokeFromAsmJS(AsmJSActivation* activation, int32_t exitIndex, int32_t argc, Value* argv,
- MutableHandleValue rval)
-{
- JSContext* cx = activation->cx();
- AsmJSModule& module = activation->module();
-
- RootedFunction fun(cx, module.exit(exitIndex).datum(module).fun);
- RootedValue fval(cx, ObjectValue(*fun));
- if (!Invoke(cx, UndefinedValue(), fval, argc, argv, rval))
- return false;
-
- return TryEnablingJit(cx, module, fun, exitIndex, argc, argv);
-}
-
-// Use an int32_t return type instead of bool since bool does not have a
-// specified width and the caller is assuming a word-sized return.
-static int32_t
-InvokeFromAsmJS_Ignore(int32_t exitIndex, int32_t argc, Value* argv)
-{
- AsmJSActivation* activation = JSRuntime::innermostAsmJSActivation();
- JSContext* cx = activation->cx();
-
- RootedValue rval(cx);
- return InvokeFromAsmJS(activation, exitIndex, argc, argv, &rval);
-}
-
-// Use an int32_t return type instead of bool since bool does not have a
-// specified width and the caller is assuming a word-sized return.
-static int32_t
-InvokeFromAsmJS_ToInt32(int32_t exitIndex, int32_t argc, Value* argv)
-{
- AsmJSActivation* activation = JSRuntime::innermostAsmJSActivation();
- JSContext* cx = activation->cx();
-
- RootedValue rval(cx);
- if (!InvokeFromAsmJS(activation, exitIndex, argc, argv, &rval))
- return false;
-
- int32_t i32;
- if (!ToInt32(cx, rval, &i32))
- return false;
-
- argv[0] = Int32Value(i32);
- return true;
-}
-
-// Use an int32_t return type instead of bool since bool does not have a
-// specified width and the caller is assuming a word-sized return.
-static int32_t
-InvokeFromAsmJS_ToNumber(int32_t exitIndex, int32_t argc, Value* argv)
-{
- AsmJSActivation* activation = JSRuntime::innermostAsmJSActivation();
- JSContext* cx = activation->cx();
-
- RootedValue rval(cx);
- if (!InvokeFromAsmJS(activation, exitIndex, argc, argv, &rval))
- return false;
-
- double dbl;
- if (!ToNumber(cx, rval, &dbl))
- return false;
-
- argv[0] = DoubleValue(dbl);
- return true;
-}
-
-#if defined(JS_CODEGEN_ARM)
-extern "C" {
-
-extern MOZ_EXPORT int64_t
-__aeabi_idivmod(int, int);
-
-extern MOZ_EXPORT int64_t
-__aeabi_uidivmod(int, int);
-
-}
-#endif
-
-template <class F>
-static inline void*
-FuncCast(F* pf)
-{
- return JS_FUNC_TO_DATA_PTR(void*, pf);
-}
-
-static void*
-RedirectCall(void* fun, ABIFunctionType type)
-{
-#ifdef JS_SIMULATOR
- fun = Simulator::RedirectNativeFunction(fun, type);
-#endif
- return fun;
-}
-
-static void*
-AddressOf(SymbolicAddress imm, ExclusiveContext* cx)
-{
- switch (imm) {
- case SymbolicAddress::Runtime:
- return cx->runtimeAddressForJit();
- case SymbolicAddress::RuntimeInterruptUint32:
- return cx->runtimeAddressOfInterruptUint32();
- case SymbolicAddress::StackLimit:
- return cx->stackLimitAddressForJitCode(StackForUntrustedScript);
- case SymbolicAddress::ReportOverRecursed:
- return RedirectCall(FuncCast(AsmJSReportOverRecursed), Args_General0);
- case SymbolicAddress::OnDetached:
- return RedirectCall(FuncCast(OnDetached), Args_General0);
- case SymbolicAddress::OnOutOfBounds:
- return RedirectCall(FuncCast(OnOutOfBounds), Args_General0);
- case SymbolicAddress::OnImpreciseConversion:
- return RedirectCall(FuncCast(OnImpreciseConversion), Args_General0);
- case SymbolicAddress::HandleExecutionInterrupt:
- return RedirectCall(FuncCast(AsmJSHandleExecutionInterrupt), Args_General0);
- case SymbolicAddress::InvokeFromAsmJS_Ignore:
- return RedirectCall(FuncCast(InvokeFromAsmJS_Ignore), Args_General3);
- case SymbolicAddress::InvokeFromAsmJS_ToInt32:
- return RedirectCall(FuncCast(InvokeFromAsmJS_ToInt32), Args_General3);
- case SymbolicAddress::InvokeFromAsmJS_ToNumber:
- return RedirectCall(FuncCast(InvokeFromAsmJS_ToNumber), Args_General3);
- case SymbolicAddress::CoerceInPlace_ToInt32:
- return RedirectCall(FuncCast(CoerceInPlace_ToInt32), Args_General1);
- case SymbolicAddress::CoerceInPlace_ToNumber:
- return RedirectCall(FuncCast(CoerceInPlace_ToNumber), Args_General1);
- case SymbolicAddress::ToInt32:
- return RedirectCall(FuncCast<int32_t (double)>(JS::ToInt32), Args_Int_Double);
-#if defined(JS_CODEGEN_ARM)
- case SymbolicAddress::aeabi_idivmod:
- return RedirectCall(FuncCast(__aeabi_idivmod), Args_General2);
- case SymbolicAddress::aeabi_uidivmod:
- return RedirectCall(FuncCast(__aeabi_uidivmod), Args_General2);
- case SymbolicAddress::AtomicCmpXchg:
- return RedirectCall(FuncCast<int32_t (int32_t, int32_t, int32_t, int32_t)>(js::atomics_cmpxchg_asm_callout), Args_General4);
- case SymbolicAddress::AtomicXchg:
- return RedirectCall(FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_xchg_asm_callout), Args_General3);
- case SymbolicAddress::AtomicFetchAdd:
- return RedirectCall(FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_add_asm_callout), Args_General3);
- case SymbolicAddress::AtomicFetchSub:
- return RedirectCall(FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_sub_asm_callout), Args_General3);
- case SymbolicAddress::AtomicFetchAnd:
- return RedirectCall(FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_and_asm_callout), Args_General3);
- case SymbolicAddress::AtomicFetchOr:
- return RedirectCall(FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_or_asm_callout), Args_General3);
- case SymbolicAddress::AtomicFetchXor:
- return RedirectCall(FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_xor_asm_callout), Args_General3);
-#endif
- case SymbolicAddress::ModD:
- return RedirectCall(FuncCast(NumberMod), Args_Double_DoubleDouble);
- case SymbolicAddress::SinD:
-#ifdef _WIN64
- // Workaround a VS 2013 sin issue, see math_sin_uncached.
- return RedirectCall(FuncCast<double (double)>(js::math_sin_uncached), Args_Double_Double);
-#else
- return RedirectCall(FuncCast<double (double)>(sin), Args_Double_Double);
-#endif
- case SymbolicAddress::CosD:
- return RedirectCall(FuncCast<double (double)>(cos), Args_Double_Double);
- case SymbolicAddress::TanD:
- return RedirectCall(FuncCast<double (double)>(tan), Args_Double_Double);
- case SymbolicAddress::ASinD:
- return RedirectCall(FuncCast<double (double)>(asin), Args_Double_Double);
- case SymbolicAddress::ACosD:
- return RedirectCall(FuncCast<double (double)>(acos), Args_Double_Double);
- case SymbolicAddress::ATanD:
- return RedirectCall(FuncCast<double (double)>(atan), Args_Double_Double);
- case SymbolicAddress::CeilD:
- return RedirectCall(FuncCast<double (double)>(ceil), Args_Double_Double);
- case SymbolicAddress::CeilF:
- return RedirectCall(FuncCast<float (float)>(ceilf), Args_Float32_Float32);
- case SymbolicAddress::FloorD:
- return RedirectCall(FuncCast<double (double)>(floor), Args_Double_Double);
- case SymbolicAddress::FloorF:
- return RedirectCall(FuncCast<float (float)>(floorf), Args_Float32_Float32);
- case SymbolicAddress::ExpD:
- return RedirectCall(FuncCast<double (double)>(exp), Args_Double_Double);
- case SymbolicAddress::LogD:
- return RedirectCall(FuncCast<double (double)>(log), Args_Double_Double);
- case SymbolicAddress::PowD:
- return RedirectCall(FuncCast(ecmaPow), Args_Double_DoubleDouble);
- case SymbolicAddress::ATan2D:
- return RedirectCall(FuncCast(ecmaAtan2), Args_Double_DoubleDouble);
- case SymbolicAddress::Limit:
- break;
- }
-
- MOZ_CRASH("Bad SymbolicAddress");
-}
-
-void
-AsmJSModule::staticallyLink(ExclusiveContext* cx)
-{
- MOZ_ASSERT(isFinished());
-
- // Process staticLinkData_
-
- MOZ_ASSERT(staticLinkData_.pod.interruptExitOffset != 0);
- interruptExit_ = code_ + staticLinkData_.pod.interruptExitOffset;
-
- MOZ_ASSERT(staticLinkData_.pod.outOfBoundsExitOffset != 0);
- outOfBoundsExit_ = code_ + staticLinkData_.pod.outOfBoundsExitOffset;
-
- for (size_t i = 0; i < staticLinkData_.relativeLinks.length(); i++) {
- RelativeLink link = staticLinkData_.relativeLinks[i];
- uint8_t* patchAt = code_ + link.patchAtOffset;
- uint8_t* target = code_ + link.targetOffset;
-
- // In the case of long-jumps on MIPS and possibly future cases, a
- // RelativeLink is used to patch a pointer to the function entry. If
- // profiling is enabled (by cloning a module with profiling enabled),
- // the target should be the profiling entry.
- if (profilingEnabled_) {
- const CodeRange* codeRange = lookupCodeRange(target);
- if (codeRange && codeRange->isFunction() && link.targetOffset == codeRange->entry())
- target = code_ + codeRange->profilingEntry();
- }
-
- if (link.isRawPointerPatch())
- *(uint8_t**)(patchAt) = target;
- else
- Assembler::PatchInstructionImmediate(patchAt, PatchedImmPtr(target));
- }
-
- for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
- const OffsetVector& offsets = staticLinkData_.absoluteLinks[imm];
- for (size_t i = 0; i < offsets.length(); i++) {
- uint8_t* patchAt = code_ + offsets[i];
- void* target = AddressOf(imm, cx);
-
- // Builtin calls are another case where, when profiling is enabled,
- // we must point to the profiling entry.
- Builtin builtin;
- if (profilingEnabled_ && ImmediateIsBuiltin(imm, &builtin)) {
- const CodeRange* codeRange = lookupCodeRange(patchAt);
- if (codeRange->isFunction())
- target = code_ + staticLinkData_.pod.builtinThunkOffsets[builtin];
- }
-
- Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt),
- PatchedImmPtr(target),
- PatchedImmPtr((void*)-1));
- }
- }
-
- // Initialize global data segment
-
- *(double*)(globalData() + NaN64GlobalDataOffset) = GenericNaN();
- *(float*)(globalData() + NaN32GlobalDataOffset) = GenericNaN();
-
- for (size_t tableIndex = 0; tableIndex < staticLinkData_.funcPtrTables.length(); tableIndex++) {
- FuncPtrTable& funcPtrTable = staticLinkData_.funcPtrTables[tableIndex];
- const OffsetVector& offsets = funcPtrTable.elemOffsets();
- auto array = reinterpret_cast<void**>(globalData() + funcPtrTable.globalDataOffset());
- for (size_t elemIndex = 0; elemIndex < offsets.length(); elemIndex++) {
- uint8_t* target = code_ + offsets[elemIndex];
- if (profilingEnabled_)
- target = code_ + lookupCodeRange(target)->profilingEntry();
- array[elemIndex] = target;
- }
- }
-
- for (AsmJSModule::Exit& exit : exits_)
- exit.initDatum(*this);
-}
-
-void
-AsmJSModule::initHeap(Handle<ArrayBufferObjectMaybeShared*> heap, JSContext* cx)
-{
- MOZ_ASSERT_IF(heap->is<ArrayBufferObject>(), heap->as<ArrayBufferObject>().isAsmJS());
- MOZ_ASSERT(IsValidAsmJSHeapLength(heap->byteLength()));
- MOZ_ASSERT(dynamicallyLinked_);
- MOZ_ASSERT(!maybeHeap_);
-
- maybeHeap_ = heap;
- // heapDatum() may point to shared memory but that memory is only
- // accessed from maybeHeap(), which wraps it, and from
- // hasDetachedHeap(), which checks it for null.
- heapDatum() = heap->dataPointerEither().unwrap(/*safe - explained above*/);
-
-#if defined(JS_CODEGEN_X86)
- uint8_t* heapOffset = heap->dataPointerEither().unwrap(/*safe - used for value*/);
- uint32_t heapLength = heap->byteLength();
- for (unsigned i = 0; i < heapAccesses_.length(); i++) {
- const HeapAccess& access = heapAccesses_[i];
- // An access is out-of-bounds iff
- // ptr + offset + data-type-byte-size > heapLength
- // i.e. ptr > heapLength - data-type-byte-size - offset.
- // data-type-byte-size and offset are already included in the addend
- // so we just have to add the heap length here.
- if (access.hasLengthCheck())
- X86Encoding::AddInt32(access.patchLengthAt(code_), heapLength);
- void* addr = access.patchHeapPtrImmAt(code_);
- uint32_t disp = reinterpret_cast<uint32_t>(X86Encoding::GetPointer(addr));
- MOZ_ASSERT(disp <= INT32_MAX);
- X86Encoding::SetPointer(addr, (void*)(heapOffset + disp));
- }
-#elif defined(JS_CODEGEN_X64)
- // Even with signal handling being used for most bounds checks, there may be
- // atomic operations that depend on explicit checks.
- //
- // If we have any explicit bounds checks, we need to patch the heap length
- // checks at the right places. All accesses that have been recorded are the
- // only ones that need bound checks (see also
- // CodeGeneratorX64::visitAsmJS{Load,Store,CompareExchange,Exchange,AtomicBinop}Heap)
- uint32_t heapLength = heap->byteLength();
- for (size_t i = 0; i < heapAccesses_.length(); i++) {
- const HeapAccess& access = heapAccesses_[i];
- // See comment above for x86 codegen.
- if (access.hasLengthCheck())
- X86Encoding::AddInt32(access.patchLengthAt(code_), heapLength);
- }
-#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
- uint32_t heapLength = heap->byteLength();
- for (unsigned i = 0; i < heapAccesses_.length(); i++) {
- jit::Assembler::UpdateBoundsCheck(heapLength,
- (jit::Instruction*)(heapAccesses_[i].insnOffset() + code_));
- }
-#endif
-}
-
-void
-AsmJSModule::restoreHeapToInitialState(ArrayBufferObjectMaybeShared* maybePrevBuffer)
-{
-#if defined(JS_CODEGEN_X86)
- if (maybePrevBuffer) {
- // Subtract out the base-pointer added by AsmJSModule::initHeap.
- uint8_t* ptrBase = maybePrevBuffer->dataPointerEither().unwrap(/*safe - used for value*/);
- uint32_t heapLength = maybePrevBuffer->byteLength();
- for (unsigned i = 0; i < heapAccesses_.length(); i++) {
- const HeapAccess& access = heapAccesses_[i];
- // Subtract the heap length back out, leaving the raw displacement in place.
- if (access.hasLengthCheck())
- X86Encoding::AddInt32(access.patchLengthAt(code_), -heapLength);
- void* addr = access.patchHeapPtrImmAt(code_);
- uint8_t* ptr = reinterpret_cast<uint8_t*>(X86Encoding::GetPointer(addr));
- MOZ_ASSERT(ptr >= ptrBase);
- X86Encoding::SetPointer(addr, (void*)(ptr - ptrBase));
- }
- }
-#elif defined(JS_CODEGEN_X64)
- if (maybePrevBuffer) {
- uint32_t heapLength = maybePrevBuffer->byteLength();
- for (unsigned i = 0; i < heapAccesses_.length(); i++) {
- const HeapAccess& access = heapAccesses_[i];
- // See comment above for x86 codegen.
- if (access.hasLengthCheck())
- X86Encoding::AddInt32(access.patchLengthAt(code_), -heapLength);
- }
- }
-#endif
-
- maybeHeap_ = nullptr;
- heapDatum() = nullptr;
-}
-
-void
-AsmJSModule::restoreToInitialState(ArrayBufferObjectMaybeShared* maybePrevBuffer,
- uint8_t* prevCode,
- ExclusiveContext* cx)
-{
-#ifdef DEBUG
- // Put the absolute links back to -1 so PatchDataWithValueCheck assertions
- // in staticallyLink are valid.
- for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
- void* callee = AddressOf(imm, cx);
-
- // If we are in profiling mode, calls to builtins will have been patched
- // by setProfilingEnabled to be calls to thunks.
- Builtin builtin;
- void* profilingCallee = profilingEnabled_ && ImmediateIsBuiltin(imm, &builtin)
- ? prevCode + staticLinkData_.pod.builtinThunkOffsets[builtin]
- : nullptr;
-
- const AsmJSModule::OffsetVector& offsets = staticLinkData_.absoluteLinks[imm];
- for (size_t i = 0; i < offsets.length(); i++) {
- uint8_t* caller = code_ + offsets[i];
- void* originalValue = profilingCallee && !lookupCodeRange(caller)->isThunk()
- ? profilingCallee
- : callee;
- Assembler::PatchDataWithValueCheck(CodeLocationLabel(caller),
- PatchedImmPtr((void*)-1),
- PatchedImmPtr(originalValue));
- }
- }
-#endif
-
- restoreHeapToInitialState(maybePrevBuffer);
-}
-
-namespace {
-
-class MOZ_STACK_CLASS AutoMutateCode
-{
- AutoWritableJitCode awjc_;
- AutoFlushICache afc_;
-
- public:
- AutoMutateCode(JSContext* cx, AsmJSModule& module, const char* name)
- : awjc_(cx->runtime(), module.codeBase(), module.codeBytes()),
- afc_(name)
- {
- module.setAutoFlushICacheRange();
- }
-};
-
-} // namespace
-
-bool
-AsmJSModule::detachHeap(JSContext* cx)
-{
- MOZ_ASSERT(isDynamicallyLinked());
- MOZ_ASSERT(maybeHeap_);
-
- // Content JS should not be able to run (and detach heap) from within an
- // interrupt callback, but in case it does, fail. Otherwise, the heap can
- // change at an arbitrary instruction and break the assumption below.
- if (interrupted_) {
- JS_ReportError(cx, "attempt to detach from inside interrupt handler");
- return false;
- }
-
- // Even if this->active(), to reach here, the activation must have called
- // out via an FFI stub. FFI stubs check if heapDatum() is null on reentry
- // and throw an exception if so.
- MOZ_ASSERT_IF(active(), activation()->exitReason().kind() == ExitReason::Jit ||
- activation()->exitReason().kind() == ExitReason::Slow);
-
- AutoMutateCode amc(cx, *this, "AsmJSModule::detachHeap");
- restoreHeapToInitialState(maybeHeap_);
-
- MOZ_ASSERT(hasDetachedHeap());
- return true;
}
bool
js::OnDetachAsmJSArrayBuffer(JSContext* cx, Handle<ArrayBufferObject*> buffer)
{
- for (AsmJSModule* m = cx->runtime()->linkedAsmJSModules; m; m = m->nextLinked()) {
- if (buffer == m->maybeHeapBufferObject() && !m->detachHeap(cx))
+ for (Module* m = cx->runtime()->linkedWasmModules; m; m = m->nextLinked()) {
+ if (buffer == m->maybeBuffer() && !m->detachHeap(cx))
return false;
}
return true;
}
static void
AsmJSModuleObject_finalize(FreeOp* fop, JSObject* obj)
{
- fop->delete_(&obj->as<AsmJSModuleObject>().module());
+ AsmJSModuleObject& moduleObj = obj->as<AsmJSModuleObject>();
+ if (moduleObj.hasModule())
+ fop->delete_(&moduleObj.module());
}
static void
AsmJSModuleObject_trace(JSTracer* trc, JSObject* obj)
{
- obj->as<AsmJSModuleObject>().module().trace(trc);
+ AsmJSModuleObject& moduleObj = obj->as<AsmJSModuleObject>();
+ if (moduleObj.hasModule())
+ moduleObj.module().trace(trc);
}
const Class AsmJSModuleObject::class_ = {
"AsmJSModuleObject",
JSCLASS_IS_ANONYMOUS | JSCLASS_DELAY_METADATA_CALLBACK |
JSCLASS_HAS_RESERVED_SLOTS(AsmJSModuleObject::RESERVED_SLOTS),
nullptr, /* addProperty */
nullptr, /* delProperty */
@@ -980,294 +150,48 @@ const Class AsmJSModuleObject::class_ =
AsmJSModuleObject_finalize,
nullptr, /* call */
nullptr, /* hasInstance */
nullptr, /* construct */
AsmJSModuleObject_trace
};
AsmJSModuleObject*
-AsmJSModuleObject::create(ExclusiveContext* cx, ScopedJSDeletePtr<AsmJSModule>* module)
+AsmJSModuleObject::create(ExclusiveContext* cx)
{
AutoSetNewObjectMetadata metadata(cx);
JSObject* obj = NewObjectWithGivenProto(cx, &AsmJSModuleObject::class_, nullptr);
if (!obj)
return nullptr;
- AsmJSModuleObject* nobj = &obj->as<AsmJSModuleObject>();
+ return &obj->as<AsmJSModuleObject>();
+}
- nobj->setReservedSlot(MODULE_SLOT, PrivateValue(module->forget()));
+bool
+AsmJSModuleObject::hasModule() const
+{
+ MOZ_ASSERT(is<AsmJSModuleObject>());
+ return !getReservedSlot(MODULE_SLOT).isUndefined();
+}
- return nobj;
+void
+AsmJSModuleObject::setModule(AsmJSModule* newModule)
+{
+ MOZ_ASSERT(is<AsmJSModuleObject>());
+ if (hasModule())
+ js_delete(&module());
+ setReservedSlot(MODULE_SLOT, PrivateValue(newModule));
}
AsmJSModule&
AsmJSModuleObject::module() const
{
MOZ_ASSERT(is<AsmJSModuleObject>());
return *(AsmJSModule*)getReservedSlot(MODULE_SLOT).toPrivate();
}
-static inline uint8_t*
-WriteBytes(uint8_t* dst, const void* src, size_t nbytes)
-{
- memcpy(dst, src, nbytes);
- return dst + nbytes;
-}
-
-static inline const uint8_t*
-ReadBytes(const uint8_t* src, void* dst, size_t nbytes)
-{
- memcpy(dst, src, nbytes);
- return src + nbytes;
-}
-
-template <class T>
-static inline uint8_t*
-WriteScalar(uint8_t* dst, T t)
-{
- memcpy(dst, &t, sizeof(t));
- return dst + sizeof(t);
-}
-
-template <class T>
-static inline const uint8_t*
-ReadScalar(const uint8_t* src, T* dst)
-{
- memcpy(dst, src, sizeof(*dst));
- return src + sizeof(*dst);
-}
-
-static size_t
-SerializedNameSize(PropertyName* name)
-{
- size_t s = sizeof(uint32_t);
- if (name)
- s += name->length() * (name->hasLatin1Chars() ? sizeof(Latin1Char) : sizeof(char16_t));
- return s;
-}
-
-size_t
-AsmJSModule::Name::serializedSize() const
-{
- return SerializedNameSize(name_);
-}
-
-static uint8_t*
-SerializeName(uint8_t* cursor, PropertyName* name)
-{
- MOZ_ASSERT_IF(name, !name->empty());
- if (name) {
- static_assert(JSString::MAX_LENGTH <= INT32_MAX, "String length must fit in 31 bits");
- uint32_t length = name->length();
- uint32_t lengthAndEncoding = (length << 1) | uint32_t(name->hasLatin1Chars());
- cursor = WriteScalar<uint32_t>(cursor, lengthAndEncoding);
- JS::AutoCheckCannotGC nogc;
- if (name->hasLatin1Chars())
- cursor = WriteBytes(cursor, name->latin1Chars(nogc), length * sizeof(Latin1Char));
- else
- cursor = WriteBytes(cursor, name->twoByteChars(nogc), length * sizeof(char16_t));
- } else {
- cursor = WriteScalar<uint32_t>(cursor, 0);
- }
- return cursor;
-}
-
-uint8_t*
-AsmJSModule::Name::serialize(uint8_t* cursor) const
-{
- return SerializeName(cursor, name_);
-}
-
-template <typename CharT>
-static const uint8_t*
-DeserializeChars(ExclusiveContext* cx, const uint8_t* cursor, size_t length, PropertyName** name)
-{
- Vector<CharT> tmp(cx);
- CharT* src;
- if ((size_t(cursor) & (sizeof(CharT) - 1)) != 0) {
- // Align 'src' for AtomizeChars.
- if (!tmp.resize(length))
- return nullptr;
- memcpy(tmp.begin(), cursor, length * sizeof(CharT));
- src = tmp.begin();
- } else {
- src = (CharT*)cursor;
- }
-
- JSAtom* atom = AtomizeChars(cx, src, length);
- if (!atom)
- return nullptr;
-
- *name = atom->asPropertyName();
- return cursor + length * sizeof(CharT);
-}
-
-static const uint8_t*
-DeserializeName(ExclusiveContext* cx, const uint8_t* cursor, PropertyName** name)
-{
- uint32_t lengthAndEncoding;
- cursor = ReadScalar<uint32_t>(cursor, &lengthAndEncoding);
-
- uint32_t length = lengthAndEncoding >> 1;
- if (length == 0) {
- *name = nullptr;
- return cursor;
- }
-
- bool latin1 = lengthAndEncoding & 0x1;
- return latin1
- ? DeserializeChars<Latin1Char>(cx, cursor, length, name)
- : DeserializeChars<char16_t>(cx, cursor, length, name);
-}
-
-const uint8_t*
-AsmJSModule::Name::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
-{
- return DeserializeName(cx, cursor, &name_);
-}
-
-bool
-AsmJSModule::Name::clone(ExclusiveContext* cx, Name* out) const
-{
- out->name_ = name_;
- return true;
-}
-
-template <class T, size_t N>
-size_t
-SerializedVectorSize(const mozilla::Vector<T, N, SystemAllocPolicy>& vec)
-{
- size_t size = sizeof(uint32_t);
- for (size_t i = 0; i < vec.length(); i++)
- size += vec[i].serializedSize();
- return size;
-}
-
-template <class T, size_t N>
-uint8_t*
-SerializeVector(uint8_t* cursor, const mozilla::Vector<T, N, SystemAllocPolicy>& vec)
-{
- cursor = WriteScalar<uint32_t>(cursor, vec.length());
- for (size_t i = 0; i < vec.length(); i++)
- cursor = vec[i].serialize(cursor);
- return cursor;
-}
-
-template <class T, size_t N>
-const uint8_t*
-DeserializeVector(ExclusiveContext* cx, const uint8_t* cursor,
- mozilla::Vector<T, N, SystemAllocPolicy>* vec)
-{
- uint32_t length;
- cursor = ReadScalar<uint32_t>(cursor, &length);
- if (!vec->resize(length))
- return nullptr;
- for (size_t i = 0; i < vec->length(); i++) {
- if (!(cursor = (*vec)[i].deserialize(cx, cursor)))
- return nullptr;
- }
- return cursor;
-}
-
-template <class T, size_t N>
-bool
-CloneVector(ExclusiveContext* cx, const mozilla::Vector<T, N, SystemAllocPolicy>& in,
- mozilla::Vector<T, N, SystemAllocPolicy>* out)
-{
- if (!out->resize(in.length()))
- return false;
- for (size_t i = 0; i < in.length(); i++) {
- if (!in[i].clone(cx, &(*out)[i]))
- return false;
- }
- return true;
-}
-
-template <class T, size_t N, class AllocPolicy>
-size_t
-SerializedPodVectorSize(const mozilla::Vector<T, N, AllocPolicy>& vec)
-{
- return sizeof(uint32_t) +
- vec.length() * sizeof(T);
-}
-
-template <class T, size_t N, class AllocPolicy>
-uint8_t*
-SerializePodVector(uint8_t* cursor, const mozilla::Vector<T, N, AllocPolicy>& vec)
-{
- cursor = WriteScalar<uint32_t>(cursor, vec.length());
- cursor = WriteBytes(cursor, vec.begin(), vec.length() * sizeof(T));
- return cursor;
-}
-
-template <class T, size_t N, class AllocPolicy>
-const uint8_t*
-DeserializePodVector(ExclusiveContext* cx, const uint8_t* cursor,
- mozilla::Vector<T, N, AllocPolicy>* vec)
-{
- uint32_t length;
- cursor = ReadScalar<uint32_t>(cursor, &length);
- if (!vec->resize(length))
- return nullptr;
- cursor = ReadBytes(cursor, vec->begin(), length * sizeof(T));
- return cursor;
-}
-
-template <class T, size_t N>
-bool
-ClonePodVector(ExclusiveContext* cx, const mozilla::Vector<T, N, SystemAllocPolicy>& in,
- mozilla::Vector<T, N, SystemAllocPolicy>* out)
-{
- if (!out->resize(in.length()))
- return false;
- PodCopy(out->begin(), in.begin(), in.length());
- return true;
-}
-
-size_t
-SerializedSigSize(const MallocSig& sig)
-{
- return sizeof(ExprType) +
- SerializedPodVectorSize(sig.args());
-}
-
-uint8_t*
-SerializeSig(uint8_t* cursor, const MallocSig& sig)
-{
- cursor = WriteScalar<ExprType>(cursor, sig.ret());
- cursor = SerializePodVector(cursor, sig.args());
- return cursor;
-}
-
-const uint8_t*
-DeserializeSig(ExclusiveContext* cx, const uint8_t* cursor, MallocSig* sig)
-{
- ExprType ret;
- cursor = ReadScalar<ExprType>(cursor, &ret);
-
- MallocSig::ArgVector args;
- cursor = DeserializePodVector(cx, cursor, &args);
- if (!cursor)
- return nullptr;
-
- sig->init(Move(args), ret);
- return cursor;
-}
-
-bool
-CloneSig(ExclusiveContext* cx, const MallocSig& sig, MallocSig* out)
-{
- MallocSig::ArgVector args;
- if (!ClonePodVector(cx, sig.args(), &args))
- return false;
-
- out->init(Move(args), sig.ret());
- return true;
-}
-
uint8_t*
AsmJSModule::Global::serialize(uint8_t* cursor) const
{
cursor = WriteBytes(cursor, &pod, sizeof(pod));
cursor = SerializeName(cursor, name_);
return cursor;
}
@@ -1282,756 +206,144 @@ const uint8_t*
AsmJSModule::Global::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
{
(cursor = ReadBytes(cursor, &pod, sizeof(pod))) &&
(cursor = DeserializeName(cx, cursor, &name_));
return cursor;
}
bool
-AsmJSModule::Global::clone(ExclusiveContext* cx, Global* out) const
+AsmJSModule::Global::clone(JSContext* cx, Global* out) const
{
*out = *this;
return true;
}
uint8_t*
-AsmJSModule::Exit::serialize(uint8_t* cursor) const
+AsmJSModule::Export::serialize(uint8_t* cursor) const
{
- cursor = SerializeSig(cursor, sig_);
+ cursor = SerializeName(cursor, name_);
+ cursor = SerializeName(cursor, maybeFieldName_);
cursor = WriteBytes(cursor, &pod, sizeof(pod));
return cursor;
}
size_t
-AsmJSModule::Exit::serializedSize() const
+AsmJSModule::Export::serializedSize() const
{
- return SerializedSigSize(sig_) +
+ return SerializedNameSize(name_) +
+ SerializedNameSize(maybeFieldName_) +
sizeof(pod);
}
const uint8_t*
-AsmJSModule::Exit::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
-{
- (cursor = DeserializeSig(cx, cursor, &sig_)) &&
- (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
- return cursor;
-}
-
-bool
-AsmJSModule::Exit::clone(ExclusiveContext* cx, Exit* out) const
-{
- out->pod = pod;
- return CloneSig(cx, sig_, &out->sig_);
-}
-
-uint8_t*
-AsmJSModule::ExportedFunction::serialize(uint8_t* cursor) const
-{
- cursor = SerializeName(cursor, name_);
- cursor = SerializeName(cursor, maybeFieldName_);
- cursor = SerializeSig(cursor, sig_);
- cursor = WriteBytes(cursor, &pod, sizeof(pod));
- return cursor;
-}
-
-size_t
-AsmJSModule::ExportedFunction::serializedSize() const
-{
- return SerializedNameSize(name_) +
- SerializedNameSize(maybeFieldName_) +
- SerializedSigSize(sig_) +
- sizeof(pod);
-}
-
-const uint8_t*
-AsmJSModule::ExportedFunction::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
+AsmJSModule::Export::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
{
(cursor = DeserializeName(cx, cursor, &name_)) &&
(cursor = DeserializeName(cx, cursor, &maybeFieldName_)) &&
- (cursor = DeserializeSig(cx, cursor, &sig_)) &&
(cursor = ReadBytes(cursor, &pod, sizeof(pod)));
return cursor;
}
bool
-AsmJSModule::ExportedFunction::clone(ExclusiveContext* cx, ExportedFunction* out) const
+AsmJSModule::Export::clone(JSContext* cx, Export* out) const
{
out->name_ = name_;
out->maybeFieldName_ = maybeFieldName_;
out->pod = pod;
- return CloneSig(cx, sig_, &out->sig_);
-}
-
-AsmJSModule::CodeRange::CodeRange(uint32_t lineNumber, AsmJSFunctionOffsets offsets)
- : nameIndex_(UINT32_MAX),
- lineNumber_(lineNumber)
-{
- PodZero(&u); // zero padding for Valgrind
- u.kind_ = Function;
-
- MOZ_ASSERT(offsets.nonProfilingEntry - offsets.begin <= UINT8_MAX);
- begin_ = offsets.begin;
- u.func.beginToEntry_ = offsets.nonProfilingEntry - begin_;
-
- MOZ_ASSERT(offsets.nonProfilingEntry < offsets.profilingReturn);
- MOZ_ASSERT(offsets.profilingReturn - offsets.profilingJump <= UINT8_MAX);
- MOZ_ASSERT(offsets.profilingReturn - offsets.profilingEpilogue <= UINT8_MAX);
- profilingReturn_ = offsets.profilingReturn;
- u.func.profilingJumpToProfilingReturn_ = profilingReturn_ - offsets.profilingJump;
- u.func.profilingEpilogueToProfilingReturn_ = profilingReturn_ - offsets.profilingEpilogue;
-
- MOZ_ASSERT(offsets.nonProfilingEntry < offsets.end);
- end_ = offsets.end;
-}
-
-AsmJSModule::CodeRange::CodeRange(Kind kind, AsmJSOffsets offsets)
- : nameIndex_(0),
- lineNumber_(0),
- begin_(offsets.begin),
- profilingReturn_(0),
- end_(offsets.end)
-{
- PodZero(&u); // zero padding for Valgrind
- u.kind_ = kind;
-
- MOZ_ASSERT(begin_ <= end_);
- MOZ_ASSERT(u.kind_ == Entry || u.kind_ == Inline);
-}
-
-AsmJSModule::CodeRange::CodeRange(Kind kind, AsmJSProfilingOffsets offsets)
- : nameIndex_(0),
- lineNumber_(0),
- begin_(offsets.begin),
- profilingReturn_(offsets.profilingReturn),
- end_(offsets.end)
-{
- PodZero(&u); // zero padding for Valgrind
- u.kind_ = kind;
-
- MOZ_ASSERT(begin_ < profilingReturn_);
- MOZ_ASSERT(profilingReturn_ < end_);
- MOZ_ASSERT(u.kind_ == JitFFI || u.kind_ == SlowFFI || u.kind_ == Interrupt);
-}
-
-AsmJSModule::CodeRange::CodeRange(Builtin builtin, AsmJSProfilingOffsets offsets)
- : nameIndex_(0),
- lineNumber_(0),
- begin_(offsets.begin),
- profilingReturn_(offsets.profilingReturn),
- end_(offsets.end)
-{
- PodZero(&u); // zero padding for Valgrind
- u.kind_ = Thunk;
- u.thunk.target_ = uint16_t(builtin);
-
- MOZ_ASSERT(begin_ < profilingReturn_);
- MOZ_ASSERT(profilingReturn_ < end_);
-}
-
-#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
-size_t
-AsmJSModule::ProfiledFunction::serializedSize() const
-{
- return SerializedNameSize(name) +
- sizeof(pod);
-}
-
-uint8_t*
-AsmJSModule::ProfiledFunction::serialize(uint8_t* cursor) const
-{
- cursor = SerializeName(cursor, name);
- cursor = WriteBytes(cursor, &pod, sizeof(pod));
- return cursor;
-}
-
-const uint8_t*
-AsmJSModule::ProfiledFunction::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
-{
- (cursor = DeserializeName(cx, cursor, &name)) &&
- (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
- return cursor;
-}
-#endif
-
-size_t
-AsmJSModule::AbsoluteLinkArray::serializedSize() const
-{
- size_t size = 0;
- for (const OffsetVector& offsets : *this)
- size += SerializedPodVectorSize(offsets);
- return size;
-}
-
-uint8_t*
-AsmJSModule::AbsoluteLinkArray::serialize(uint8_t* cursor) const
-{
- for (const OffsetVector& offsets : *this)
- cursor = SerializePodVector(cursor, offsets);
- return cursor;
-}
-
-const uint8_t*
-AsmJSModule::AbsoluteLinkArray::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
-{
- for (OffsetVector& offsets : *this) {
- cursor = DeserializePodVector(cx, cursor, &offsets);
- if (!cursor)
- return nullptr;
- }
- return cursor;
-}
-
-bool
-AsmJSModule::AbsoluteLinkArray::clone(ExclusiveContext* cx, AbsoluteLinkArray* out) const
-{
- for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
- if (!ClonePodVector(cx, (*this)[imm], &(*out)[imm]))
- return false;
- }
return true;
}
size_t
-AsmJSModule::AbsoluteLinkArray::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
-{
- size_t size = 0;
- for (const OffsetVector& offsets : *this)
- size += offsets.sizeOfExcludingThis(mallocSizeOf);
- return size;
-}
-
-size_t
-AsmJSModule::FuncPtrTable::serializedSize() const
-{
- return sizeof(pod) +
- SerializedPodVectorSize(elemOffsets_);
-}
-
-uint8_t*
-AsmJSModule::FuncPtrTable::serialize(uint8_t* cursor) const
-{
- cursor = WriteBytes(cursor, &pod, sizeof(pod));
- cursor = SerializePodVector(cursor, elemOffsets_);
- return cursor;
-}
-
-const uint8_t*
-AsmJSModule::FuncPtrTable::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
-{
- (cursor = ReadBytes(cursor, &pod, sizeof(pod))) &&
- (cursor = DeserializePodVector(cx, cursor, &elemOffsets_));
- return cursor;
-}
-
-bool
-AsmJSModule::FuncPtrTable::clone(ExclusiveContext* cx, FuncPtrTable* out) const
-{
- out->pod = pod;
- return ClonePodVector(cx, elemOffsets_, &out->elemOffsets_);
-}
-
-size_t
-AsmJSModule::FuncPtrTable::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
-{
- return elemOffsets_.sizeOfExcludingThis(mallocSizeOf);
-}
-
-size_t
-AsmJSModule::StaticLinkData::serializedSize() const
-{
- return sizeof(pod) +
- SerializedPodVectorSize(relativeLinks) +
- absoluteLinks.serializedSize() +
- SerializedVectorSize(funcPtrTables);
-}
-
-uint8_t*
-AsmJSModule::StaticLinkData::serialize(uint8_t* cursor) const
-{
- cursor = WriteBytes(cursor, &pod, sizeof(pod));
- cursor = SerializePodVector(cursor, relativeLinks);
- cursor = absoluteLinks.serialize(cursor);
- cursor = SerializeVector(cursor, funcPtrTables);
- return cursor;
-}
-
-const uint8_t*
-AsmJSModule::StaticLinkData::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
-{
- (cursor = ReadBytes(cursor, &pod, sizeof(pod))) &&
- (cursor = DeserializePodVector(cx, cursor, &relativeLinks)) &&
- (cursor = absoluteLinks.deserialize(cx, cursor)) &&
- (cursor = DeserializeVector(cx, cursor, &funcPtrTables));
- return cursor;
-}
-
-bool
-AsmJSModule::StaticLinkData::clone(ExclusiveContext* cx, StaticLinkData* out) const
-{
- out->pod = pod;
- return ClonePodVector(cx, relativeLinks, &out->relativeLinks) &&
- absoluteLinks.clone(cx, &out->absoluteLinks) &&
- CloneVector(cx, funcPtrTables, &out->funcPtrTables);
-}
-
-size_t
-AsmJSModule::StaticLinkData::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
-{
- size_t size = relativeLinks.sizeOfExcludingThis(mallocSizeOf) +
- absoluteLinks.sizeOfExcludingThis(mallocSizeOf) +
- funcPtrTables.sizeOfExcludingThis(mallocSizeOf);
-
- for (const FuncPtrTable& table : funcPtrTables)
- size += table.sizeOfExcludingThis(mallocSizeOf);
-
- return size;
-}
-
-size_t
AsmJSModule::serializedSize() const
{
- return sizeof(pod) +
- pod.codeBytes_ +
+ MOZ_ASSERT(isFinished());
+ return wasm_->serializedSize() +
+ linkData_->serializedSize() +
+ sizeof(pod) +
+ SerializedVectorSize(globals_) +
+ SerializedPodVectorSize(imports_) +
+ SerializedVectorSize(exports_) +
SerializedNameSize(globalArgumentName_) +
SerializedNameSize(importArgumentName_) +
- SerializedNameSize(bufferArgumentName_) +
- SerializedVectorSize(globals_) +
- SerializedVectorSize(exits_) +
- SerializedVectorSize(exports_) +
- SerializedPodVectorSize(callSites_) +
- SerializedPodVectorSize(codeRanges_) +
- SerializedVectorSize(names_) +
- SerializedPodVectorSize(heapAccesses_) +
-#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
- SerializedVectorSize(profiledFunctions_) +
-#endif
- staticLinkData_.serializedSize();
+ SerializedNameSize(bufferArgumentName_);
}
uint8_t*
AsmJSModule::serialize(uint8_t* cursor) const
{
- MOZ_ASSERT(!dynamicallyLinked_);
- MOZ_ASSERT(!loadedFromCache_);
- MOZ_ASSERT(!profilingEnabled_);
- MOZ_ASSERT(!interrupted_);
-
+ MOZ_ASSERT(isFinished());
+ cursor = wasm_->serialize(cursor);
+ cursor = linkData_->serialize(cursor);
cursor = WriteBytes(cursor, &pod, sizeof(pod));
- cursor = WriteBytes(cursor, code_, pod.codeBytes_);
+ cursor = SerializeVector(cursor, globals_);
+ cursor = SerializePodVector(cursor, imports_);
+ cursor = SerializeVector(cursor, exports_);
cursor = SerializeName(cursor, globalArgumentName_);
cursor = SerializeName(cursor, importArgumentName_);
cursor = SerializeName(cursor, bufferArgumentName_);
- cursor = SerializeVector(cursor, globals_);
- cursor = SerializeVector(cursor, exits_);
- cursor = SerializeVector(cursor, exports_);
- cursor = SerializePodVector(cursor, callSites_);
- cursor = SerializePodVector(cursor, codeRanges_);
- cursor = SerializeVector(cursor, names_);
- cursor = SerializePodVector(cursor, heapAccesses_);
-#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
- cursor = SerializeVector(cursor, profiledFunctions_);
-#endif
- cursor = staticLinkData_.serialize(cursor);
return cursor;
}
const uint8_t*
AsmJSModule::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
{
+ linkData_ = cx->make_unique<StaticLinkData>();
+ if (!linkData_)
+ return nullptr;
+
// To avoid GC-during-deserialization corner cases, prevent atoms from
// being collected.
AutoKeepAtoms aka(cx->perThreadData);
+ (cursor = Module::deserialize(cx, cursor, &wasm_)) &&
+ (cursor = linkData_->deserialize(cx, cursor)) &&
(cursor = ReadBytes(cursor, &pod, sizeof(pod))) &&
- (code_ = AllocateExecutableMemory(cx, pod.totalBytes_)) &&
- (cursor = ReadBytes(cursor, code_, pod.codeBytes_)) &&
+ (cursor = DeserializeVector(cx, cursor, &globals_)) &&
+ (cursor = DeserializePodVector(cx, cursor, &imports_)) &&
+ (cursor = DeserializeVector(cx, cursor, &exports_)) &&
(cursor = DeserializeName(cx, cursor, &globalArgumentName_)) &&
(cursor = DeserializeName(cx, cursor, &importArgumentName_)) &&
- (cursor = DeserializeName(cx, cursor, &bufferArgumentName_)) &&
- (cursor = DeserializeVector(cx, cursor, &globals_)) &&
- (cursor = DeserializeVector(cx, cursor, &exits_)) &&
- (cursor = DeserializeVector(cx, cursor, &exports_)) &&
- (cursor = DeserializePodVector(cx, cursor, &callSites_)) &&
- (cursor = DeserializePodVector(cx, cursor, &codeRanges_)) &&
- (cursor = DeserializeVector(cx, cursor, &names_)) &&
- (cursor = DeserializePodVector(cx, cursor, &heapAccesses_)) &&
-#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
- (cursor = DeserializeVector(cx, cursor, &profiledFunctions_)) &&
-#endif
- (cursor = staticLinkData_.deserialize(cx, cursor));
-
- loadedFromCache_ = true;
+ (cursor = DeserializeName(cx, cursor, &bufferArgumentName_));
return cursor;
}
bool
-AsmJSModule::clone(JSContext* cx, ScopedJSDeletePtr<AsmJSModule>* moduleOut) const
+AsmJSModule::clone(JSContext* cx, HandleAsmJSModule obj) const
{
- *moduleOut = cx->new_<AsmJSModule>(scriptSource_, srcStart_, srcBodyStart_, pod.strict_,
- pod.canUseSignalHandlers_);
- if (!*moduleOut)
+ auto out = cx->new_<AsmJSModule>(scriptSource(), srcStart_, srcBodyStart_, pod.strict_);
+ if (!out)
return false;
- AsmJSModule& out = **moduleOut;
-
- // Mirror the order of serialize/deserialize in cloning:
+ obj->setModule(out);
- out.pod = pod;
-
- out.code_ = AllocateExecutableMemory(cx, pod.totalBytes_);
- if (!out.code_)
+ out->wasm_ = wasm_->clone(cx, *linkData_);
+ if (!out->wasm_)
return false;
- memcpy(out.code_, code_, pod.codeBytes_);
-
- out.globalArgumentName_ = globalArgumentName_;
- out.importArgumentName_ = importArgumentName_;
- out.bufferArgumentName_ = bufferArgumentName_;
+ out->linkData_ = cx->make_unique<StaticLinkData>();
+ if (!out->linkData_ || !linkData_->clone(cx, out->linkData_.get()))
+ return false;
- if (!CloneVector(cx, globals_, &out.globals_) ||
- !CloneVector(cx, exits_, &out.exits_) ||
- !CloneVector(cx, exports_, &out.exports_) ||
- !ClonePodVector(cx, callSites_, &out.callSites_) ||
- !ClonePodVector(cx, codeRanges_, &out.codeRanges_) ||
- !CloneVector(cx, names_, &out.names_) ||
- !ClonePodVector(cx, heapAccesses_, &out.heapAccesses_) ||
- !staticLinkData_.clone(cx, &out.staticLinkData_))
+ out->pod = pod;
+
+ if (!CloneVector(cx, globals_, &out->globals_) ||
+ !ClonePodVector(cx, imports_, &out->imports_) ||
+ !CloneVector(cx, exports_, &out->exports_))
{
return false;
}
- out.loadedFromCache_ = loadedFromCache_;
- out.profilingEnabled_ = profilingEnabled_;
-
- if (profilingEnabled_) {
- if (!out.profilingLabels_.resize(profilingLabels_.length()))
- return false;
- for (size_t i = 0; i < profilingLabels_.length(); i++) {
- out.profilingLabels_[i] = DuplicateString(cx, profilingLabels_[i].get());
- if (!out.profilingLabels_[i])
- return false;
- }
- }
-
-
- // Delay flushing until dynamic linking.
- AutoFlushICache afc("AsmJSModule::clone", /* inhibit = */ true);
- out.setAutoFlushICacheRange();
-
- out.restoreToInitialState(maybeHeap_, code_, cx);
- out.staticallyLink(cx);
- return true;
-}
-
-bool
-AsmJSModule::changeHeap(Handle<ArrayBufferObject*> newHeap, JSContext* cx)
-{
- MOZ_ASSERT(hasArrayView());
-
- // Content JS should not be able to run (and change heap) from within an
- // interrupt callback, but in case it does, fail to change heap. Otherwise,
- // the heap can change at every single instruction which would prevent
- // future optimizations like heap-base hoisting.
- if (interrupted_)
- return false;
-
- AutoMutateCode amc(cx, *this, "AsmJSModule::changeHeap");
- restoreHeapToInitialState(maybeHeap_);
- initHeap(newHeap, cx);
+ out->globalArgumentName_ = globalArgumentName_;
+ out->importArgumentName_ = importArgumentName_;
+ out->bufferArgumentName_ = bufferArgumentName_;
return true;
}
-size_t
-AsmJSModule::heapLength() const
-{
- MOZ_ASSERT(isDynamicallyLinked());
- return maybeHeap_ ? maybeHeap_->byteLength() : 0;
-}
-
-void
-AsmJSModule::setProfilingEnabled(bool enabled, JSContext* cx)
-{
- MOZ_ASSERT(isDynamicallyLinked());
-
- if (profilingEnabled_ == enabled)
- return;
-
- // When enabled, generate profiling labels for every name in names_ that is
- // the name of some Function CodeRange. This involves malloc() so do it now
- // since, once we start sampling, we'll be in a signal-handing context where
- // we cannot malloc.
- if (enabled) {
- profilingLabels_.resize(names_.length());
- const char* filename = scriptSource_->filename();
- JS::AutoCheckCannotGC nogc;
- for (size_t i = 0; i < codeRanges_.length(); i++) {
- CodeRange& cr = codeRanges_[i];
- if (!cr.isFunction())
- continue;
- unsigned lineno = cr.functionLineNumber();
- PropertyName* name = names_[cr.functionNameIndex()].name();
- profilingLabels_[cr.functionNameIndex()].reset(
- name->hasLatin1Chars()
- ? JS_smprintf("%s (%s:%u)", name->latin1Chars(nogc), filename, lineno)
- : JS_smprintf("%hs (%s:%u)", name->twoByteChars(nogc), filename, lineno));
- }
- } else {
- profilingLabels_.clear();
- }
-
- AutoMutateCode amc(cx, *this, "AsmJSModule::setProfilingEnabled");
-
- // Patch all internal (asm.js->asm.js) callsites to call the profiling
- // prologues:
- for (size_t i = 0; i < callSites_.length(); i++) {
- CallSite& cs = callSites_[i];
- if (cs.kind() != CallSite::Relative)
- continue;
-
- uint8_t* callerRetAddr = code_ + cs.returnAddressOffset();
-#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
- void* callee = X86Encoding::GetRel32Target(callerRetAddr);
-#elif defined(JS_CODEGEN_ARM)
- uint8_t* caller = callerRetAddr - 4;
- Instruction* callerInsn = reinterpret_cast<Instruction*>(caller);
- BOffImm calleeOffset;
- callerInsn->as<InstBLImm>()->extractImm(&calleeOffset);
- void* callee = calleeOffset.getDest(callerInsn);
-#elif defined(JS_CODEGEN_ARM64)
- MOZ_CRASH();
- void* callee = nullptr;
- (void)callerRetAddr;
-#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
- uint8_t* instr = callerRetAddr - Assembler::PatchWrite_NearCallSize();
- void* callee = (void*)Assembler::ExtractInstructionImmediate(instr);
-#elif defined(JS_CODEGEN_NONE)
- MOZ_CRASH();
- void* callee = nullptr;
-#else
-# error "Missing architecture"
-#endif
-
- const CodeRange* codeRange = lookupCodeRange(callee);
- if (codeRange->kind() != CodeRange::Function)
- continue;
-
- uint8_t* profilingEntry = code_ + codeRange->profilingEntry();
- uint8_t* entry = code_ + codeRange->entry();
- MOZ_ASSERT_IF(profilingEnabled_, callee == profilingEntry);
- MOZ_ASSERT_IF(!profilingEnabled_, callee == entry);
- uint8_t* newCallee = enabled ? profilingEntry : entry;
-
-#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
- X86Encoding::SetRel32(callerRetAddr, newCallee);
-#elif defined(JS_CODEGEN_ARM)
- new (caller) InstBLImm(BOffImm(newCallee - caller), Assembler::Always);
-#elif defined(JS_CODEGEN_ARM64)
- (void)newCallee;
- MOZ_CRASH();
-#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
- Assembler::PatchInstructionImmediate(instr, PatchedImmPtr(newCallee));
-#elif defined(JS_CODEGEN_NONE)
- MOZ_CRASH();
-#else
-# error "Missing architecture"
-#endif
- }
-
- // Update all the addresses in the function-pointer tables to point to the
- // profiling prologues:
- for (FuncPtrTable& funcPtrTable : staticLinkData_.funcPtrTables) {
- auto array = reinterpret_cast<void**>(globalData() + funcPtrTable.globalDataOffset());
- for (size_t i = 0; i < funcPtrTable.elemOffsets().length(); i++) {
- void* callee = array[i];
- const CodeRange* codeRange = lookupCodeRange(callee);
- void* profilingEntry = code_ + codeRange->profilingEntry();
- void* entry = code_ + codeRange->entry();
- MOZ_ASSERT_IF(profilingEnabled_, callee == profilingEntry);
- MOZ_ASSERT_IF(!profilingEnabled_, callee == entry);
- if (enabled)
- array[i] = profilingEntry;
- else
- array[i] = entry;
- }
- }
-
- // Replace all the nops in all the epilogues of asm.js functions with jumps
- // to the profiling epilogues.
- for (size_t i = 0; i < codeRanges_.length(); i++) {
- CodeRange& cr = codeRanges_[i];
- if (!cr.isFunction())
- continue;
- uint8_t* jump = code_ + cr.profilingJump();
- uint8_t* profilingEpilogue = code_ + cr.profilingEpilogue();
-#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
- // An unconditional jump with a 1 byte offset immediate has the opcode
- // 0x90. The offset is relative to the address of the instruction after
- // the jump. 0x66 0x90 is the canonical two-byte nop.
- ptrdiff_t jumpImmediate = profilingEpilogue - jump - 2;
- MOZ_ASSERT(jumpImmediate > 0 && jumpImmediate <= 127);
- if (enabled) {
- MOZ_ASSERT(jump[0] == 0x66);
- MOZ_ASSERT(jump[1] == 0x90);
- jump[0] = 0xeb;
- jump[1] = jumpImmediate;
- } else {
- MOZ_ASSERT(jump[0] == 0xeb);
- MOZ_ASSERT(jump[1] == jumpImmediate);
- jump[0] = 0x66;
- jump[1] = 0x90;
- }
-#elif defined(JS_CODEGEN_ARM)
- if (enabled) {
- MOZ_ASSERT(reinterpret_cast<Instruction*>(jump)->is<InstNOP>());
- new (jump) InstBImm(BOffImm(profilingEpilogue - jump), Assembler::Always);
- } else {
- MOZ_ASSERT(reinterpret_cast<Instruction*>(jump)->is<InstBImm>());
- new (jump) InstNOP();
- }
-#elif defined(JS_CODEGEN_ARM64)
- (void)jump;
- (void)profilingEpilogue;
- MOZ_CRASH();
-#elif defined(JS_CODEGEN_MIPS32)
- Instruction* instr = (Instruction*)jump;
- if (enabled) {
- Assembler::WriteLuiOriInstructions(instr, instr->next(),
- ScratchRegister, (uint32_t)profilingEpilogue);
- instr[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr);
- } else {
- instr[0].makeNop();
- instr[1].makeNop();
- instr[2].makeNop();
- }
-#elif defined(JS_CODEGEN_MIPS64)
- Instruction* instr = (Instruction*)jump;
- if (enabled) {
- Assembler::WriteLoad64Instructions(instr, ScratchRegister, (uint64_t)profilingEpilogue);
- instr[4] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr);
- } else {
- instr[0].makeNop();
- instr[1].makeNop();
- instr[2].makeNop();
- instr[3].makeNop();
- instr[4].makeNop();
- }
-#elif defined(JS_CODEGEN_NONE)
- MOZ_CRASH();
-#else
-# error "Missing architecture"
-#endif
- }
-
- // Replace all calls to builtins with calls to profiling thunks that push a
- // frame pointer. Since exit unwinding always starts at the caller of fp,
- // this avoids losing the innermost asm.js function.
- for (auto builtin : MakeEnumeratedRange(Builtin::Limit)) {
- auto imm = BuiltinToImmediate(builtin);
- const OffsetVector& offsets = staticLinkData_.absoluteLinks[imm];
- void* from = AddressOf(imm, nullptr);
- void* to = code_ + staticLinkData_.pod.builtinThunkOffsets[builtin];
- if (!enabled)
- Swap(from, to);
- for (size_t j = 0; j < offsets.length(); j++) {
- uint8_t* caller = code_ + offsets[j];
- const AsmJSModule::CodeRange* codeRange = lookupCodeRange(caller);
- if (codeRange->isThunk())
- continue;
- MOZ_ASSERT(codeRange->isFunction());
- Assembler::PatchDataWithValueCheck(CodeLocationLabel(caller),
- PatchedImmPtr(to),
- PatchedImmPtr(from));
- }
- }
-
- profilingEnabled_ = enabled;
-}
-
-static bool
-GetCPUID(uint32_t* cpuId)
-{
- enum Arch {
- X86 = 0x1,
- X64 = 0x2,
- ARM = 0x3,
- MIPS = 0x4,
- MIPS64 = 0x5,
- ARCH_BITS = 3
- };
-
-#if defined(JS_CODEGEN_X86)
- MOZ_ASSERT(uint32_t(CPUInfo::GetSSEVersion()) <= (UINT32_MAX >> ARCH_BITS));
- *cpuId = X86 | (uint32_t(CPUInfo::GetSSEVersion()) << ARCH_BITS);
- return true;
-#elif defined(JS_CODEGEN_X64)
- MOZ_ASSERT(uint32_t(CPUInfo::GetSSEVersion()) <= (UINT32_MAX >> ARCH_BITS));
- *cpuId = X64 | (uint32_t(CPUInfo::GetSSEVersion()) << ARCH_BITS);
- return true;
-#elif defined(JS_CODEGEN_ARM)
- MOZ_ASSERT(GetARMFlags() <= (UINT32_MAX >> ARCH_BITS));
- *cpuId = ARM | (GetARMFlags() << ARCH_BITS);
- return true;
-#elif defined(JS_CODEGEN_MIPS32)
- MOZ_ASSERT(GetMIPSFlags() <= (UINT32_MAX >> ARCH_BITS));
- *cpuId = MIPS | (GetMIPSFlags() << ARCH_BITS);
- return true;
-#elif defined(JS_CODEGEN_MIPS64)
- MOZ_ASSERT(GetMIPSFlags() <= (UINT32_MAX >> ARCH_BITS));
- *cpuId = MIPS64 | (GetMIPSFlags() << ARCH_BITS);
- return true;
-#else
- return false;
-#endif
-}
-
-class MachineId
-{
- uint32_t cpuId_;
- JS::BuildIdCharVector buildId_;
-
- public:
- bool extractCurrentState(ExclusiveContext* cx) {
- if (!cx->asmJSCacheOps().buildId)
- return false;
- if (!cx->asmJSCacheOps().buildId(&buildId_))
- return false;
- if (!GetCPUID(&cpuId_))
- return false;
- return true;
- }
-
- size_t serializedSize() const {
- return sizeof(uint32_t) +
- SerializedPodVectorSize(buildId_);
- }
-
- uint8_t* serialize(uint8_t* cursor) const {
- cursor = WriteScalar<uint32_t>(cursor, cpuId_);
- cursor = SerializePodVector(cursor, buildId_);
- return cursor;
- }
-
- const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor) {
- (cursor = ReadScalar<uint32_t>(cursor, &cpuId_)) &&
- (cursor = DeserializePodVector(cx, cursor, &buildId_));
- return cursor;
- }
-
- bool operator==(const MachineId& rhs) const {
- return cpuId_ == rhs.cpuId_ &&
- buildId_.length() == rhs.buildId_.length() &&
- PodEqual(buildId_.begin(), rhs.buildId_.begin(), buildId_.length());
- }
- bool operator!=(const MachineId& rhs) const {
- return !(*this == rhs);
- }
-};
-
struct PropertyNameWrapper
{
PropertyName* name;
PropertyNameWrapper()
: name(nullptr)
{}
explicit PropertyNameWrapper(PropertyName* name)
@@ -2193,37 +505,18 @@ class ModuleCharsForLookup : ModuleChars
if (funCtorArgs_[i].name != arg->name())
return false;
}
}
return true;
}
};
-struct ScopedCacheEntryOpenedForWrite
-{
- ExclusiveContext* cx;
- const size_t serializedSize;
- uint8_t* memory;
- intptr_t handle;
-
- ScopedCacheEntryOpenedForWrite(ExclusiveContext* cx, size_t serializedSize)
- : cx(cx), serializedSize(serializedSize), memory(nullptr), handle(-1)
- {}
-
- ~ScopedCacheEntryOpenedForWrite() {
- if (memory)
- cx->asmJSCacheOps().closeEntryForWrite(serializedSize, memory, handle);
- }
-};
-
JS::AsmJSCacheResult
-js::StoreAsmJSModuleInCache(AsmJSParser& parser,
- const AsmJSModule& module,
- ExclusiveContext* cx)
+js::StoreAsmJSModuleInCache(AsmJSParser& parser, const AsmJSModule& module, ExclusiveContext* cx)
{
MachineId machineId;
if (!machineId.extractCurrentState(cx))
return JS::AsmJSCache_InternalError;
ModuleCharsForStore moduleChars;
if (!moduleChars.init(parser))
return JS::AsmJSCache_InternalError;
@@ -2250,41 +543,24 @@ js::StoreAsmJSModuleInCache(AsmJSParser&
cursor = machineId.serialize(cursor);
cursor = moduleChars.serialize(cursor);
cursor = module.serialize(cursor);
MOZ_ASSERT(cursor == entry.memory + serializedSize);
return JS::AsmJSCache_Success;
}
-struct ScopedCacheEntryOpenedForRead
-{
- ExclusiveContext* cx;
- size_t serializedSize;
- const uint8_t* memory;
- intptr_t handle;
-
- explicit ScopedCacheEntryOpenedForRead(ExclusiveContext* cx)
- : cx(cx), serializedSize(0), memory(nullptr), handle(0)
- {}
-
- ~ScopedCacheEntryOpenedForRead() {
- if (memory)
- cx->asmJSCacheOps().closeEntryForRead(serializedSize, memory, handle);
- }
-};
-
bool
-js::LookupAsmJSModuleInCache(ExclusiveContext* cx,
- AsmJSParser& parser,
- ScopedJSDeletePtr<AsmJSModule>* moduleOut,
- ScopedJSFreePtr<char>* compilationTimeReport)
+js::LookupAsmJSModuleInCache(ExclusiveContext* cx, AsmJSParser& parser, HandleAsmJSModule moduleObj,
+ bool* loadedFromCache, UniqueChars* compilationTimeReport)
{
int64_t usecBefore = PRMJ_Now();
+ *loadedFromCache = false;
+
MachineId machineId;
if (!machineId.extractCurrentState(cx))
return true;
JS::OpenAsmJSCacheEntryForReadOp open = cx->asmJSCacheOps().openEntryForRead;
if (!open)
return true;
@@ -2308,44 +584,39 @@ js::LookupAsmJSModuleInCache(ExclusiveCo
cursor = moduleChars.deserialize(cx, cursor);
if (!moduleChars.match(parser))
return true;
uint32_t srcStart = parser.pc->maybeFunction->pn_body->pn_pos.begin;
uint32_t srcBodyStart = parser.tokenStream.currentToken().pos.end;
bool strict = parser.pc->sc->strict() && !parser.pc->sc->hasExplicitUseStrict();
- // canUseSignalHandlers will be clobbered when deserializing and checked below
- ScopedJSDeletePtr<AsmJSModule> module(
- cx->new_<AsmJSModule>(parser.ss, srcStart, srcBodyStart, strict,
- /* canUseSignalHandlers = */ false));
+ AsmJSModule* module = cx->new_<AsmJSModule>(parser.ss, srcStart, srcBodyStart, strict);
if (!module)
return false;
+ moduleObj->setModule(module);
+
cursor = module->deserialize(cx, cursor);
if (!cursor)
return false;
bool atEnd = cursor == entry.memory + entry.serializedSize;
MOZ_ASSERT(atEnd, "Corrupt cache file");
if (!atEnd)
return true;
- if (module->canUseSignalHandlers() != cx->canUseSignalHandlers())
+ if (module->wasm().compileArgs() != CompileArgs(cx))
return true;
+ module->staticallyLink(cx);
+
if (!parser.tokenStream.advance(module->srcEndBeforeCurly()))
return false;
- {
- // Delay flushing until dynamic linking.
- AutoFlushICache afc("LookupAsmJSModuleInCache", /* inhibit = */ true);
- module->setAutoFlushICacheRange();
-
- module->staticallyLink(cx);
- }
+ *loadedFromCache = true;
int64_t usecAfter = PRMJ_Now();
int ms = (usecAfter - usecBefore) / PRMJ_USEC_PER_MSEC;
- *compilationTimeReport = JS_smprintf("loaded from cache in %dms", ms);
- *moduleOut = module.forget();
+ *compilationTimeReport = UniqueChars(JS_smprintf("loaded from cache in %dms", ms));
return true;
}
+
--- a/js/src/asmjs/AsmJSModule.h
+++ b/js/src/asmjs/AsmJSModule.h
@@ -19,32 +19,26 @@
#ifndef asmjs_AsmJSModule_h
#define asmjs_AsmJSModule_h
#include "mozilla/EnumeratedArray.h"
#include "mozilla/Maybe.h"
#include "mozilla/Move.h"
#include "mozilla/PodOperations.h"
-#include "jsscript.h"
-
-#include "asmjs/AsmJSFrameIterator.h"
#include "asmjs/AsmJSValidate.h"
-#include "asmjs/Wasm.h"
+#include "asmjs/WasmModule.h"
#include "builtin/SIMD.h"
#include "gc/Tracer.h"
-#ifdef JS_ION_PERF
-# include "jit/PerfSpewer.h"
-#endif
#include "vm/TypedArrayObject.h"
namespace js {
-namespace frontend { class TokenStream; }
-namespace jit { struct BaselineScript; class MacroAssembler; }
+class AsmJSModuleObject;
+typedef Handle<AsmJSModuleObject*> HandleAsmJSModule;
// The asm.js spec recognizes this set of builtin Math functions.
enum AsmJSMathBuiltinFunction
{
AsmJSMathBuiltin_sin, AsmJSMathBuiltin_cos, AsmJSMathBuiltin_tan,
AsmJSMathBuiltin_asin, AsmJSMathBuiltin_acos, AsmJSMathBuiltin_atan,
AsmJSMathBuiltin_ceil, AsmJSMathBuiltin_floor, AsmJSMathBuiltin_exp,
AsmJSMathBuiltin_log, AsmJSMathBuiltin_pow, AsmJSMathBuiltin_sqrt,
@@ -79,39 +73,31 @@ enum AsmJSSimdType
// Set of known operations, for a given SIMD type (int32x4, float32x4,...)
enum AsmJSSimdOperation
{
#define ASMJSSIMDOPERATION(op) AsmJSSimdOperation_##op,
FORALL_SIMD_OP(ASMJSSIMDOPERATION)
#undef ASMJSSIMDOPERATION
};
-// An asm.js module represents the collection of functions nested inside a
-// single outer "use asm" function. For example, this asm.js module:
-// function() { "use asm"; function f() {} function g() {} return f }
-// contains the functions 'f' and 'g'.
-//
-// An asm.js module contains both the jit-code produced by compiling all the
-// functions in the module as well all the data required to perform the
-// link-time validation step in the asm.js spec.
-//
-// NB: this means that AsmJSModule must be GC-safe.
+// An AsmJSModule extends (via containment) a wasm::Module with the extra persistent state
+// necessary to represent a compiled asm.js module.
class AsmJSModule
{
public:
class Global
{
public:
enum Which { Variable, FFI, ArrayView, ArrayViewCtor, MathBuiltinFunction,
AtomicsBuiltinFunction, Constant, SimdCtor, SimdOperation, ByteLength };
enum VarInitKind { InitConstant, InitImport };
enum ConstantKind { GlobalConstant, MathConstant };
private:
- struct Pod {
+ struct CacheablePod {
Which which_;
union {
struct {
uint32_t globalDataOffset_;
VarInitKind initKind_;
union {
wasm::ValType importType_;
wasm::Val val_;
@@ -240,562 +226,129 @@ class AsmJSModule
MOZ_ASSERT(pod.which_ == Constant);
return pod.u.constant.kind_;
}
double constantValue() const {
MOZ_ASSERT(pod.which_ == Constant);
return pod.u.constant.value_;
}
- size_t serializedSize() const;
- uint8_t* serialize(uint8_t* cursor) const;
- const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor);
- bool clone(ExclusiveContext* cx, Global* out) const;
- };
-
- // An Exit holds bookkeeping information about an exit; the ExitDatum
- // struct overlays the actual runtime data stored in the global data
- // section.
-
- struct ExitDatum
- {
- uint8_t* exit;
- jit::BaselineScript* baselineScript;
- HeapPtrFunction fun;
+ WASM_DECLARE_SERIALIZABLE(Global);
};
- class Exit
+ typedef Vector<Global, 0, SystemAllocPolicy> GlobalVector;
+
+ class Import
{
- wasm::MallocSig sig_;
- struct Pod {
- unsigned ffiIndex_;
- unsigned globalDataOffset_;
- unsigned interpCodeOffset_;
- unsigned jitCodeOffset_;
- } pod;
-
+ uint32_t ffiIndex_;
public:
- Exit() {}
- Exit(Exit&& rhs) : sig_(Move(rhs.sig_)), pod(rhs.pod) {}
- Exit(wasm::MallocSig&& sig, unsigned ffiIndex, unsigned globalDataOffset)
- : sig_(Move(sig))
- {
- pod.ffiIndex_ = ffiIndex;
- pod.globalDataOffset_ = globalDataOffset;
- pod.interpCodeOffset_ = 0;
- pod.jitCodeOffset_ = 0;
- }
- const wasm::MallocSig& sig() const {
- return sig_;
- }
- unsigned ffiIndex() const {
- return pod.ffiIndex_;
- }
- unsigned globalDataOffset() const {
- return pod.globalDataOffset_;
- }
- void initInterpOffset(unsigned off) {
- MOZ_ASSERT(!pod.interpCodeOffset_);
- pod.interpCodeOffset_ = off;
- }
- void initJitOffset(unsigned off) {
- MOZ_ASSERT(!pod.jitCodeOffset_);
- pod.jitCodeOffset_ = off;
- }
- ExitDatum& datum(const AsmJSModule& module) const {
- return *reinterpret_cast<ExitDatum*>(module.globalData() + pod.globalDataOffset_);
- }
- void initDatum(const AsmJSModule& module) const {
- MOZ_ASSERT(pod.interpCodeOffset_);
- ExitDatum& d = datum(module);
- d.exit = module.codeBase() + pod.interpCodeOffset_;
- d.baselineScript = nullptr;
- d.fun = nullptr;
- }
- bool isOptimized(const AsmJSModule& module) const {
- return datum(module).exit == module.codeBase() + pod.jitCodeOffset_;
- }
- void optimize(const AsmJSModule& module, jit::BaselineScript* baselineScript) const {
- ExitDatum& d = datum(module);
- d.exit = module.codeBase() + pod.jitCodeOffset_;
- d.baselineScript = baselineScript;
- }
- void deoptimize(const AsmJSModule& module) const {
- ExitDatum& d = datum(module);
- d.exit = module.codeBase() + pod.interpCodeOffset_;
- d.baselineScript = nullptr;
- }
-
- size_t serializedSize() const;
- uint8_t* serialize(uint8_t* cursor) const;
- const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor);
- bool clone(ExclusiveContext* cx, Exit* out) const;
+ Import() = default;
+ explicit Import(uint32_t ffiIndex) : ffiIndex_(ffiIndex) {}
+ uint32_t ffiIndex() const { return ffiIndex_; }
};
- struct EntryArg {
- uint64_t lo;
- uint64_t hi;
- };
+ typedef Vector<Import, 0, SystemAllocPolicy> ImportVector;
- typedef int32_t (*CodePtr)(EntryArg* args, uint8_t* global);
-
- class ExportedFunction
+ class Export
{
PropertyName* name_;
PropertyName* maybeFieldName_;
- wasm::MallocSig sig_;
- struct Pod {
- bool isChangeHeap_;
- uint32_t funcIndex_;
- uint32_t codeOffset_;
+ struct CacheablePod {
+ uint32_t wasmIndex_;
uint32_t startOffsetInModule_; // Store module-start-relative offsets
uint32_t endOffsetInModule_; // so preserved by serialization.
} pod;
- friend class AsmJSModule;
-
- ExportedFunction(PropertyName* name, uint32_t funcIndex,
- uint32_t startOffsetInModule, uint32_t endOffsetInModule,
- PropertyName* maybeFieldName,
- wasm::MallocSig&& sig)
- : name_(name),
- maybeFieldName_(maybeFieldName),
- sig_(Move(sig))
- {
- MOZ_ASSERT(name_->isTenured());
- MOZ_ASSERT_IF(maybeFieldName_, maybeFieldName_->isTenured());
- mozilla::PodZero(&pod); // zero padding for Valgrind
- pod.funcIndex_ = funcIndex;
- pod.isChangeHeap_ = false;
- pod.codeOffset_ = UINT32_MAX;
- pod.startOffsetInModule_ = startOffsetInModule;
- pod.endOffsetInModule_ = endOffsetInModule;
- }
-
- ExportedFunction(PropertyName* name,
- uint32_t startOffsetInModule, uint32_t endOffsetInModule,
- PropertyName* maybeFieldName)
+ public:
+ Export() {}
+ Export(PropertyName* name, PropertyName* maybeFieldName, uint32_t wasmIndex,
+ uint32_t startOffsetInModule, uint32_t endOffsetInModule)
: name_(name),
maybeFieldName_(maybeFieldName)
{
MOZ_ASSERT(name_->isTenured());
MOZ_ASSERT_IF(maybeFieldName_, maybeFieldName_->isTenured());
- mozilla::PodZero(&pod); // zero padding for Valgrind
- pod.isChangeHeap_ = true;
+ pod.wasmIndex_ = wasmIndex;
pod.startOffsetInModule_ = startOffsetInModule;
pod.endOffsetInModule_ = endOffsetInModule;
}
void trace(JSTracer* trc) {
TraceManuallyBarrieredEdge(trc, &name_, "asm.js export name");
if (maybeFieldName_)
TraceManuallyBarrieredEdge(trc, &maybeFieldName_, "asm.js export field");
}
- public:
- ExportedFunction() {}
- ExportedFunction(ExportedFunction&& rhs)
- : name_(rhs.name_),
- maybeFieldName_(rhs.maybeFieldName_),
- sig_(mozilla::Move(rhs.sig_))
- {
- mozilla::PodZero(&pod); // zero padding for Valgrind
- pod = rhs.pod;
- }
-
PropertyName* name() const {
return name_;
}
PropertyName* maybeFieldName() const {
return maybeFieldName_;
}
uint32_t startOffsetInModule() const {
return pod.startOffsetInModule_;
}
uint32_t endOffsetInModule() const {
return pod.endOffsetInModule_;
}
+ static const uint32_t ChangeHeap = UINT32_MAX;
bool isChangeHeap() const {
- return pod.isChangeHeap_;
- }
- uint32_t funcIndex() const {
- MOZ_ASSERT(!isChangeHeap());
- return pod.funcIndex_;
- }
- void initCodeOffset(unsigned off) {
- MOZ_ASSERT(!isChangeHeap());
- MOZ_ASSERT(pod.codeOffset_ == UINT32_MAX);
- pod.codeOffset_ = off;
- }
- const wasm::MallocSig& sig() const {
- MOZ_ASSERT(!isChangeHeap());
- return sig_;
- }
-
- size_t serializedSize() const;
- uint8_t* serialize(uint8_t* cursor) const;
- const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor);
- bool clone(ExclusiveContext* cx, ExportedFunction* out) const;
- };
-
- class CodeRange
- {
- protected:
- uint32_t nameIndex_;
-
- private:
- uint32_t lineNumber_;
- uint32_t begin_;
- uint32_t profilingReturn_;
- uint32_t end_;
- union {
- struct {
- uint8_t kind_;
- uint8_t beginToEntry_;
- uint8_t profilingJumpToProfilingReturn_;
- uint8_t profilingEpilogueToProfilingReturn_;
- } func;
- struct {
- uint8_t kind_;
- uint16_t target_;
- } thunk;
- uint8_t kind_;
- } u;
-
- void assertValid();
-
- public:
- enum Kind { Function, Entry, JitFFI, SlowFFI, Interrupt, Thunk, Inline };
-
- CodeRange() {}
- CodeRange(Kind kind, AsmJSOffsets offsets);
- CodeRange(Kind kind, AsmJSProfilingOffsets offsets);
- CodeRange(wasm::Builtin builtin, AsmJSProfilingOffsets offsets);
- CodeRange(uint32_t lineNumber, AsmJSFunctionOffsets offsets);
-
- Kind kind() const { return Kind(u.kind_); }
- bool isFunction() const { return kind() == Function; }
- bool isEntry() const { return kind() == Entry; }
- bool isFFI() const { return kind() == JitFFI || kind() == SlowFFI; }
- bool isInterrupt() const { return kind() == Interrupt; }
- bool isThunk() const { return kind() == Thunk; }
-
- uint32_t begin() const {
- return begin_;
- }
- uint32_t profilingEntry() const {
- return begin();
- }
- uint32_t entry() const {
- MOZ_ASSERT(isFunction());
- return begin_ + u.func.beginToEntry_;
- }
- uint32_t end() const {
- return end_;
- }
- uint32_t profilingJump() const {
- MOZ_ASSERT(isFunction());
- return profilingReturn_ - u.func.profilingJumpToProfilingReturn_;
+ return pod.wasmIndex_ == ChangeHeap;
}
- uint32_t profilingEpilogue() const {
- MOZ_ASSERT(isFunction());
- return profilingReturn_ - u.func.profilingEpilogueToProfilingReturn_;
- }
- uint32_t profilingReturn() const {
- MOZ_ASSERT(isFunction() || isFFI() || isInterrupt() || isThunk());
- return profilingReturn_;
- }
- void initNameIndex(uint32_t nameIndex) {
- MOZ_ASSERT(nameIndex_ == UINT32_MAX);
- nameIndex_ = nameIndex;
- }
- uint32_t functionNameIndex() const {
- MOZ_ASSERT(isFunction());
- MOZ_ASSERT(nameIndex_ != UINT32_MAX);
- return nameIndex_;
- }
- PropertyName* functionName(const AsmJSModule& module) const {
- return module.names_[functionNameIndex()].name();
- }
- const char* functionProfilingLabel(const AsmJSModule& module) const {
- MOZ_ASSERT(isFunction());
- return module.profilingLabels_[nameIndex_].get();
- }
- uint32_t functionLineNumber() const {
- MOZ_ASSERT(isFunction());
- return lineNumber_;
- }
- void functionOffsetBy(uint32_t offset) {
- MOZ_ASSERT(isFunction());
- begin_ += offset;
- profilingReturn_ += offset;
- end_ += offset;
- }
- wasm::Builtin thunkTarget() const {
- MOZ_ASSERT(isThunk());
- return wasm::Builtin(u.thunk.target_);
- }
- };
-
- class Name
- {
- PropertyName* name_;
- public:
- Name() : name_(nullptr) {}
- MOZ_IMPLICIT Name(PropertyName* name) : name_(name) {}
- PropertyName* name() const { return name_; }
- PropertyName*& name() { return name_; }
- size_t serializedSize() const;
- uint8_t* serialize(uint8_t* cursor) const;
- const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor);
- bool clone(ExclusiveContext* cx, Name* out) const;
- };
-
- typedef mozilla::UniquePtr<char[], JS::FreePolicy> ProfilingLabel;
-
-#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
- // Function information to add to the VTune JIT profiler following linking.
- struct ProfiledFunction
- {
- PropertyName* name;
- struct Pod {
- unsigned startCodeOffset;
- unsigned endCodeOffset;
- unsigned lineno;
- unsigned columnIndex;
- } pod;
-
- explicit ProfiledFunction()
- : name(nullptr)
- { }
-
- ProfiledFunction(PropertyName* name, unsigned start, unsigned end,
- unsigned line = 0, unsigned column = 0)
- : name(name)
- {
- MOZ_ASSERT(name->isTenured());
-
- pod.startCodeOffset = start;
- pod.endCodeOffset = end;
- pod.lineno = line;
- pod.columnIndex = column;
- }
-
- void trace(JSTracer* trc) {
- if (name)
- TraceManuallyBarrieredEdge(trc, &name, "asm.js profiled function name");
+ uint32_t wasmIndex() const {
+ MOZ_ASSERT(!isChangeHeap());
+ return pod.wasmIndex_;
}
- size_t serializedSize() const;
- uint8_t* serialize(uint8_t* cursor) const;
- const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor);
- };
-#endif
-
- struct RelativeLink
- {
- enum Kind
- {
- RawPointer,
- CodeLabel,
- InstructionImmediate
- };
-
- RelativeLink()
- { }
-
-#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
- // On MIPS, CodeLabels are instruction immediates so RelativeLinks only
- // patch instruction immediates.
- explicit RelativeLink(Kind kind) {
- MOZ_ASSERT(kind == CodeLabel || kind == InstructionImmediate);
- }
- bool isRawPointerPatch() {
- return false;
- }
-#else
- // On the rest, CodeLabels are raw pointers so RelativeLinks only patch
- // raw pointers.
- explicit RelativeLink(Kind kind) {
- MOZ_ASSERT(kind == CodeLabel || kind == RawPointer);
- }
- bool isRawPointerPatch() {
- return true;
- }
-#endif
-
- uint32_t patchAtOffset;
- uint32_t targetOffset;
- };
-
- typedef Vector<RelativeLink, 0, SystemAllocPolicy> RelativeLinkVector;
-
- typedef mozilla::EnumeratedArray<wasm::Builtin,
- wasm::Builtin::Limit,
- uint32_t> BuiltinThunkOffsetArray;
-
- typedef Vector<uint32_t, 0, SystemAllocPolicy> OffsetVector;
- typedef mozilla::EnumeratedArray<wasm::SymbolicAddress,
- wasm::SymbolicAddress::Limit,
- OffsetVector> OffsetVectorArray;
-
- struct AbsoluteLinkArray : public OffsetVectorArray
- {
- size_t serializedSize() const;
- uint8_t* serialize(uint8_t* cursor) const;
- const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor);
- bool clone(ExclusiveContext* cx, AbsoluteLinkArray* out) const;
-
- size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+ WASM_DECLARE_SERIALIZABLE(Export)
};
- class FuncPtrTable
- {
- struct Pod {
- uint32_t globalDataOffset_;
- } pod;
- OffsetVector elemOffsets_;
-
- public:
- FuncPtrTable() {}
- FuncPtrTable(FuncPtrTable&& rhs) : pod(rhs.pod), elemOffsets_(Move(rhs.elemOffsets_)) {}
- explicit FuncPtrTable(uint32_t globalDataOffset) { pod.globalDataOffset_ = globalDataOffset; }
- void define(OffsetVector&& elemOffsets) { elemOffsets_ = Move(elemOffsets); }
- uint32_t globalDataOffset() const { return pod.globalDataOffset_; }
- const OffsetVector& elemOffsets() const { return elemOffsets_; }
-
- size_t serializedSize() const;
- uint8_t* serialize(uint8_t* cursor) const;
- const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor);
- bool clone(ExclusiveContext* cx, FuncPtrTable* out) const;
-
- size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
- };
-
- typedef Vector<FuncPtrTable, 0, SystemAllocPolicy> FuncPtrTableVector;
+ typedef Vector<Export, 0, SystemAllocPolicy> ExportVector;
- // Static-link data is used to patch a module either after it has been
- // compiled or deserialized with various absolute addresses (of code or
- // data in the process) or relative addresses (of code or data in the same
- // AsmJSModule).
- struct StaticLinkData
- {
- StaticLinkData() { mozilla::PodZero(&pod); }
-
- struct Pod {
- uint32_t interruptExitOffset;
- uint32_t outOfBoundsExitOffset;
- BuiltinThunkOffsetArray builtinThunkOffsets;
- } pod;
-
- RelativeLinkVector relativeLinks;
- AbsoluteLinkArray absoluteLinks;
- FuncPtrTableVector funcPtrTables;
-
- size_t serializedSize() const;
- uint8_t* serialize(uint8_t* cursor) const;
- const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor);
- bool clone(ExclusiveContext* cx, StaticLinkData* out) const;
-
- size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
- };
+ typedef JS::UniquePtr<wasm::Module, JS::DeletePolicy<wasm::Module>> UniqueWasmModule;
private:
- struct Pod {
- uint32_t functionBytes_;
- uint32_t codeBytes_;
- uint32_t globalBytes_;
- uint32_t totalBytes_;
- uint32_t minHeapLength_;
- uint32_t maxHeapLength_;
- uint32_t heapLengthMask_;
- uint32_t numFFIs_;
- uint32_t srcLength_;
- uint32_t srcLengthWithRightBrace_;
- bool strict_;
- bool hasArrayView_;
- bool isSharedView_;
- bool hasFixedMinHeapLength_;
- bool canUseSignalHandlers_;
+ UniqueWasmModule wasm_;
+ wasm::UniqueStaticLinkData linkData_;
+ struct CacheablePod {
+ uint32_t minHeapLength_;
+ uint32_t maxHeapLength_;
+ uint32_t heapLengthMask_;
+ uint32_t numFFIs_;
+ uint32_t srcLength_;
+ uint32_t srcLengthWithRightBrace_;
+ bool strict_;
+ bool hasArrayView_;
+ bool isSharedView_;
+ bool hasFixedMinHeapLength_;
} pod;
-
- // These two fields need to be kept out pod as they depend on the position
- // of the module within the ScriptSource and thus aren't invariant with
- // respect to caching.
- const uint32_t srcStart_;
- const uint32_t srcBodyStart_;
-
- Vector<Global, 0, SystemAllocPolicy> globals_;
- Vector<Exit, 0, SystemAllocPolicy> exits_;
- Vector<ExportedFunction, 0, SystemAllocPolicy> exports_;
- Vector<wasm::CallSite, 0, SystemAllocPolicy> callSites_;
- Vector<CodeRange, 0, SystemAllocPolicy> codeRanges_;
- Vector<Name, 0, SystemAllocPolicy> names_;
- Vector<ProfilingLabel, 0, SystemAllocPolicy> profilingLabels_;
- Vector<wasm::HeapAccess, 0, SystemAllocPolicy> heapAccesses_;
-#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
- Vector<ProfiledFunction, 0, SystemAllocPolicy> profiledFunctions_;
-#endif
-
- ScriptSource * scriptSource_;
- PropertyName * globalArgumentName_;
- PropertyName * importArgumentName_;
- PropertyName * bufferArgumentName_;
- uint8_t * code_;
- uint8_t * interruptExit_;
- uint8_t * outOfBoundsExit_;
- StaticLinkData staticLinkData_;
- RelocatablePtrArrayBufferObjectMaybeShared maybeHeap_;
- AsmJSModule ** prevLinked_;
- AsmJSModule * nextLinked_;
- bool dynamicallyLinked_;
- bool loadedFromCache_;
- bool profilingEnabled_;
- bool interrupted_;
-
- void restoreHeapToInitialState(ArrayBufferObjectMaybeShared* maybePrevBuffer);
- void restoreToInitialState(ArrayBufferObjectMaybeShared* maybePrevBuffer, uint8_t* prevCode,
- ExclusiveContext* cx);
+ const ScriptSourceHolder scriptSource_;
+ const uint32_t srcStart_;
+ const uint32_t srcBodyStart_;
+ GlobalVector globals_;
+ ImportVector imports_;
+ ExportVector exports_;
+ PropertyName* globalArgumentName_;
+ PropertyName* importArgumentName_;
+ PropertyName* bufferArgumentName_;
public:
explicit AsmJSModule(ScriptSource* scriptSource, uint32_t srcStart, uint32_t srcBodyStart,
- bool strict, bool canUseSignalHandlers);
+ bool strict);
void trace(JSTracer* trc);
- ~AsmJSModule();
-
- // An AsmJSModule transitions from !finished to finished to dynamically linked.
- bool isFinished() const { return !!code_; }
- bool isDynamicallyLinked() const { return dynamicallyLinked_; }
/*************************************************************************/
// These functions may be used as soon as the module is constructed:
ScriptSource* scriptSource() const {
- MOZ_ASSERT(scriptSource_);
- return scriptSource_;
+ return scriptSource_.get();
}
bool strict() const {
return pod.strict_;
}
- bool canUseSignalHandlers() const {
- return pod.canUseSignalHandlers_;
- }
- bool usesSignalHandlersForInterrupt() const {
- return pod.canUseSignalHandlers_;
- }
- bool usesSignalHandlersForOOB() const {
-#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
- return pod.canUseSignalHandlers_;
-#else
- return false;
-#endif
- }
- bool loadedFromCache() const {
- return loadedFromCache_;
- }
// srcStart() refers to the offset in the ScriptSource to the beginning of
// the asm.js module function. If the function has been created with the
// Function constructor, this will be the first character in the function
// source. Otherwise, it will be the opening parenthesis of the arguments
// list.
uint32_t srcStart() const {
return srcStart_;
@@ -815,24 +368,16 @@ class AsmJSModule
uint32_t maxHeapLength() const {
return pod.maxHeapLength_;
}
uint32_t heapLengthMask() const {
MOZ_ASSERT(pod.hasFixedMinHeapLength_);
return pod.heapLengthMask_;
}
- // about:memory reporting
- void addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* asmJSModuleCode,
- size_t* asmJSModuleData);
-
- /*************************************************************************/
- // These functions build the global scope of the module while parsing the
- // module prologue (before the function bodies):
-
void initGlobalArgumentName(PropertyName* n) {
MOZ_ASSERT(!isFinished());
MOZ_ASSERT_IF(n, n->isTenured());
globalArgumentName_ = n;
}
void initImportArgumentName(PropertyName* n) {
MOZ_ASSERT(!isFinished());
MOZ_ASSERT_IF(n, n->isTenured());
@@ -848,59 +393,30 @@ class AsmJSModule
}
PropertyName* importArgumentName() const {
return importArgumentName_;
}
PropertyName* bufferArgumentName() const {
return bufferArgumentName_;
}
- /*************************************************************************/
- // These functions may only be called before finish():
-
- private:
- bool allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOffset) {
+ bool addGlobalVarInit(const wasm::Val& v, uint32_t globalDataOffset) {
MOZ_ASSERT(!isFinished());
- uint32_t pad = ComputeByteAlignment(pod.globalBytes_, align);
- if (UINT32_MAX - pod.globalBytes_ < pad + bytes)
- return false;
- pod.globalBytes_ += pad;
- *globalDataOffset = pod.globalBytes_;
- pod.globalBytes_ += bytes;
- return true;
- }
- bool addGlobalVar(wasm::ValType type, uint32_t* globalDataOffset) {
- MOZ_ASSERT(!isFinished());
- unsigned width = 0;
- switch (type) {
- case wasm::ValType::I32: case wasm::ValType::F32: width = 4; break;
- case wasm::ValType::I64: case wasm::ValType::F64: width = 8; break;
- case wasm::ValType::I32x4: case wasm::ValType::F32x4: width = 16; break;
- }
- return allocateGlobalBytes(width, width, globalDataOffset);
- }
- public:
- bool addGlobalVarInit(const wasm::Val& v, uint32_t* globalDataOffset) {
- MOZ_ASSERT(!isFinished());
- if (!addGlobalVar(v.type(), globalDataOffset))
- return false;
Global g(Global::Variable, nullptr);
g.pod.u.var.initKind_ = Global::InitConstant;
g.pod.u.var.u.val_ = v;
- g.pod.u.var.globalDataOffset_ = *globalDataOffset;
+ g.pod.u.var.globalDataOffset_ = globalDataOffset;
return globals_.append(g);
}
- bool addGlobalVarImport(PropertyName* name, wasm::ValType importType, uint32_t* globalDataOffset) {
+ bool addGlobalVarImport(PropertyName* name, wasm::ValType importType, uint32_t globalDataOffset) {
MOZ_ASSERT(!isFinished());
- if (!addGlobalVar(importType, globalDataOffset))
- return false;
Global g(Global::Variable, name);
g.pod.u.var.initKind_ = Global::InitImport;
g.pod.u.var.u.importType_ = importType;
- g.pod.u.var.globalDataOffset_ = *globalDataOffset;
+ g.pod.u.var.globalDataOffset_ = globalDataOffset;
return globals_.append(g);
}
bool addFFI(PropertyName* field, uint32_t* ffiIndex) {
MOZ_ASSERT(!isFinished());
if (pod.numFFIs_ == UINT32_MAX)
return false;
Global g(Global::FFI, field);
g.pod.u.ffiIndex_ = *ffiIndex = pod.numFFIs_++;
@@ -961,416 +477,153 @@ class AsmJSModule
}
bool addGlobalConstant(double value, PropertyName* name) {
MOZ_ASSERT(!isFinished());
Global g(Global::Constant, name);
g.pod.u.constant.value_ = value;
g.pod.u.constant.kind_ = Global::GlobalConstant;
return globals_.append(g);
}
- unsigned numGlobals() const {
- return globals_.length();
- }
- Global& global(unsigned i) {
- return globals_[i];
- }
- void setViewsAreShared() {
- if (pod.hasArrayView_)
- pod.isSharedView_ = true;
+ bool addImport(uint32_t ffiIndex, uint32_t importIndex) {
+ MOZ_ASSERT(imports_.length() == importIndex);
+ return imports_.emplaceBack(ffiIndex);
}
-
- /*************************************************************************/
- // These functions are called while parsing/compiling function bodies:
-
- bool hasArrayView() const {
- return pod.hasArrayView_;
+ bool addExport(PropertyName* name, PropertyName* maybeFieldName, uint32_t wasmIndex,
+ uint32_t funcSrcBegin, uint32_t funcSrcEnd)
+ {
+ // NB: funcSrcBegin/funcSrcEnd are given relative to the ScriptSource
+ // (the entire file) and ExportedFunctions store offsets relative to
+ // the beginning of the module (so that they are caching-invariant).
+ MOZ_ASSERT(!isFinished());
+ MOZ_ASSERT(srcStart_ < funcSrcBegin);
+ MOZ_ASSERT(funcSrcBegin < funcSrcEnd);
+ return exports_.emplaceBack(name, maybeFieldName, wasmIndex,
+ funcSrcBegin - srcStart_, funcSrcEnd - srcStart_);
}
- bool isSharedView() const {
- MOZ_ASSERT(pod.hasArrayView_);
- return pod.isSharedView_;
- }
- void addChangeHeap(uint32_t mask, uint32_t min, uint32_t max) {
+ bool addChangeHeap(uint32_t mask, uint32_t min, uint32_t max) {
MOZ_ASSERT(!isFinished());
MOZ_ASSERT(!pod.hasFixedMinHeapLength_);
MOZ_ASSERT(IsValidAsmJSHeapLength(mask + 1));
MOZ_ASSERT(min >= RoundUpToNextValidAsmJSHeapLength(0));
MOZ_ASSERT(max <= pod.maxHeapLength_);
MOZ_ASSERT(min <= max);
pod.heapLengthMask_ = mask;
pod.minHeapLength_ = min;
pod.maxHeapLength_ = max;
pod.hasFixedMinHeapLength_ = true;
+ return true;
+ }
+
+ const GlobalVector& globals() const {
+ return globals_;
+ }
+ const ImportVector& imports() const {
+ return imports_;
+ }
+ const ExportVector& exports() const {
+ return exports_;
+ }
+
+ void setViewsAreShared() {
+ if (pod.hasArrayView_)
+ pod.isSharedView_ = true;
+ }
+ bool hasArrayView() const {
+ return pod.hasArrayView_;
+ }
+ bool isSharedView() const {
+ return pod.isSharedView_;
}
bool tryRequireHeapLengthToBeAtLeast(uint32_t len) {
MOZ_ASSERT(!isFinished());
if (pod.hasFixedMinHeapLength_ && len > pod.minHeapLength_)
return false;
if (len > pod.maxHeapLength_)
return false;
len = RoundUpToNextValidAsmJSHeapLength(len);
if (len > pod.minHeapLength_)
pod.minHeapLength_ = len;
return true;
}
- bool addCodeRange(CodeRange::Kind kind, AsmJSOffsets offsets) {
- return codeRanges_.append(CodeRange(kind, offsets));
- }
- bool addCodeRange(CodeRange::Kind kind, AsmJSProfilingOffsets offsets) {
- return codeRanges_.append(CodeRange(kind, offsets));
- }
- bool addFunctionCodeRange(PropertyName* name, CodeRange codeRange) {
- MOZ_ASSERT(!isFinished());
- MOZ_ASSERT(name->isTenured());
- if (names_.length() >= UINT32_MAX)
- return false;
- codeRange.initNameIndex(names_.length());
- return names_.append(name) && codeRanges_.append(codeRange);
- }
- bool addBuiltinThunkCodeRange(wasm::Builtin builtin, AsmJSProfilingOffsets offsets) {
- MOZ_ASSERT(staticLinkData_.pod.builtinThunkOffsets[builtin] == 0);
- staticLinkData_.pod.builtinThunkOffsets[builtin] = offsets.begin;
- return codeRanges_.append(CodeRange(builtin, offsets));
- }
- bool addExit(wasm::MallocSig&& sig, unsigned ffiIndex, unsigned* exitIndex) {
- MOZ_ASSERT(!isFinished());
- static_assert(sizeof(ExitDatum) % sizeof(void*) == 0, "word aligned");
- uint32_t globalDataOffset;
- if (!allocateGlobalBytes(sizeof(ExitDatum), sizeof(void*), &globalDataOffset))
- return false;
- *exitIndex = unsigned(exits_.length());
- return exits_.append(Exit(Move(sig), ffiIndex, globalDataOffset));
- }
- unsigned numExits() const {
- return exits_.length();
- }
- Exit& exit(unsigned i) {
- return exits_[i];
- }
- const Exit& exit(unsigned i) const {
- return exits_[i];
- }
- bool declareFuncPtrTable(unsigned numElems, uint32_t* funcPtrTableIndex) {
- MOZ_ASSERT(!isFinished());
- MOZ_ASSERT(IsPowerOfTwo(numElems));
- uint32_t globalDataOffset;
- if (!allocateGlobalBytes(numElems * sizeof(void*), sizeof(void*), &globalDataOffset))
- return false;
- *funcPtrTableIndex = staticLinkData_.funcPtrTables.length();
- return staticLinkData_.funcPtrTables.append(FuncPtrTable(globalDataOffset));
- }
- FuncPtrTable& funcPtrTable(uint32_t funcPtrTableIndex) {
- return staticLinkData_.funcPtrTables[funcPtrTableIndex];
- }
-#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
- bool addProfiledFunction(ProfiledFunction func) {
- MOZ_ASSERT(!isFinished());
- return profiledFunctions_.append(func);
- }
- unsigned numProfiledFunctions() const {
- return profiledFunctions_.length();
- }
- ProfiledFunction& profiledFunction(unsigned i) {
- return profiledFunctions_[i];
- }
-#endif
-
- bool addExportedFunction(PropertyName* name,
- uint32_t funcIndex,
- uint32_t funcSrcBegin,
- uint32_t funcSrcEnd,
- PropertyName* maybeFieldName,
- wasm::MallocSig&& sig)
- {
- // NB: funcSrcBegin/funcSrcEnd are given relative to the ScriptSource
- // (the entire file) and ExportedFunctions store offsets relative to
- // the beginning of the module (so that they are caching-invariant).
- MOZ_ASSERT(!isFinished());
- MOZ_ASSERT(srcStart_ < funcSrcBegin);
- MOZ_ASSERT(funcSrcBegin < funcSrcEnd);
- ExportedFunction func(name, funcIndex, funcSrcBegin - srcStart_, funcSrcEnd - srcStart_,
- maybeFieldName, mozilla::Move(sig));
- return exports_.length() < UINT32_MAX && exports_.append(mozilla::Move(func));
- }
- bool addExportedChangeHeap(PropertyName* name,
- uint32_t funcSrcBegin,
- uint32_t funcSrcEnd,
- PropertyName* maybeFieldName)
- {
- // See addExportedFunction.
- MOZ_ASSERT(!isFinished());
- MOZ_ASSERT(srcStart_ < funcSrcBegin);
- MOZ_ASSERT(funcSrcBegin < funcSrcEnd);
- ExportedFunction func(name, funcSrcBegin - srcStart_, funcSrcEnd - srcStart_,
- maybeFieldName);
- return exports_.length() < UINT32_MAX && exports_.append(mozilla::Move(func));
- }
- unsigned numExportedFunctions() const {
- return exports_.length();
- }
- const ExportedFunction& exportedFunction(unsigned i) const {
- return exports_[i];
- }
- ExportedFunction& exportedFunction(unsigned i) {
- return exports_[i];
- }
- void setAsyncInterruptOffset(uint32_t o) {
- staticLinkData_.pod.interruptExitOffset = o;
- }
- void setOnOutOfBoundsExitOffset(uint32_t o) {
- staticLinkData_.pod.outOfBoundsExitOffset = o;
- }
/*************************************************************************/
+ // A module isFinished() when compilation completes. After being finished,
+ // a module must be statically and dynamically linked before execution.
- // finish() is called once the entire module has been parsed (via
- // tokenStream) and all function and entry/exit trampolines have been
- // generated (via masm). After this function, the module must still be
- // statically and dynamically linked before code can be run.
- bool finish(ExclusiveContext* cx, frontend::TokenStream& ts, jit::MacroAssembler& masm);
+ bool isFinished() const {
+ return !!wasm_;
+ }
+ void finish(wasm::Module* wasm, wasm::UniqueStaticLinkData linkData,
+ uint32_t endBeforeCurly, uint32_t endAfterCurly);
/*************************************************************************/
- // These accessor functions can be used after finish():
-
- uint8_t* codeBase() const {
- MOZ_ASSERT(isFinished());
- MOZ_ASSERT(uintptr_t(code_) % AsmJSPageSize == 0);
- return code_;
- }
- uint32_t codeBytes() const {
- MOZ_ASSERT(isFinished());
- return pod.codeBytes_;
- }
- bool containsCodePC(void* pc) const {
- MOZ_ASSERT(isFinished());
- return pc >= code_ && pc < (code_ + codeBytes());
- }
+ // These accessor functions can only be used after finish():
- // The range [0, functionBytes) is a subrange of [0, codeBytes) that
- // contains only function body code, not the stub code. This distinction is
- // used by the async interrupt handler to only interrupt when the pc is in
- // function code which, in turn, simplifies reasoning about how stubs
- // enter/exit.
- void setFunctionBytes(uint32_t functionBytes) {
- MOZ_ASSERT(!isFinished());
- MOZ_ASSERT(!pod.functionBytes_);
- pod.functionBytes_ = functionBytes;
- }
- uint32_t functionBytes() const {
+ wasm::Module& wasm() const {
MOZ_ASSERT(isFinished());
- return pod.functionBytes_;
- }
- bool containsFunctionPC(void* pc) const {
- MOZ_ASSERT(isFinished());
- return pc >= code_ && pc < (code_ + functionBytes());
+ return *wasm_;
}
-
- uint32_t globalBytes() const {
- MOZ_ASSERT(isFinished());
- return pod.globalBytes_;
- }
-
- unsigned numFFIs() const {
+ uint32_t numFFIs() const {
MOZ_ASSERT(isFinished());
return pod.numFFIs_;
}
uint32_t srcEndBeforeCurly() const {
MOZ_ASSERT(isFinished());
return srcStart_ + pod.srcLength_;
}
uint32_t srcEndAfterCurly() const {
MOZ_ASSERT(isFinished());
return srcStart_ + pod.srcLengthWithRightBrace_;
}
-
- // Lookup a callsite by the return pc (from the callee to the caller).
- // Return null if no callsite was found.
- const wasm::CallSite* lookupCallSite(void* returnAddress) const;
-
- // Lookup the name the code range containing the given pc. Return null if no
- // code range was found.
- const CodeRange* lookupCodeRange(void* pc) const;
-
- // Lookup a heap access site by the pc which performs the access. Return
- // null if no heap access was found.
- const wasm::HeapAccess* lookupHeapAccess(void* pc) const;
-
- // The global data section is placed after the executable code (i.e., at
- // offset codeBytes_) in the module's linear allocation. The global data
- // starts with some fixed allocations followed by interleaved global,
- // function-pointer table and exit allocations.
- uint32_t offsetOfGlobalData() const {
- MOZ_ASSERT(isFinished());
- return pod.codeBytes_;
- }
- uint8_t* globalData() const {
- MOZ_ASSERT(isFinished());
- return codeBase() + offsetOfGlobalData();
- }
- static void assertGlobalDataOffsets() {
- static_assert(wasm::ActivationGlobalDataOffset == 0,
- "an AsmJSActivation* data goes first");
- static_assert(wasm::HeapGlobalDataOffset == wasm::ActivationGlobalDataOffset + sizeof(void*),
- "then a pointer to the heap*");
- static_assert(wasm::NaN64GlobalDataOffset == wasm::HeapGlobalDataOffset + sizeof(uint8_t*),
- "then a 64-bit NaN");
- static_assert(wasm::NaN32GlobalDataOffset == wasm::NaN64GlobalDataOffset + sizeof(double),
- "then a 32-bit NaN");
- static_assert(sInitialGlobalDataBytes == wasm::NaN32GlobalDataOffset + sizeof(float),
- "then all the normal global data (globals, exits, func-ptr-tables)");
- }
- static const uint32_t sInitialGlobalDataBytes = wasm::NaN32GlobalDataOffset + sizeof(float);
-
- AsmJSActivation*& activation() const {
- MOZ_ASSERT(isFinished());
- return *(AsmJSActivation**)(globalData() + wasm::ActivationGlobalDataOffset);
- }
- bool active() const {
- return activation() != nullptr;
- }
- private:
- // The pointer may reference shared memory, use with care.
- // Generally you want to use maybeHeap(), not heapDatum().
- uint8_t*& heapDatum() const {
- MOZ_ASSERT(isFinished());
- return *(uint8_t**)(globalData() + wasm::HeapGlobalDataOffset);
- }
- public:
-
- /*************************************************************************/
- // These functions are called after finish() but before staticallyLink():
-
- bool addRelativeLink(RelativeLink link) {
- MOZ_ASSERT(isFinished());
- return staticLinkData_.relativeLinks.append(link);
+ bool staticallyLink(ExclusiveContext* cx) {
+ return wasm_->staticallyLink(cx, *linkData_);
}
- // A module is serialized after it is finished but before it is statically
- // linked. (Technically, it could be serialized after static linking, but it
- // would still need to be statically linked on deserialization.)
+ // See WASM_DECLARE_SERIALIZABLE.
size_t serializedSize() const;
uint8_t* serialize(uint8_t* cursor) const;
const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor);
-
- // Additionally, this function is called to flush the i-cache after
- // deserialization and cloning (but still before static linking, to prevent
- // a bunch of expensive micro-flushes).
- void setAutoFlushICacheRange();
-
- /*************************************************************************/
-
- // After a module is finished compiling or deserializing, it is "statically
- // linked" which specializes the code to its current address (this allows
- // code to be relocated between serialization and deserialization).
- void staticallyLink(ExclusiveContext* cx);
-
- // After a module is statically linked, it is "dynamically linked" which
- // specializes it to a particular set of arguments. In particular, this
- // binds the code to a particular heap (via initHeap) and set of global
- // variables. A given asm.js module cannot be dynamically linked more than
- // once so, if JS tries, the module is cloned. When linked, an asm.js module
- // is kept in a list so that it can be updated if the linked buffer is
- // detached.
- void setIsDynamicallyLinked(JSRuntime* rt) {
- MOZ_ASSERT(isFinished());
- MOZ_ASSERT(!isDynamicallyLinked());
- dynamicallyLinked_ = true;
- nextLinked_ = rt->linkedAsmJSModules;
- prevLinked_ = &rt->linkedAsmJSModules;
- if (nextLinked_)
- nextLinked_->prevLinked_ = &nextLinked_;
- rt->linkedAsmJSModules = this;
- MOZ_ASSERT(isDynamicallyLinked());
- }
-
- void initHeap(Handle<ArrayBufferObjectMaybeShared*> heap, JSContext* cx);
- bool changeHeap(Handle<ArrayBufferObject*> newHeap, JSContext* cx);
- bool detachHeap(JSContext* cx);
-
- bool clone(JSContext* cx, ScopedJSDeletePtr<AsmJSModule>* moduleOut) const;
-
- /*************************************************************************/
- // Functions that can be called after dynamic linking succeeds:
-
- AsmJSModule* nextLinked() const {
- MOZ_ASSERT(isDynamicallyLinked());
- return nextLinked_;
- }
- bool hasDetachedHeap() const {
- MOZ_ASSERT(isDynamicallyLinked());
- return hasArrayView() && !heapDatum();
- }
- CodePtr entryTrampoline(const ExportedFunction& func) const {
- MOZ_ASSERT(isDynamicallyLinked());
- MOZ_ASSERT(!func.isChangeHeap());
- return JS_DATA_TO_FUNC_PTR(CodePtr, code_ + func.pod.codeOffset_);
- }
- uint8_t* interruptExit() const {
- MOZ_ASSERT(isDynamicallyLinked());
- return interruptExit_;
- }
- uint8_t* outOfBoundsExit() const {
- MOZ_ASSERT(isDynamicallyLinked());
- return outOfBoundsExit_;
- }
- SharedMem<uint8_t*> maybeHeap() const {
- MOZ_ASSERT(isDynamicallyLinked());
- return hasArrayView() && isSharedView() ? SharedMem<uint8_t*>::shared(heapDatum())
- : SharedMem<uint8_t*>::unshared(heapDatum());
- }
- ArrayBufferObjectMaybeShared* maybeHeapBufferObject() const {
- MOZ_ASSERT(isDynamicallyLinked());
- return maybeHeap_;
- }
- size_t heapLength() const;
- bool profilingEnabled() const {
- MOZ_ASSERT(isDynamicallyLinked());
- return profilingEnabled_;
- }
- void setProfilingEnabled(bool enabled, JSContext* cx);
- void setInterrupted(bool interrupted) {
- MOZ_ASSERT(isDynamicallyLinked());
- interrupted_ = interrupted;
- }
+ bool clone(JSContext* cx, HandleAsmJSModule moduleObj) const;
+ void addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* asmJSModuleCode,
+ size_t* asmJSModuleData);
};
// Store the just-parsed module in the cache using AsmJSCacheOps.
extern JS::AsmJSCacheResult
StoreAsmJSModuleInCache(AsmJSParser& parser,
const AsmJSModule& module,
ExclusiveContext* cx);
// Attempt to load the asm.js module that is about to be parsed from the cache
-// using AsmJSCacheOps. On cache hit, *module will be non-null. Note: the
-// return value indicates whether or not an error was encountered, not whether
-// there was a cache hit.
+// using AsmJSCacheOps. The return value indicates whether an error was
+// reported. The loadedFromCache outparam indicates whether the module was
+// successfully loaded and stored in moduleObj.
extern bool
-LookupAsmJSModuleInCache(ExclusiveContext* cx,
- AsmJSParser& parser,
- ScopedJSDeletePtr<AsmJSModule>* module,
- ScopedJSFreePtr<char>* compilationTimeReport);
+LookupAsmJSModuleInCache(ExclusiveContext* cx, AsmJSParser& parser, HandleAsmJSModule moduleObj,
+ bool* loadedFromCache, UniqueChars* compilationTimeReport);
// This function must be called for every detached ArrayBuffer.
extern bool
OnDetachAsmJSArrayBuffer(JSContext* cx, Handle<ArrayBufferObject*> buffer);
// An AsmJSModuleObject is an internal implementation object (i.e., not exposed
// directly to user script) which manages the lifetime of an AsmJSModule. A
// JSObject is necessary since we want LinkAsmJS/CallAsmJS JSFunctions to be
// able to point to their module via their extended slots.
class AsmJSModuleObject : public NativeObject
{
static const unsigned MODULE_SLOT = 0;
public:
static const unsigned RESERVED_SLOTS = 1;
- // On success, return an AsmJSModuleClass JSObject that has taken ownership
- // (and release()ed) the given module.
- static AsmJSModuleObject* create(ExclusiveContext* cx, ScopedJSDeletePtr<AsmJSModule>* module);
+ static AsmJSModuleObject* create(ExclusiveContext* cx);
+ bool hasModule() const;
+ void setModule(AsmJSModule* module);
AsmJSModule& module() const;
void addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* asmJSModuleCode,
size_t* asmJSModuleData) {
module().addSizeOfMisc(mallocSizeOf, asmJSModuleCode, asmJSModuleData);
}
static const Class class_;
--- a/js/src/asmjs/AsmJSValidate.cpp
+++ b/js/src/asmjs/AsmJSValidate.cpp
@@ -14,17 +14,16 @@
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "asmjs/AsmJSValidate.h"
#include "mozilla/Move.h"
-#include "mozilla/UniquePtr.h"
#include "jsmath.h"
#include "jsprf.h"
#include "jsutil.h"
#include "asmjs/AsmJSLink.h"
#include "asmjs/AsmJSModule.h"
#include "asmjs/WasmGenerator.h"
@@ -44,17 +43,16 @@ using namespace js::frontend;
using namespace js::jit;
using namespace js::wasm;
using mozilla::HashGeneric;
using mozilla::IsNaN;
using mozilla::IsNegativeZero;
using mozilla::Move;
using mozilla::PositiveInfinity;
-using mozilla::UniquePtr;
using JS::AsmJSOption;
using JS::GenericNaN;
/*****************************************************************************/
// ParseNode utilities
static inline ParseNode*
NextNode(ParseNode* pn)
@@ -1137,23 +1135,23 @@ class MOZ_STACK_CLASS ModuleValidator
ArrayView(PropertyName* name, Scalar::Type type)
: name(name), type(type)
{}
PropertyName* name;
Scalar::Type type;
};
- class ExitDescriptor
+ class ImportDescriptor
{
PropertyName* name_;
const LifoSig* sig_;
public:
- ExitDescriptor(PropertyName* name, const LifoSig& sig)
+ ImportDescriptor(PropertyName* name, const LifoSig& sig)
: name_(name), sig_(&sig)
{}
PropertyName* name() const {
return name_;
}
const LifoSig& sig() const {
return *sig_;
@@ -1162,71 +1160,72 @@ class MOZ_STACK_CLASS ModuleValidator
struct Lookup { // implements HashPolicy
PropertyName* name_;
const MallocSig& sig_;
Lookup(PropertyName* name, const MallocSig& sig) : name_(name), sig_(sig) {}
};
static HashNumber hash(const Lookup& l) {
return HashGeneric(l.name_, l.sig_.hash());
}
- static bool match(const ExitDescriptor& lhs, const Lookup& rhs) {
+ static bool match(const ImportDescriptor& lhs, const Lookup& rhs) {
return lhs.name_ == rhs.name_ && *lhs.sig_ == rhs.sig_;
}
};
private:
typedef HashMap<PropertyName*, Global*> GlobalMap;
typedef HashMap<PropertyName*, MathBuiltin> MathNameMap;
typedef HashMap<PropertyName*, AsmJSAtomicsBuiltinFunction> AtomicsNameMap;
typedef HashMap<PropertyName*, AsmJSSimdOperation> SimdOperationNameMap;
typedef Vector<ArrayView> ArrayViewVector;
public:
- typedef HashMap<ExitDescriptor, unsigned, ExitDescriptor> ExitMap;
+ typedef HashMap<ImportDescriptor, unsigned, ImportDescriptor> ImportMap;
private:
- ExclusiveContext* cx_;
- AsmJSParser& parser_;
-
- ModuleGenerator mg_;
-
- LifoAlloc validationLifo_;
- FuncVector functions_;
- FuncPtrTableVector funcPtrTables_;
- GlobalMap globals_;
- ArrayViewVector arrayViews_;
- ExitMap exits_;
-
- MathNameMap standardLibraryMathNames_;
- AtomicsNameMap standardLibraryAtomicsNames_;
- SimdOperationNameMap standardLibrarySimdOpNames_;
-
- ParseNode* moduleFunctionNode_;
- PropertyName* moduleFunctionName_;
-
- UniquePtr<char[], JS::FreePolicy> errorString_;
- uint32_t errorOffset_;
- bool errorOverRecursed_;
-
- bool canValidateChangeHeap_;
- bool hasChangeHeap_;
- bool supportsSimd_;
- bool atomicsPresent_;
+ ExclusiveContext* cx_;
+ AsmJSParser& parser_;
+
+ ModuleGenerator mg_;
+ AsmJSModule* module_;
+
+ LifoAlloc validationLifo_;
+ FuncVector functions_;
+ FuncPtrTableVector funcPtrTables_;
+ GlobalMap globals_;
+ ArrayViewVector arrayViews_;
+ ImportMap imports_;
+
+ MathNameMap standardLibraryMathNames_;
+ AtomicsNameMap standardLibraryAtomicsNames_;
+ SimdOperationNameMap standardLibrarySimdOpNames_;
+
+ ParseNode* moduleFunctionNode_;
+ PropertyName* moduleFunctionName_;
+
+ UniqueChars errorString_;
+ uint32_t errorOffset_;
+ bool errorOverRecursed_;
+
+ bool canValidateChangeHeap_;
+ bool hasChangeHeap_;
+ bool supportsSimd_;
+ bool atomicsPresent_;
public:
ModuleValidator(ExclusiveContext* cx, AsmJSParser& parser)
: cx_(cx),
parser_(parser),
mg_(cx),
validationLifo_(VALIDATION_LIFO_DEFAULT_CHUNK_SIZE),
functions_(cx),
funcPtrTables_(cx),
globals_(cx),
arrayViews_(cx),
- exits_(cx),
+ imports_(cx),
standardLibraryMathNames_(cx),
standardLibraryAtomicsNames_(cx),
standardLibrarySimdOpNames_(cx),
moduleFunctionNode_(parser.pc->maybeFunction),
moduleFunctionName_(nullptr),
errorString_(nullptr),
errorOffset_(UINT32_MAX),
errorOverRecursed_(false),
@@ -1276,18 +1275,18 @@ class MOZ_STACK_CLASS ModuleValidator
JSAtom* atom = Atomize(cx_, name, strlen(name));
if (!atom)
return false;
return standardLibrarySimdOpNames_.putNew(atom->asPropertyName(), op);
}
public:
- bool init() {
- if (!globals_.init() || !exits_.init())
+ bool init(HandleAsmJSModule moduleObj) {
+ if (!globals_.init() || !imports_.init())
return false;
if (!standardLibraryMathNames_.init() ||
!addStandardLibraryMathName("sin", AsmJSMathBuiltin_sin) ||
!addStandardLibraryMathName("cos", AsmJSMathBuiltin_cos) ||
!addStandardLibraryMathName("tan", AsmJSMathBuiltin_tan) ||
!addStandardLibraryMathName("asin", AsmJSMathBuiltin_asin) ||
!addStandardLibraryMathName("acos", AsmJSMathBuiltin_acos) ||
@@ -1344,183 +1343,196 @@ class MOZ_STACK_CLASS ModuleValidator
uint32_t srcStart = parser_.pc->maybeFunction->pn_body->pn_pos.begin;
uint32_t srcBodyStart = tokenStream().currentToken().pos.end;
// "use strict" should be added to the source if we are in an implicit
// strict context, see also comment above addUseStrict in
// js::FunctionToString.
bool strict = parser_.pc->sc->strict() && !parser_.pc->sc->hasExplicitUseStrict();
- return mg_.init(parser_.ss, srcStart, srcBodyStart, strict);
- }
-
- bool finish(ScopedJSDeletePtr<AsmJSModule>* module, SlowFunctionVector* slowFuncs) {
- return mg_.finish(parser_.tokenStream, module, slowFuncs);
+ module_ = cx_->new_<AsmJSModule>(parser_.ss, srcStart, srcBodyStart, strict);
+ if (!module_)
+ return false;
+
+ moduleObj->setModule(module_);
+
+ return mg_.init();
+ }
+
+ bool finish(SlowFunctionVector* slowFuncs) {
+ uint32_t endBeforeCurly = tokenStream().currentToken().pos.end;
+ TokenPos pos;
+ JS_ALWAYS_TRUE(tokenStream().peekTokenPos(&pos, TokenStream::Operand));
+ uint32_t endAfterCurly = pos.end;
+
+ auto usesHeap = Module::HeapBool(module_->hasArrayView());
+ auto sharedHeap = Module::SharedBool(module_->isSharedView());
+ UniqueChars filename = make_string_copy(parser_.ss->filename());
+ if (!filename)
+ return false;
+
+ UniqueStaticLinkData linkData;
+ Module* wasm = mg_.finish(usesHeap, sharedHeap, Move(filename), &linkData, slowFuncs);
+ if (!wasm)
+ return false;
+
+ module_->finish(wasm, Move(linkData), endBeforeCurly, endAfterCurly);
+ return true;
}
// Mutable interface.
void initModuleFunctionName(PropertyName* name) { moduleFunctionName_ = name; }
void initGlobalArgumentName(PropertyName* n) { module().initGlobalArgumentName(n); }
void initImportArgumentName(PropertyName* n) { module().initImportArgumentName(n); }
void initBufferArgumentName(PropertyName* n) { module().initBufferArgumentName(n); }
- bool addGlobalVarInit(PropertyName* varName, const NumLit& lit, bool isConst) {
- // The type of a const is the exact type of the literal (since its value
- // cannot change) which is more precise than the corresponding vartype.
- Type type = isConst ? Type::lit(lit) : Type::var(lit.type());
+ bool addGlobalVarInit(PropertyName* var, const NumLit& lit, bool isConst) {
uint32_t globalDataOffset;
- if (!module().addGlobalVarInit(lit.value(), &globalDataOffset))
+ if (!mg_.allocateGlobalVar(lit.type(), &globalDataOffset))
return false;
Global::Which which = isConst ? Global::ConstantLiteral : Global::Variable;
Global* global = validationLifo_.new_<Global>(which);
if (!global)
return false;
global->u.varOrConst.globalDataOffset_ = globalDataOffset;
- global->u.varOrConst.type_ = type.which();
+ global->u.varOrConst.type_ = (isConst ? Type::lit(lit) : Type::var(lit.type())).which();
if (isConst)
global->u.varOrConst.literalValue_ = lit;
- return globals_.putNew(varName, global);
- }
- bool addGlobalVarImport(PropertyName* varName, PropertyName* fieldName, ValType importType,
- bool isConst)
- {
+ return globals_.putNew(var, global) &&
+ module().addGlobalVarInit(lit.value(), globalDataOffset);
+ }
+ bool addGlobalVarImport(PropertyName* var, PropertyName* field, ValType type, bool isConst) {
uint32_t globalDataOffset;
- if (!module().addGlobalVarImport(fieldName, importType, &globalDataOffset))
+ if (!mg_.allocateGlobalVar(type, &globalDataOffset))
return false;
Global::Which which = isConst ? Global::ConstantImport : Global::Variable;
Global* global = validationLifo_.new_<Global>(which);
if (!global)
return false;
global->u.varOrConst.globalDataOffset_ = globalDataOffset;
- global->u.varOrConst.type_ = Type::var(importType).which();
- return globals_.putNew(varName, global);
- }
- bool addArrayView(PropertyName* varName, Scalar::Type vt, PropertyName* maybeField)
- {
- if (!arrayViews_.append(ArrayView(varName, vt)))
+ global->u.varOrConst.type_ = Type::var(type).which();
+ return globals_.putNew(var, global) &&
+ module().addGlobalVarImport(field, type, globalDataOffset);
+ }
+ bool addArrayView(PropertyName* var, Scalar::Type vt, PropertyName* maybeField) {
+ if (!arrayViews_.append(ArrayView(var, vt)))
return false;
Global* global = validationLifo_.new_<Global>(Global::ArrayView);
if (!global)
return false;
- if (!module().addArrayView(vt, maybeField))
- return false;
global->u.viewInfo.viewType_ = vt;
- return globals_.putNew(varName, global);
- }
- bool addMathBuiltinFunction(PropertyName* varName, AsmJSMathBuiltinFunction func,
- PropertyName* fieldName)
+ return globals_.putNew(var, global) &&
+ module().addArrayView(vt, maybeField);
+ }
+ bool addMathBuiltinFunction(PropertyName* var, AsmJSMathBuiltinFunction func,
+ PropertyName* field)
{
- if (!module().addMathBuiltinFunction(func, fieldName))
- return false;
Global* global = validationLifo_.new_<Global>(Global::MathBuiltinFunction);
if (!global)
return false;
global->u.mathBuiltinFunc_ = func;
- return globals_.putNew(varName, global);
+ return globals_.putNew(var, global) &&
+ module().addMathBuiltinFunction(func, field);
}
private:
- bool addGlobalDoubleConstant(PropertyName* varName, double constant) {
+ bool addGlobalDoubleConstant(PropertyName* var, double constant) {
Global* global = validationLifo_.new_<Global>(Global::ConstantLiteral);
if (!global)
return false;
global->u.varOrConst.type_ = Type::Double;
global->u.varOrConst.literalValue_ = NumLit(NumLit::Double, DoubleValue(constant));
- return globals_.putNew(varName, global);
+ return globals_.putNew(var, global);
}
public:
- bool addMathBuiltinConstant(PropertyName* varName, double constant, PropertyName* fieldName) {
- if (!module().addMathBuiltinConstant(constant, fieldName))
- return false;
- return addGlobalDoubleConstant(varName, constant);
- }
- bool addGlobalConstant(PropertyName* varName, double constant, PropertyName* fieldName) {
- if (!module().addGlobalConstant(constant, fieldName))
- return false;
- return addGlobalDoubleConstant(varName, constant);
- }
- bool addAtomicsBuiltinFunction(PropertyName* varName, AsmJSAtomicsBuiltinFunction func,
- PropertyName* fieldName)
+ bool addMathBuiltinConstant(PropertyName* var, double constant, PropertyName* field) {
+ return addGlobalDoubleConstant(var, constant) &&
+ module().addMathBuiltinConstant(constant, field);
+ }
+ bool addGlobalConstant(PropertyName* var, double constant, PropertyName* field) {
+ return addGlobalDoubleConstant(var, constant) &&
+ module().addGlobalConstant(constant, field);
+ }
+ bool addAtomicsBuiltinFunction(PropertyName* var, AsmJSAtomicsBuiltinFunction func,
+ PropertyName* field)
{
- if (!module().addAtomicsBuiltinFunction(func, fieldName))
- return false;
Global* global = validationLifo_.new_<Global>(Global::AtomicsBuiltinFunction);
if (!global)
return false;
atomicsPresent_ = true;
global->u.atomicsBuiltinFunc_ = func;
- return globals_.putNew(varName, global);
- }
- bool addSimdCtor(PropertyName* varName, AsmJSSimdType type, PropertyName* fieldName) {
- if (!module().addSimdCtor(type, fieldName))
- return false;
+ return globals_.putNew(var, global) &&
+ module().addAtomicsBuiltinFunction(func, field);
+ }
+ bool addSimdCtor(PropertyName* var, AsmJSSimdType type, PropertyName* field) {
Global* global = validationLifo_.new_<Global>(Global::SimdCtor);
if (!global)
return false;
global->u.simdCtorType_ = type;
- return globals_.putNew(varName, global);
- }
- bool addSimdOperation(PropertyName* varName, AsmJSSimdType type, AsmJSSimdOperation op,
- PropertyName* typeVarName, PropertyName* opName)
+ return globals_.putNew(var, global) &&
+ module().addSimdCtor(type, field);
+ }
+ bool addSimdOperation(PropertyName* var, AsmJSSimdType type, AsmJSSimdOperation op,
+ PropertyName* opName)
{
- if (!module().addSimdOperation(type, op, opName))
- return false;
Global* global = validationLifo_.new_<Global>(Global::SimdOperation);
if (!global)
return false;
global->u.simdOp.type_ = type;
global->u.simdOp.which_ = op;
- return globals_.putNew(varName, global);
+ return globals_.putNew(var, global) &&
+ module().addSimdOperation(type, op, opName);
}
bool addByteLength(PropertyName* name) {
canValidateChangeHeap_ = true;
- if (!module().addByteLength())
- return false;
Global* global = validationLifo_.new_<Global>(Global::ByteLength);
- return global && globals_.putNew(name, global);
+ return global && globals_.putNew(name, global) &&
+ module().addByteLength();
}
bool addChangeHeap(PropertyName* name, ParseNode* fn, uint32_t mask, uint32_t min, uint32_t max) {
hasChangeHeap_ = true;
- module().addChangeHeap(mask, min, max);
Global* global = validationLifo_.new_<Global>(Global::ChangeHeap);
if (!global)
return false;
global->u.changeHeap.srcBegin_ = fn->pn_pos.begin;
global->u.changeHeap.srcEnd_ = fn->pn_pos.end;
- return globals_.putNew(name, global);
- }
- bool addArrayViewCtor(PropertyName* varName, Scalar::Type vt, PropertyName* fieldName) {
+ return globals_.putNew(name, global) &&
+ module().addChangeHeap(mask, min, max);
+ }
+ bool addArrayViewCtor(PropertyName* var, Scalar::Type vt, PropertyName* field) {
Global* global = validationLifo_.new_<Global>(Global::ArrayViewCtor);
if (!global)
return false;
- if (!module().addArrayViewCtor(vt, fieldName))
- return false;
global->u.viewInfo.viewType_ = vt;
- return globals_.putNew(varName, global);
- }
- bool addFFI(PropertyName* varName, PropertyName* field) {
+ return globals_.putNew(var, global) &&
+ module().addArrayViewCtor(vt, field);
+ }
+ bool addFFI(PropertyName* var, PropertyName* field) {
Global* global = validationLifo_.new_<Global>(Global::FFI);
if (!global)
return false;
uint32_t index;
if (!module().addFFI(field, &index))
return false;
global->u.ffiIndex_ = index;
- return globals_.putNew(varName, global);
- }
- bool addExportedFunction(const Func& func, PropertyName* maybeFieldName) {
+ return globals_.putNew(var, global);
+ }
+ bool addExport(const Func& func, PropertyName* maybeFieldName) {
MallocSig::ArgVector args;
if (!args.appendAll(func.sig().args()))
return false;
MallocSig sig(Move(args), func.sig().ret());
- return module().addExportedFunction(func.name(), func.index(), func.srcBegin(),
- func.srcEnd(), maybeFieldName, Move(sig));
- }
- bool addExportedChangeHeap(PropertyName* name, const Global& g, PropertyName* maybeFieldName) {
- return module().addExportedChangeHeap(name, g.changeHeapSrcBegin(), g.changeHeapSrcEnd(),
- maybeFieldName);
+ uint32_t wasmIndex;
+ if (!mg_.declareExport(Move(sig), func.index(), &wasmIndex))
+ return false;
+ return module().addExport(func.name(), maybeFieldName, wasmIndex,
+ func.srcBegin(), func.srcEnd());
+ }
+ bool addChangeHeapExport(PropertyName* name, const Global& g, PropertyName* maybeFieldName) {
+ return module().addExport(name, maybeFieldName, AsmJSModule::Export::ChangeHeap,
+ g.changeHeapSrcBegin(), g.changeHeapSrcEnd());
}
private:
const LifoSig* getLifoSig(const LifoSig& sig) {
return &sig;
}
const LifoSig* getLifoSig(const MallocSig& sig) {
return mg_.newLifoSig(sig);
}
@@ -1555,39 +1567,41 @@ class MOZ_STACK_CLASS ModuleValidator
if (!globals_.putNew(name, global))
return false;
const LifoSig* lifoSig = getLifoSig(sig);
if (!lifoSig)
return false;
FuncPtrTable* t = validationLifo_.new_<FuncPtrTable>(cx_, name, firstUse, *lifoSig, mask);
return t && funcPtrTables_.append(t);
}
- bool defineFuncPtrTable(uint32_t funcPtrTableIndex, ModuleGenerator::FuncIndexVector&& elems) {
+ bool defineFuncPtrTable(uint32_t funcPtrTableIndex, const Vector<uint32_t>& elems) {
FuncPtrTable& table = *funcPtrTables_[funcPtrTableIndex];
if (table.defined())
return false;
table.define();
- return mg_.defineFuncPtrTable(funcPtrTableIndex, Move(elems));
- }
- bool addExit(PropertyName* name, MallocSig&& sig, unsigned ffiIndex, unsigned* exitIndex,
+ mg_.defineFuncPtrTable(funcPtrTableIndex, elems);
+ return true;
+ }
+ bool addImport(PropertyName* name, MallocSig&& sig, unsigned ffiIndex, unsigned* importIndex,
const LifoSig** lifoSig)
{
- ExitDescriptor::Lookup lookup(name, sig);
- ExitMap::AddPtr p = exits_.lookupForAdd(lookup);
+ ImportDescriptor::Lookup lookup(name, sig);
+ ImportMap::AddPtr p = imports_.lookupForAdd(lookup);
if (p) {
*lifoSig = &p->key().sig();
- *exitIndex = p->value();
+ *importIndex = p->value();
return true;
}
*lifoSig = getLifoSig(sig);
if (!*lifoSig)
return false;
- if (!module().addExit(Move(sig), ffiIndex, exitIndex))
+ if (!mg_.declareImport(Move(sig), importIndex))
return false;
- return exits_.add(p, ExitDescriptor(name, **lifoSig), *exitIndex);
+ return imports_.add(p, ImportDescriptor(name, **lifoSig), *importIndex) &&
+ module().addImport(ffiIndex, *importIndex);
}
bool tryOnceToValidateChangeHeap() {
bool ret = canValidateChangeHeap_;
canValidateChangeHeap_ = false;
return ret;
}
bool hasChangeHeap() const {
@@ -1609,17 +1623,17 @@ class MOZ_STACK_CLASS ModuleValidator
return !!errorString_;
}
bool failOffset(uint32_t offset, const char* str) {
MOZ_ASSERT(!hasAlreadyFailed());
MOZ_ASSERT(errorOffset_ == UINT32_MAX);
MOZ_ASSERT(str);
errorOffset_ = offset;
- errorString_ = DuplicateString(cx_, str);
+ errorString_ = make_string_copy(str);
return false;
}
bool fail(ParseNode* pn, const char* str) {
return failOffset(pn->pn_pos.begin, str);
}
bool failfVAOffset(uint32_t offset, const char* fmt, va_list ap) {
@@ -1665,17 +1679,17 @@ class MOZ_STACK_CLASS ModuleValidator
return false;
}
// Read-only interface
ExclusiveContext* cx() const { return cx_; }
ParseNode* moduleFunctionNode() const { return moduleFunctionNode_; }
PropertyName* moduleFunctionName() const { return moduleFunctionName_; }
ModuleGenerator& mg() { return mg_; }
- AsmJSModule& module() const { return mg_.module(); }
+ AsmJSModule& module() const { return *module_; }
AsmJSParser& parser() const { return parser_; }
TokenStream& tokenStream() const { return parser_.tokenStream; }
bool supportsSimd() const { return supportsSimd_; }
unsigned numArrayViews() const {
return arrayViews_.length();
}
const ArrayView& arrayView(unsigned i) const {
@@ -1726,19 +1740,29 @@ class MOZ_STACK_CLASS ModuleValidator
bool lookupStandardSimdOpName(PropertyName* name, AsmJSSimdOperation* op) const {
if (SimdOperationNameMap::Ptr p = standardLibrarySimdOpNames_.lookup(name)) {
*op = p->value();
return true;
}
return false;
}
- void startFunctionBodies() {
- if (atomicsPresent_)
+ bool startFunctionBodies() {
+ if (atomicsPresent_) {
+#if defined(ENABLE_SHARED_ARRAY_BUFFER)
module().setViewsAreShared();
+#else
+ return failOffset(parser_.tokenStream.currentToken().pos.begin,
+ "shared memory and atomics not supported by this build");
+#endif
+ }
+ return true;
+ }
+ bool finishFunctionBodies() {
+ return mg_.finishFuncs();
}
};
} // namespace
/*****************************************************************************/
// Numeric literal utilities
@@ -2615,26 +2639,25 @@ CheckGlobalSimdImport(ModuleValidator& m
AsmJSSimdType simdType;
if (!IsSimdTypeName(m, field, &simdType))
return m.failName(initNode, "'%s' is not a standard SIMD type", field);
return m.addSimdCtor(varName, simdType, field);
}
static bool
CheckGlobalSimdOperationImport(ModuleValidator& m, const ModuleValidator::Global* global,
- ParseNode* initNode, PropertyName* varName, PropertyName* ctorVarName,
- PropertyName* opName)
+ ParseNode* initNode, PropertyName* varName, PropertyName* opName)
{
AsmJSSimdType simdType = global->simdCtorType();
AsmJSSimdOperation simdOp;
if (!m.lookupStandardSimdOpName(opName, &simdOp))
return m.failName(initNode, "'%s' is not a standard SIMD operation", opName);
if (!IsSimdValidOperationType(simdType, simdOp))
return m.failName(initNode, "'%s' is not an operation supported by the SIMD type", opName);
- return m.addSimdOperation(varName, simdType, simdOp, ctorVarName, opName);
+ return m.addSimdOperation(varName, simdType, simdOp, opName);
}
static bool
CheckGlobalDotImport(ModuleValidator& m, PropertyName* varName, ParseNode* initNode)
{
ParseNode* base = DotBase(initNode);
PropertyName* field = DotMember(initNode);
@@ -2686,17 +2709,17 @@ CheckGlobalDotImport(ModuleValidator& m,
const ModuleValidator::Global* global = m.lookupGlobal(base->name());
if (!global)
return m.failName(initNode, "%s not found in module global scope", base->name());
if (!global->isSimdCtor())
return m.failName(base, "expecting SIMD constructor name, got %s", field);
- return CheckGlobalSimdOperationImport(m, global, initNode, varName, base->name(), field);
+ return CheckGlobalSimdOperationImport(m, global, initNode, varName, field);
}
static bool
CheckModuleGlobal(ModuleValidator& m, ParseNode* var, bool isConst)
{
if (!IsDefinition(var))
return m.fail(var, "import variable names must be unique");
@@ -4014,18 +4037,17 @@ CheckFuncPtrCall(FunctionValidator& f, P
return false;
MallocSig sig(Move(args), ret);
uint32_t funcPtrTableIndex;
if (!CheckFuncPtrTableAgainstExisting(f.m(), tableNode, name, sig, mask, &funcPtrTableIndex))
return false;
- uint32_t globalDataOffset = f.m().module().funcPtrTable(funcPtrTableIndex).globalDataOffset();
- f.patch32(globalDataOffsetAt, globalDataOffset);
+ f.patch32(globalDataOffsetAt, f.m().mg().funcPtrTableGlobalDataOffset(funcPtrTableIndex));
f.patchSig(sigAt, &f.m().funcPtrTable(funcPtrTableIndex).sig());
*type = Type::ret(ret);
return true;
}
static bool
CheckIsExternType(FunctionValidator& f, ParseNode* argNode, Type type)
@@ -4058,34 +4080,33 @@ CheckFFICall(FunctionValidator& f, Parse
case ExprType::F32: f.writeOp(F32::CallImport); break;
case ExprType::F64: f.writeOp(F64::CallImport); break;
case ExprType::I32x4: f.writeOp(I32X4::CallImport); break;
case ExprType::F32x4: f.writeOp(F32X4::CallImport); break;
}
// Global data offset
size_t offsetAt = f.temp32();
- // Pointer to the exit's signature in the module's lifo
+ // Pointer to the import's signature in the module's lifo
size_t sigAt = f.tempPtr();
// Call node position (asm.js specific)
WriteCallLineCol(f, callNode);
MallocSig::ArgVector args;
if (!CheckCallArgs<CheckIsExternType>(f, callNode, &args))
return false;
MallocSig sig(Move(args), ret);
- unsigned exitIndex = 0;
+ unsigned importIndex = 0;
const LifoSig* lifoSig = nullptr;
- if (!f.m().addExit(calleeName, Move(sig), ffiIndex, &exitIndex, &lifoSig))
- return false;
-
- JS_STATIC_ASSERT(offsetof(AsmJSModule::ExitDatum, exit) == 0);
- f.patch32(offsetAt, f.module().exit(exitIndex).globalDataOffset());
+ if (!f.m().addImport(calleeName, Move(sig), ffiIndex, &importIndex, &lifoSig))
+ return false;
+
+ f.patch32(offsetAt, f.m().mg().importExitGlobalDataOffset(importIndex));
f.patchSig(sigAt, lifoSig);
*type = Type::ret(ret);
return true;
}
static bool
CheckFloatCoercionArg(FunctionValidator& f, ParseNode* inputNode, Type inputType,
size_t opcodeAt)
@@ -5742,17 +5763,17 @@ CheckExprStatement(FunctionValidator& f,
enum class InterruptCheckPosition {
Head,
Loop
};
static void
MaybeAddInterruptCheck(FunctionValidator& f, InterruptCheckPosition pos, ParseNode* pn)
{
- if (f.m().module().usesSignalHandlersForInterrupt())
+ if (f.m().mg().args().useSignalHandlersForInterrupt)
return;
switch (pos) {
case InterruptCheckPosition::Head: f.writeOp(Stmt::InterruptCheckHead); break;
case InterruptCheckPosition::Loop: f.writeOp(Stmt::InterruptCheckLoop); break;
}
unsigned lineno = 0, column = 0;
@@ -6568,17 +6589,17 @@ CheckFuncPtrTable(ModuleValidator& m, Pa
unsigned length = ListLength(arrayLiteral);
if (!IsPowerOfTwo(length))
return m.failf(arrayLiteral, "function-pointer table length must be a power of 2 (is %u)", length);
unsigned mask = length - 1;
- ModuleGenerator::FuncIndexVector elems;
+ Vector<uint32_t> elemFuncIndices(m.cx());
const LifoSig* sig = nullptr;
for (ParseNode* elem = ListHead(arrayLiteral); elem; elem = NextNode(elem)) {
if (!elem->isKind(PNK_NAME))
return m.fail(elem, "function-pointer table's elements must be names of functions");
PropertyName* funcName = elem->name();
const ModuleValidator::Func* func = m.lookupFunction(funcName);
if (!func)
@@ -6586,25 +6607,25 @@ CheckFuncPtrTable(ModuleValidator& m, Pa
if (sig) {
if (*sig != func->sig())
return m.fail(elem, "all functions in table must have same signature");
} else {
sig = &func->sig();
}
- if (!elems.append(func->index()))
+ if (!elemFuncIndices.append(func->index()))
return false;
}
uint32_t funcPtrTableIndex;
if (!CheckFuncPtrTableAgainstExisting(m, var, var->name(), *sig, mask, &funcPtrTableIndex))
return false;
- if (!m.defineFuncPtrTable(funcPtrTableIndex, Move(elems)))
+ if (!m.defineFuncPtrTable(funcPtrTableIndex, elemFuncIndices))
return m.fail(var, "duplicate function-pointer definition");
return true;
}
static bool
CheckFuncPtrTables(ModuleValidator& m)
{
@@ -6639,20 +6660,20 @@ CheckModuleExportFunction(ModuleValidato
return m.fail(pn, "expected name of exported function");
PropertyName* funcName = pn->name();
const ModuleValidator::Global* global = m.lookupGlobal(funcName);
if (!global)
return m.failName(pn, "exported function name '%s' not found", funcName);
if (global->which() == ModuleValidator::Global::Function)
- return m.addExportedFunction(m.function(global->funcIndex()), maybeFieldName);
+ return m.addExport(m.function(global->funcIndex()), maybeFieldName);
if (global->which() == ModuleValidator::Global::ChangeHeap)
- return m.addExportedChangeHeap(funcName, *global, maybeFieldName);
+ return m.addChangeHeapExport(funcName, *global, maybeFieldName);
return m.failName(pn, "'%s' is not a function", funcName);
}
static bool
CheckModuleExportObject(ModuleValidator& m, ParseNode* object)
{
MOZ_ASSERT(object->isKind(PNK_OBJECT));
@@ -6725,24 +6746,23 @@ CheckModuleEnd(ModuleValidator &m)
"top-level export (return) must be the last statement");
}
m.parser().tokenStream.ungetToken();
return true;
}
static bool
-CheckModule(ExclusiveContext* cx, AsmJSParser& parser, ParseNode* stmtList,
- ScopedJSDeletePtr<AsmJSModule>* module, unsigned* time,
- SlowFunctionVector* slowFuncs)
+CheckModule(ExclusiveContext* cx, AsmJSParser& parser, ParseNode* stmtList, HandleAsmJSModule obj,
+ unsigned* time, SlowFunctionVector* slowFuncs)
{
int64_t before = PRMJ_Now();
ModuleValidator m(cx, parser);
- if (!m.init())
+ if (!m.init(obj))
return false;
if (PropertyName* moduleFunctionName = FunctionName(m.moduleFunctionNode())) {
if (!CheckModuleLevelName(m, m.moduleFunctionNode(), moduleFunctionName))
return false;
m.initModuleFunctionName(moduleFunctionName);
}
@@ -6756,66 +6776,63 @@ CheckModule(ExclusiveContext* cx, AsmJSP
return false;
if (!CheckModuleProcessingDirectives(m))
return false;
if (!CheckModuleGlobals(m))
return false;
- m.startFunctionBodies();
-
-#if !defined(ENABLE_SHARED_ARRAY_BUFFER)
- if (m.usesSharedMemory())
- return m.failOffset(m.parser().tokenStream.currentToken().pos.begin,
- "shared memory and atomics not supported by this build");
-#endif
+ if (!m.startFunctionBodies())
+ return false;
if (!CheckFunctions(m))
return false;
+ if (!m.finishFunctionBodies())
+ return false;
+
if (!CheckFuncPtrTables(m))
return false;
if (!CheckModuleReturn(m))
return false;
if (!CheckModuleEnd(m))
return false;
- if (!m.finish(module, slowFuncs))
+ if (!m.finish(slowFuncs))
return false;
*time = (PRMJ_Now() - before) / PRMJ_USEC_PER_MSEC;
return true;
}
-static bool
-BuildConsoleMessage(ExclusiveContext* cx, AsmJSModule& module,
- unsigned time, const SlowFunctionVector& slowFuncs,
- JS::AsmJSCacheResult cacheResult, ScopedJSFreePtr<char>* out)
+static UniqueChars
+BuildConsoleMessage(ExclusiveContext* cx, AsmJSModule& module, unsigned time,
+ const SlowFunctionVector& slowFuncs, JS::AsmJSCacheResult cacheResult)
{
#ifndef JS_MORE_DETERMINISTIC
- ScopedJSFreePtr<char> slowText;
+ UniqueChars slowText;
if (!slowFuncs.empty()) {
slowText.reset(JS_smprintf("; %d functions compiled slowly: ", slowFuncs.length()));
if (!slowText)
- return true;
+ return nullptr;
for (unsigned i = 0; i < slowFuncs.length(); i++) {
const SlowFunction& func = slowFuncs[i];
JSAutoByteString name;
if (!AtomToPrintableString(cx, func.name, &name))
- return false;
+ return nullptr;
slowText.reset(JS_smprintf("%s%s:%u:%u (%ums)%s", slowText.get(),
name.ptr(), func.line, func.column, func.ms,
i+1 < slowFuncs.length() ? ", " : ""));
if (!slowText)
- return true;
+ return nullptr;
}
}
const char* cacheString = "";
switch (cacheResult) {
case JS::AsmJSCache_Success:
cacheString = "stored in cache";
break;
@@ -6844,21 +6861,21 @@ BuildConsoleMessage(ExclusiveContext* cx
case JS::AsmJSCache_InternalError:
cacheString = "unable to store in cache due to internal error (consider filing a bug)";
break;
case JS::AsmJSCache_LIMIT:
MOZ_CRASH("bad AsmJSCacheResult");
break;
}
- out->reset(JS_smprintf("total compilation time %dms; %s%s",
- time, cacheString, slowText ? slowText.get() : ""));
+ return UniqueChars(JS_smprintf("total compilation time %dms; %s%s",
+ time, cacheString, slowText ? slowText.get() : ""));
+#else
+ return make_string_copy("");
#endif
-
- return true;
}
static bool
Warn(AsmJSParser& parser, int errorNumber, const char* str)
{
ParseReportKind reportKind = parser.options().throwOnAsmJSValidationFailureOption &&
errorNumber == JSMSG_USE_ASM_TYPE_FAIL
? ParseError
@@ -6912,52 +6929,50 @@ bool
js::ValidateAsmJS(ExclusiveContext* cx, AsmJSParser& parser, ParseNode* stmtList, bool* validated)
{
*validated = false;
// Various conditions disable asm.js optimizations.
if (!EstablishPreconditions(cx, parser))
return NoExceptionPending(cx);
- ScopedJSDeletePtr<AsmJSModule> module;
- ScopedJSFreePtr<char> message;
+ Rooted<AsmJSModuleObject*> moduleObj(cx, AsmJSModuleObject::create(cx));
+ if (!moduleObj)
+ return false;
// Before spending any time parsing the module, try to look it up in the
// embedding's cache using the chars about to be parsed as the key.
- if (!LookupAsmJSModuleInCache(cx, parser, &module, &message))
+ bool loadedFromCache;
+ UniqueChars message;
+ if (!LookupAsmJSModuleInCache(cx, parser, moduleObj, &loadedFromCache, &message))
return false;
// If not present in the cache, parse, validate and generate code in a
// single linear pass over the chars of the asm.js module.
- if (!module) {
+ if (!loadedFromCache) {
// "Checking" parses, validates and compiles, producing a fully compiled
- // AsmJSModule as result.
+ // AsmJSModuleObject as result.
unsigned time;
SlowFunctionVector slowFuncs(cx);
- if (!CheckModule(cx, parser, stmtList, &module, &time, &slowFuncs))
+ if (!CheckModule(cx, parser, stmtList, moduleObj, &time, &slowFuncs))
return NoExceptionPending(cx);
// Try to store the AsmJSModule in the embedding's cache. The
// AsmJSModule must be stored before static linking since static linking
// specializes the AsmJSModule to the current process's address space
// and therefore must be executed after a cache hit.
- JS::AsmJSCacheResult cacheResult = StoreAsmJSModuleInCache(parser, *module, cx);
- module->staticallyLink(cx);
-
- if (!BuildConsoleMessage(cx, *module, time, slowFuncs, cacheResult, &message))
+ AsmJSModule& module = moduleObj->module();
+ JS::AsmJSCacheResult cacheResult = StoreAsmJSModuleInCache(parser, module, cx);
+ if (!module.staticallyLink(cx))
return false;
- }
-
- // The AsmJSModuleObject isn't directly referenced by user code; it is only
- // referenced (and kept alive by) an internal slot of the asm.js module
- // function generated below and asm.js export functions generated when the
- // asm.js module function is called.
- RootedObject moduleObj(cx, AsmJSModuleObject::create(cx, &module));
- if (!moduleObj)
- return false;
+
+ message = BuildConsoleMessage(cx, module, time, slowFuncs, cacheResult);
+ if (!message)
+ return NoExceptionPending(cx);
+ }
// The module function dynamically links the AsmJSModule when called and
// generates a set of functions wrapping all the exports.
FunctionBox* funbox = parser.pc->maybeFunction->pn_funbox;
RootedFunction moduleFun(cx, NewAsmJSModuleFunction(cx, funbox->function(), moduleObj));
if (!moduleFun)
return false;
--- a/js/src/asmjs/AsmJSValidate.h
+++ b/js/src/asmjs/AsmJSValidate.h
@@ -22,17 +22,16 @@
#include "mozilla/MathAlgorithms.h"
#include <stddef.h>
#include "jsutil.h"
#include "jit/Registers.h"
#include "js/TypeDecls.h"
-#include "vm/NativeObject.h"
namespace js {
class ExclusiveContext;
namespace frontend {
template <typename ParseHandler> class Parser;
template <typename ParseHandler> struct ParseContext;
class FullParseHandler;
deleted file mode 100644
--- a/js/src/asmjs/WasmCompileArgs.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- *
- * Copyright 2015 Mozilla Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef asmjs_wasm_compile_args_h
-#define asmjs_wasm_compile_args_h
-
-struct JSRuntime;
-
-namespace js {
-namespace wasm {
-
-struct CompileArgs
-{
- JSRuntime* runtime;
- bool usesSignalHandlersForOOB;
-
- CompileArgs(JSRuntime* runtime,
- bool usesSignalHandlersForOOB)
- : runtime(runtime),
- usesSignalHandlersForOOB(usesSignalHandlersForOOB)
- {}
-};
-
-} // namespace wasm
-} // namespace js
-
-#endif // asmjs_wasm_compile_args_h
rename from js/src/asmjs/AsmJSFrameIterator.cpp
rename to js/src/asmjs/WasmFrameIterator.cpp
--- a/js/src/asmjs/AsmJSFrameIterator.cpp
+++ b/js/src/asmjs/WasmFrameIterator.cpp
@@ -11,97 +11,117 @@
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#include "asmjs/AsmJSFrameIterator.h"
+#include "asmjs/WasmFrameIterator.h"
+
+#include "jsatom.h"
#include "asmjs/AsmJSModule.h"
#include "jit/MacroAssembler-inl.h"
using namespace js;
using namespace js::jit;
using namespace js::wasm;
using mozilla::DebugOnly;
+using mozilla::Swap;
/*****************************************************************************/
-// AsmJSFrameIterator implementation
+// FrameIterator implementation
static void*
ReturnAddressFromFP(void* fp)
{
return reinterpret_cast<AsmJSFrame*>(fp)->returnAddress;
}
static uint8_t*
CallerFPFromFP(void* fp)
{
return reinterpret_cast<AsmJSFrame*>(fp)->callerFP;
}
-AsmJSFrameIterator::AsmJSFrameIterator(const AsmJSActivation& activation)
- : module_(&activation.module()),
+FrameIterator::FrameIterator(const AsmJSActivation& activation)
+ : cx_(activation.cx()),
+ module_(&activation.module().wasm()),
fp_(activation.fp())
{
if (!fp_)
return;
settle();
}
void
-AsmJSFrameIterator::operator++()
+FrameIterator::operator++()
{
MOZ_ASSERT(!done());
DebugOnly<uint8_t*> oldfp = fp_;
fp_ += callsite_->stackDepth();
MOZ_ASSERT_IF(module_->profilingEnabled(), fp_ == CallerFPFromFP(oldfp));
settle();
}
void
-AsmJSFrameIterator::settle()
+FrameIterator::settle()
{
void* returnAddress = ReturnAddressFromFP(fp_);
- const AsmJSModule::CodeRange* codeRange = module_->lookupCodeRange(returnAddress);
+ const CodeRange* codeRange = module_->lookupCodeRange(returnAddress);
MOZ_ASSERT(codeRange);
codeRange_ = codeRange;
switch (codeRange->kind()) {
- case AsmJSModule::CodeRange::Function:
+ case CodeRange::Function:
callsite_ = module_->lookupCallSite(returnAddress);
MOZ_ASSERT(callsite_);
break;
- case AsmJSModule::CodeRange::Entry:
+ case CodeRange::Entry:
fp_ = nullptr;
MOZ_ASSERT(done());
break;
- case AsmJSModule::CodeRange::JitFFI:
- case AsmJSModule::CodeRange::SlowFFI:
- case AsmJSModule::CodeRange::Interrupt:
- case AsmJSModule::CodeRange::Inline:
- case AsmJSModule::CodeRange::Thunk:
+ case CodeRange::ImportJitExit:
+ case CodeRange::ImportInterpExit:
+ case CodeRange::Interrupt:
+ case CodeRange::Inline:
MOZ_CRASH("Should not encounter an exit during iteration");
}
}
JSAtom*
-AsmJSFrameIterator::functionDisplayAtom() const
+FrameIterator::functionDisplayAtom() const
{
MOZ_ASSERT(!done());
- return reinterpret_cast<const AsmJSModule::CodeRange*>(codeRange_)->functionName(*module_);
+
+ const char* chars = module_->functionName(codeRange_->funcNameIndex());
+ UTF8Chars utf8(chars, strlen(chars));
+
+ size_t twoByteLength;
+ UniquePtr<char16_t> twoByte(JS::UTF8CharsToNewTwoByteCharsZ(cx_, utf8, &twoByteLength).get());
+ if (!twoByte) {
+ cx_->clearPendingException();
+ return cx_->names().empty;
+ }
+
+ JSAtom* atom = AtomizeChars(cx_, twoByte.get(), twoByteLength);
+ if (!atom) {
+ cx_->clearPendingException();
+ return cx_->names().empty;
+ }
+
+ return atom;
}
unsigned
-AsmJSFrameIterator::computeLine(uint32_t* column) const
+FrameIterator::computeLine(uint32_t* column) const
{
MOZ_ASSERT(!done());
if (column)
*column = callsite_->column();
return callsite_->line();
}
/*****************************************************************************/
@@ -158,33 +178,33 @@ PushRetAddr(MacroAssembler& masm)
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
masm.push(ra);
#else
// The x86/x64 call instruction pushes the return address.
#endif
}
// Generate a prologue that maintains AsmJSActivation::fp as the virtual frame
-// pointer so that AsmJSProfilingFrameIterator can walk the stack at any pc in
+// pointer so that ProfilingFrameIterator can walk the stack at any pc in
// generated code.
static void
GenerateProfilingPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
- AsmJSProfilingOffsets* offsets, Label* maybeEntry = nullptr)
+ ProfilingOffsets* offsets, Label* maybeEntry = nullptr)
{
#if !defined (JS_CODEGEN_ARM)
Register scratch = ABIArgGenerator::NonArg_VolatileReg;
#else
// Unfortunately, there are no unused non-arg volatile registers on ARM --
// the MacroAssembler claims both lr and ip -- so we use the second scratch
// register (lr) and be very careful not to call any methods that use it.
Register scratch = lr;
masm.setSecondScratchReg(InvalidReg);
#endif
- // AsmJSProfilingFrameIterator needs to know the offsets of several key
+ // ProfilingFrameIterator needs to know the offsets of several key
// instructions from entry. To save space, we make these offsets static
// constants and assert that they match the actual codegen below. On ARM,
// this requires AutoForbidPools to prevent a constant pool from being
// randomly inserted between two instructions.
{
#if defined(JS_CODEGEN_ARM)
AutoForbidPools afp(&masm, /* number of instructions in scope = */ 5);
#endif
@@ -199,51 +219,51 @@ GenerateProfilingPrologue(MacroAssembler
masm.loadAsmJSActivation(scratch);
masm.push(Address(scratch, AsmJSActivation::offsetOfFP()));
MOZ_ASSERT_IF(!masm.oom(), PushedFP == masm.currentOffset() - offsets->begin);
masm.storePtr(masm.getStackPointer(), Address(scratch, AsmJSActivation::offsetOfFP()));
MOZ_ASSERT_IF(!masm.oom(), StoredFP == masm.currentOffset() - offsets->begin);
}
- if (reason.kind() != ExitReason::None) {
- masm.store32_NoSecondScratch(Imm32(reason.pack()),
- Address(scratch, AsmJSActivation::offsetOfPackedExitReason()));
+ if (reason != ExitReason::None) {
+ masm.store32_NoSecondScratch(Imm32(int32_t(reason)),
+ Address(scratch, AsmJSActivation::offsetOfExitReason()));
}
#if defined(JS_CODEGEN_ARM)
masm.setSecondScratchReg(lr);
#endif
if (framePushed)
masm.subFromStackPtr(Imm32(framePushed));
}
// Generate the inverse of GenerateProfilingPrologue.
static void
GenerateProfilingEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
- AsmJSProfilingOffsets* offsets)
+ ProfilingOffsets* offsets)
{
Register scratch = ABIArgGenerator::NonReturn_VolatileReg0;
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
Register scratch2 = ABIArgGenerator::NonReturn_VolatileReg1;
#endif
if (framePushed)
masm.addToStackPtr(Imm32(framePushed));
masm.loadAsmJSActivation(scratch);
- if (reason.kind() != ExitReason::None) {
- masm.store32(Imm32(ExitReason::None),
- Address(scratch, AsmJSActivation::offsetOfPackedExitReason()));
+ if (reason != ExitReason::None) {
+ masm.store32(Imm32(int32_t(ExitReason::None)),
+ Address(scratch, AsmJSActivation::offsetOfExitReason()));
}
- // AsmJSProfilingFrameIterator assumes fixed offsets of the last few
+ // ProfilingFrameIterator assumes fixed offsets of the last few
// instructions from profilingReturn, so AutoForbidPools to ensure that
// unintended instructions are not automatically inserted.
{
#if defined(JS_CODEGEN_ARM)
AutoForbidPools afp(&masm, /* number of instructions in scope = */ 4);
#endif
// sp protects the stack from clobber via asynchronous signal handlers
@@ -267,21 +287,20 @@ GenerateProfilingEpilogue(MacroAssembler
}
}
// In profiling mode, we need to maintain fp so that we can unwind the stack at
// any pc. In non-profiling mode, the only way to observe AsmJSActivation::fp is
// to call out to C++ so, as an optimization, we don't update fp. To avoid
// recompilation when the profiling mode is toggled, we generate both prologues
// a priori and switch between prologues when the profiling mode is toggled.
-// Specifically, AsmJSModule::setProfilingEnabled patches all callsites to
+// Specifically, Module::setProfilingEnabled patches all callsites to
// either call the profiling or non-profiling entry point.
void
-js::GenerateAsmJSFunctionPrologue(MacroAssembler& masm, unsigned framePushed,
- AsmJSFunctionOffsets* offsets)
+wasm::GenerateFunctionPrologue(MacroAssembler& masm, unsigned framePushed, FuncOffsets* offsets)
{
#if defined(JS_CODEGEN_ARM)
// Flush pending pools so they do not get dumped between the 'begin' and
// 'entry' offsets since the difference must be less than UINT8_MAX.
masm.flushBuffer();
#endif
masm.haltingAlign(CodeAlignment);
@@ -296,24 +315,23 @@ js::GenerateAsmJSFunctionPrologue(MacroA
PushRetAddr(masm);
masm.subFromStackPtr(Imm32(framePushed + AsmJSFrameBytesAfterReturnAddress));
// Prologue join point, body begin:
masm.bind(&body);
masm.setFramePushed(framePushed);
}
-// Similar to GenerateAsmJSFunctionPrologue (see comment), we generate both a
+// Similar to GenerateFunctionPrologue (see comment), we generate both a
// profiling and non-profiling epilogue a priori. When the profiling mode is
-// toggled, AsmJSModule::setProfilingEnabled patches the 'profiling jump' to
+// toggled, Module::setProfilingEnabled patches the 'profiling jump' to
// either be a nop (falling through to the normal prologue) or a jump (jumping
// to the profiling epilogue).
void
-js::GenerateAsmJSFunctionEpilogue(MacroAssembler& masm, unsigned framePushed,
- AsmJSFunctionOffsets* offsets)
+wasm::GenerateFunctionEpilogue(MacroAssembler& masm, unsigned framePushed, FuncOffsets* offsets)
{
MOZ_ASSERT(masm.framePushed() == framePushed);
#if defined(JS_CODEGEN_ARM)
// Flush pending pools so they do not get dumped between the profilingReturn
// and profilingJump/profilingEpilogue offsets since the difference must be
// less than UINT8_MAX.
masm.flushBuffer();
@@ -324,17 +342,17 @@ js::GenerateAsmJSFunctionEpilogue(MacroA
{
#if defined(JS_CODEGEN_ARM)
// Forbid pools from being inserted between the profilingJump label and
// the nop since we need the location of the actual nop to patch it.
AutoForbidPools afp(&masm, 1);
#endif
// The exact form of this instruction must be kept consistent with the
- // patching in AsmJSModule::setProfilingEnabled.
+ // patching in Module::setProfilingEnabled.
offsets->profilingJump = masm.currentOffset();
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
masm.twoByteNop();
#elif defined(JS_CODEGEN_ARM)
masm.nop();
#elif defined(JS_CODEGEN_MIPS32)
masm.nop();
masm.nop();
@@ -356,153 +374,146 @@ js::GenerateAsmJSFunctionEpilogue(MacroA
masm.setFramePushed(0);
// Profiling epilogue:
offsets->profilingEpilogue = masm.currentOffset();
GenerateProfilingEpilogue(masm, framePushed, ExitReason::None, offsets);
}
void
-js::GenerateAsmJSExitPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
- AsmJSProfilingOffsets* offsets, Label* maybeEntry)
+wasm::GenerateExitPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
+ ProfilingOffsets* offsets, Label* maybeEntry)
{
masm.haltingAlign(CodeAlignment);
GenerateProfilingPrologue(masm, framePushed, reason, offsets, maybeEntry);
masm.setFramePushed(framePushed);
}
void
-js::GenerateAsmJSExitEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
- AsmJSProfilingOffsets* offsets)
+wasm::GenerateExitEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
+ ProfilingOffsets* offsets)
{
- // Inverse of GenerateAsmJSExitPrologue:
+ // Inverse of GenerateExitPrologue:
MOZ_ASSERT(masm.framePushed() == framePushed);
GenerateProfilingEpilogue(masm, framePushed, reason, offsets);
masm.setFramePushed(0);
}
/*****************************************************************************/
-// AsmJSProfilingFrameIterator
+// ProfilingFrameIterator
-AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation& activation)
- : module_(&activation.module()),
+ProfilingFrameIterator::ProfilingFrameIterator(const AsmJSActivation& activation)
+ : module_(&activation.module().wasm()),
+ codeRange_(nullptr),
callerFP_(nullptr),
callerPC_(nullptr),
stackAddress_(nullptr),
- exitReason_(ExitReason::None),
- codeRange_(nullptr)
+ exitReason_(ExitReason::None)
{
// If profiling hasn't been enabled for this module, then CallerFPFromFP
// will be trash, so ignore the entire activation. In practice, this only
- // happens if profiling is enabled while module->active() (in this case,
+ // happens if profiling is enabled while !module->active() (in this case,
// profiling will be enabled when the module becomes inactive and gets
// called again).
if (!module_->profilingEnabled()) {
MOZ_ASSERT(done());
return;
}
initFromFP(activation);
}
static inline void
-AssertMatchesCallSite(const AsmJSModule& module, const AsmJSModule::CodeRange* calleeCodeRange,
+AssertMatchesCallSite(const Module& module, const CodeRange* calleeCodeRange,
void* callerPC, void* callerFP, void* fp)
{
#ifdef DEBUG
- const AsmJSModule::CodeRange* callerCodeRange = module.lookupCodeRange(callerPC);
+ const CodeRange* callerCodeRange = module.lookupCodeRange(callerPC);
MOZ_ASSERT(callerCodeRange);
- if (callerCodeRange->isEntry()) {
+ if (callerCodeRange->kind() == CodeRange::Entry) {
MOZ_ASSERT(callerFP == nullptr);
return;
}
const CallSite* callsite = module.lookupCallSite(callerPC);
- if (calleeCodeRange->isThunk()) {
- MOZ_ASSERT(!callsite);
- MOZ_ASSERT(callerCodeRange->isFunction());
- } else {
- MOZ_ASSERT(callsite);
- MOZ_ASSERT(callerFP == (uint8_t*)fp + callsite->stackDepth());
- }
+ MOZ_ASSERT(callsite);
+ MOZ_ASSERT(callerFP == (uint8_t*)fp + callsite->stackDepth());
#endif
}
void
-AsmJSProfilingFrameIterator::initFromFP(const AsmJSActivation& activation)
+ProfilingFrameIterator::initFromFP(const AsmJSActivation& activation)
{
uint8_t* fp = activation.fp();
// If a signal was handled while entering an activation, the frame will
// still be null.
if (!fp) {
MOZ_ASSERT(done());
return;
}
// Since we don't have the pc for fp, start unwinding at the caller of fp
// (ReturnAddressFromFP(fp)). This means that the innermost frame is
// skipped. This is fine because:
- // - for FFI calls, the innermost frame is a thunk, so the first frame that
- // shows up is the function calling the FFI;
- // - for Math and other builtin calls, when profiling is activated, we
- // patch all call sites to instead call through a thunk; and
- // - for interrupts, we just accept that we'll lose the innermost frame.
+ // - for import exit calls, the innermost frame is a thunk, so the first
+ // frame that shows up is the function calling the import;
+ // - for Math and other builtin calls as well as interrupts, we note the absence
+ // of an exit reason and inject a fake "builtin" frame; and
+ // - for async interrupts, we just accept that we'll lose the innermost frame.
void* pc = ReturnAddressFromFP(fp);
- const AsmJSModule::CodeRange* codeRange = module_->lookupCodeRange(pc);
+ const CodeRange* codeRange = module_->lookupCodeRange(pc);
MOZ_ASSERT(codeRange);
codeRange_ = codeRange;
stackAddress_ = fp;
switch (codeRange->kind()) {
- case AsmJSModule::CodeRange::Entry:
+ case CodeRange::Entry:
callerPC_ = nullptr;
callerFP_ = nullptr;
break;
- case AsmJSModule::CodeRange::Function:
+ case CodeRange::Function:
fp = CallerFPFromFP(fp);
callerPC_ = ReturnAddressFromFP(fp);
callerFP_ = CallerFPFromFP(fp);
AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, fp);
break;
- case AsmJSModule::CodeRange::JitFFI:
- case AsmJSModule::CodeRange::SlowFFI:
- case AsmJSModule::CodeRange::Interrupt:
- case AsmJSModule::CodeRange::Inline:
- case AsmJSModule::CodeRange::Thunk:
+ case CodeRange::ImportJitExit:
+ case CodeRange::ImportInterpExit:
+ case CodeRange::Interrupt:
+ case CodeRange::Inline:
MOZ_CRASH("Unexpected CodeRange kind");
}
- // Despite the above reasoning for skipping a frame, we do actually want FFI
- // trampolines and interrupts to show up in the profile (so they can
- // accumulate self time and explain performance faults). To do this, an
- // "exit reason" is stored on all the paths leaving asm.js and this iterator
- // treats this exit reason as its own frame. If we have exited asm.js code
- // without setting an exit reason, the reason will be None and this means
- // the code was asynchronously interrupted.
+ // The iterator inserts a pretend innermost frame for non-None ExitReasons.
+ // This allows the variety of exit reasons to show up in the callstack.
exitReason_ = activation.exitReason();
- if (exitReason_.kind() == ExitReason::None)
- exitReason_ = ExitReason::Interrupt;
+
+ // In the case of calls to builtins or asynchronous interrupts, no exit path
+ // is taken so the exitReason is None. Coerce these to the Native exit
+ // reason so that self-time is accounted for.
+ if (exitReason_ == ExitReason::None)
+ exitReason_ = ExitReason::Native;
MOZ_ASSERT(!done());
}
typedef JS::ProfilingFrameIterator::RegisterState RegisterState;
-AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation& activation,
- const RegisterState& state)
- : module_(&activation.module()),
+ProfilingFrameIterator::ProfilingFrameIterator(const AsmJSActivation& activation,
+ const RegisterState& state)
+ : module_(&activation.module().wasm()),
+ codeRange_(nullptr),
callerFP_(nullptr),
callerPC_(nullptr),
- exitReason_(ExitReason::None),
- codeRange_(nullptr)
+ exitReason_(ExitReason::None)
{
// If profiling hasn't been enabled for this module, then CallerFPFromFP
// will be trash, so ignore the entire activation. In practice, this only
- // happens if profiling is enabled while module->active() (in this case,
+ // happens if profiling is enabled while !module->active() (in this case,
// profiling will be enabled when the module becomes inactive and gets
// called again).
if (!module_->profilingEnabled()) {
MOZ_ASSERT(done());
return;
}
// If pc isn't in the module, we must have exited the asm.js module via an
@@ -510,32 +521,31 @@ AsmJSProfilingFrameIterator::AsmJSProfil
if (!module_->containsCodePC(state.pc)) {
initFromFP(activation);
return;
}
// Note: fp may be null while entering and leaving the activation.
uint8_t* fp = activation.fp();
- const AsmJSModule::CodeRange* codeRange = module_->lookupCodeRange(state.pc);
+ const CodeRange* codeRange = module_->lookupCodeRange(state.pc);
switch (codeRange->kind()) {
- case AsmJSModule::CodeRange::Function:
- case AsmJSModule::CodeRange::JitFFI:
- case AsmJSModule::CodeRange::SlowFFI:
- case AsmJSModule::CodeRange::Interrupt:
- case AsmJSModule::CodeRange::Thunk: {
+ case CodeRange::Function:
+ case CodeRange::ImportJitExit:
+ case CodeRange::ImportInterpExit:
+ case CodeRange::Interrupt: {
// When the pc is inside the prologue/epilogue, the innermost
// call's AsmJSFrame is not complete and thus fp points to the the
// second-to-innermost call's AsmJSFrame. Since fp can only tell you
// about its caller (via ReturnAddressFromFP(fp)), naively unwinding
// while pc is in the prologue/epilogue would skip the second-to-
// innermost call. To avoid this problem, we use the static structure of
// the code in the prologue and epilogue to do the Right Thing.
- uint32_t offsetInModule = (uint8_t*)state.pc - module_->codeBase();
- MOZ_ASSERT(offsetInModule < module_->codeBytes());
+ MOZ_ASSERT(module_->containsCodePC(state.pc));
+ uint32_t offsetInModule = (uint8_t*)state.pc - module_->code();
MOZ_ASSERT(offsetInModule >= codeRange->begin());
MOZ_ASSERT(offsetInModule < codeRange->end());
uint32_t offsetInCodeRange = offsetInModule - codeRange->begin();
void** sp = (void**)state.sp;
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
if (offsetInCodeRange < PushedRetAddr) {
// First instruction of the ARM/MIPS function; the return address is
// still in lr and fp still holds the caller's fp.
@@ -566,26 +576,26 @@ AsmJSProfilingFrameIterator::AsmJSProfil
} else {
// Not in the prologue/epilogue.
callerPC_ = ReturnAddressFromFP(fp);
callerFP_ = CallerFPFromFP(fp);
AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, fp);
}
break;
}
- case AsmJSModule::CodeRange::Entry: {
+ case CodeRange::Entry: {
// The entry trampoline is the final frame in an AsmJSActivation. The entry
- // trampoline also doesn't GenerateAsmJSPrologue/Epilogue so we can't use
+ // trampoline also doesn't GeneratePrologue/Epilogue so we can't use
// the general unwinding logic above.
MOZ_ASSERT(!fp);
callerPC_ = nullptr;
callerFP_ = nullptr;
break;
}
- case AsmJSModule::CodeRange::Inline: {
+ case CodeRange::Inline: {
// The throw stub clears AsmJSActivation::fp on it's way out.
if (!fp) {
MOZ_ASSERT(done());
return;
}
// Most inline code stubs execute after the prologue/epilogue have
// completed so we can simply unwind based on fp. The only exception is
@@ -600,129 +610,225 @@ AsmJSProfilingFrameIterator::AsmJSProfil
}
codeRange_ = codeRange;
stackAddress_ = state.sp;
MOZ_ASSERT(!done());
}
void
-AsmJSProfilingFrameIterator::operator++()
+ProfilingFrameIterator::operator++()
{
- if (exitReason_.kind() != ExitReason::None) {
+ if (exitReason_ != ExitReason::None) {
MOZ_ASSERT(codeRange_);
exitReason_ = ExitReason::None;
MOZ_ASSERT(!done());
return;
}
if (!callerPC_) {
MOZ_ASSERT(!callerFP_);
codeRange_ = nullptr;
MOZ_ASSERT(done());
return;
}
MOZ_ASSERT(callerPC_);
- const AsmJSModule::CodeRange* codeRange = module_->lookupCodeRange(callerPC_);
+ const CodeRange* codeRange = module_->lookupCodeRange(callerPC_);
MOZ_ASSERT(codeRange);
codeRange_ = codeRange;
switch (codeRange->kind()) {
- case AsmJSModule::CodeRange::Entry:
+ case CodeRange::Entry:
MOZ_ASSERT(callerFP_ == nullptr);
callerPC_ = nullptr;
break;
- case AsmJSModule::CodeRange::Function:
- case AsmJSModule::CodeRange::JitFFI:
- case AsmJSModule::CodeRange::SlowFFI:
- case AsmJSModule::CodeRange::Interrupt:
- case AsmJSModule::CodeRange::Inline:
- case AsmJSModule::CodeRange::Thunk:
+ case CodeRange::Function:
+ case CodeRange::ImportJitExit:
+ case CodeRange::ImportInterpExit:
+ case CodeRange::Interrupt:
+ case CodeRange::Inline:
stackAddress_ = callerFP_;
callerPC_ = ReturnAddressFromFP(callerFP_);
AssertMatchesCallSite(*module_, codeRange, callerPC_, CallerFPFromFP(callerFP_), callerFP_);
callerFP_ = CallerFPFromFP(callerFP_);
break;
}
MOZ_ASSERT(!done());
}
-static const char*
-BuiltinToName(Builtin builtin)
-{
- // Note: this label is regexp-matched by
- // devtools/client/profiler/cleopatra/js/parserWorker.js.
-
- switch (builtin) {
- case Builtin::ToInt32: return "ToInt32 (in asm.js)";
-#if defined(JS_CODEGEN_ARM)
- case Builtin::aeabi_idivmod: return "software idivmod (in asm.js)";
- case Builtin::aeabi_uidivmod: return "software uidivmod (in asm.js)";
- case Builtin::AtomicCmpXchg: return "Atomics.compareExchange (in asm.js)";
- case Builtin::AtomicXchg: return "Atomics.exchange (in asm.js)";
- case Builtin::AtomicFetchAdd: return "Atomics.add (in asm.js)";
- case Builtin::AtomicFetchSub: return "Atomics.sub (in asm.js)";
- case Builtin::AtomicFetchAnd: return "Atomics.and (in asm.js)";
- case Builtin::AtomicFetchOr: return "Atomics.or (in asm.js)";
- case Builtin::AtomicFetchXor: return "Atomics.xor (in asm.js)";
-#endif
- case Builtin::ModD: return "fmod (in asm.js)";
- case Builtin::SinD: return "Math.sin (in asm.js)";
- case Builtin::CosD: return "Math.cos (in asm.js)";
- case Builtin::TanD: return "Math.tan (in asm.js)";
- case Builtin::ASinD: return "Math.asin (in asm.js)";
- case Builtin::ACosD: return "Math.acos (in asm.js)";
- case Builtin::ATanD: return "Math.atan (in asm.js)";
- case Builtin::CeilD:
- case Builtin::CeilF: return "Math.ceil (in asm.js)";
- case Builtin::FloorD:
- case Builtin::FloorF: return "Math.floor (in asm.js)";
- case Builtin::ExpD: return "Math.exp (in asm.js)";
- case Builtin::LogD: return "Math.log (in asm.js)";
- case Builtin::PowD: return "Math.pow (in asm.js)";
- case Builtin::ATan2D: return "Math.atan2 (in asm.js)";
- case Builtin::Limit: break;
- }
- MOZ_CRASH("symbolic immediate not a builtin");
-}
-
const char*
-AsmJSProfilingFrameIterator::label() const
+ProfilingFrameIterator::label() const
{
MOZ_ASSERT(!done());
// Use the same string for both time inside and under so that the two
// entries will be coalesced by the profiler.
//
// NB: these labels are regexp-matched by
// devtools/client/profiler/cleopatra/js/parserWorker.js.
- const char* jitFFIDescription = "fast FFI trampoline (in asm.js)";
- const char* slowFFIDescription = "slow FFI trampoline (in asm.js)";
- const char* interruptDescription = "interrupt due to out-of-bounds or long execution (in asm.js)";
+ const char* importJitDescription = "fast FFI trampoline (in asm.js)";
+ const char* importInterpDescription = "slow FFI trampoline (in asm.js)";
+ const char* nativeDescription = "native call (in asm.js)";
- switch (exitReason_.kind()) {
+ switch (exitReason_) {
case ExitReason::None:
break;
- case ExitReason::Jit:
- return jitFFIDescription;
- case ExitReason::Slow:
- return slowFFIDescription;
- case ExitReason::Interrupt:
- return interruptDescription;
- case ExitReason::Builtin:
- return BuiltinToName(exitReason_.builtin());
+ case ExitReason::ImportJit:
+ return importJitDescription;
+ case ExitReason::ImportInterp:
+ return importInterpDescription;
+ case ExitReason::Native:
+ return nativeDescription;
}
- auto codeRange = reinterpret_cast<const AsmJSModule::CodeRange*>(codeRange_);
- switch (codeRange->kind()) {
- case AsmJSModule::CodeRange::Function: return codeRange->functionProfilingLabel(*module_);
- case AsmJSModule::CodeRange::Entry: return "entry trampoline (in asm.js)";
- case AsmJSModule::CodeRange::JitFFI: return jitFFIDescription;
- case AsmJSModule::CodeRange::SlowFFI: return slowFFIDescription;
- case AsmJSModule::CodeRange::Interrupt: return interruptDescription;
- case AsmJSModule::CodeRange::Inline: return "inline stub (in asm.js)";
- case AsmJSModule::CodeRange::Thunk: return BuiltinToName(codeRange->thunkTarget());
+ switch (codeRange_->kind()) {
+ case CodeRange::Function: return module_->profilingLabel(codeRange_->funcNameIndex());
+ case CodeRange::Entry: return "entry trampoline (in asm.js)";
+ case CodeRange::ImportJitExit: return importJitDescription;
+ case CodeRange::ImportInterpExit: return importInterpDescription;
+ case CodeRange::Interrupt: return nativeDescription;
+ case CodeRange::Inline: return "inline stub (in asm.js)";
}
MOZ_CRASH("bad code range kind");
}
+
+/*****************************************************************************/
+// Runtime patching to enable/disable profiling
+
+// Patch an internal (asm.js->asm.js) callsite to call the profiling
+// prologue:
+void
+wasm::EnableProfilingPrologue(Module& module, const CallSite& callSite, bool enabled)
+{
+ if (callSite.kind() != CallSite::Relative)
+ return;
+
+ uint8_t* callerRetAddr = module.code() + callSite.returnAddressOffset();
+
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ void* callee = X86Encoding::GetRel32Target(callerRetAddr);
+#elif defined(JS_CODEGEN_ARM)
+ uint8_t* caller = callerRetAddr - 4;
+ Instruction* callerInsn = reinterpret_cast<Instruction*>(caller);
+ BOffImm calleeOffset;
+ callerInsn->as<InstBLImm>()->extractImm(&calleeOffset);
+ void* callee = calleeOffset.getDest(callerInsn);
+#elif defined(JS_CODEGEN_ARM64)
+ MOZ_CRASH();
+ void* callee = nullptr;
+ (void)callerRetAddr;
+#elif defined(JS_CODEGEN_MIPS32)
+ Instruction* instr = (Instruction*)(callerRetAddr - 4 * sizeof(uint32_t));
+ void* callee = (void*)Assembler::ExtractLuiOriValue(instr, instr->next());
+#elif defined(JS_CODEGEN_MIPS64)
+ Instruction* instr = (Instruction*)(callerRetAddr - 6 * sizeof(uint32_t));
+ void* callee = (void*)Assembler::ExtractLoad64Value(instr);
+#elif defined(JS_CODEGEN_NONE)
+ MOZ_CRASH();
+ void* callee = nullptr;
+#else
+# error "Missing architecture"
+#endif
+
+ const CodeRange* codeRange = module.lookupCodeRange(callee);
+ if (!codeRange->isFunction())
+ return;
+
+ uint8_t* from = module.code() + codeRange->funcNonProfilingEntry();
+ uint8_t* to = module.code() + codeRange->funcProfilingEntry();
+ if (!enabled)
+ Swap(from, to);
+
+ MOZ_ASSERT(callee == from);
+
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ X86Encoding::SetRel32(callerRetAddr, to);
+#elif defined(JS_CODEGEN_ARM)
+ new (caller) InstBLImm(BOffImm(to - caller), Assembler::Always);
+#elif defined(JS_CODEGEN_ARM64)
+ (void)to;
+ MOZ_CRASH();
+#elif defined(JS_CODEGEN_MIPS32)
+ Assembler::WriteLuiOriInstructions(instr, instr->next(),
+ ScratchRegister, (uint32_t)to);
+ instr[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
+#elif defined(JS_CODEGEN_MIPS64)
+ Assembler::WriteLoad64Instructions(instr, ScratchRegister, (uint64_t)to);
+ instr[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
+#elif defined(JS_CODEGEN_NONE)
+ MOZ_CRASH();
+#else
+# error "Missing architecture"
+#endif
+}
+
+// Replace the nop in the epilogue of an asm.js function with a jump
+// to the profiling epilogue.
+void
+wasm::EnableProfilingEpilogue(Module& module, const CodeRange& codeRange, bool enabled)
+{
+ if (!codeRange.isFunction())
+ return;
+
+ uint8_t* jump = module.code() + codeRange.funcProfilingJump();
+ uint8_t* profilingEpilogue = module.code() + codeRange.funcProfilingEpilogue();
+
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ // An unconditional jump with a 1 byte offset immediate has the opcode
+ // 0xeb. The offset is relative to the address of the instruction after
+ // the jump. 0x66 0x90 is the canonical two-byte nop.
+ ptrdiff_t jumpImmediate = profilingEpilogue - jump - 2;
+ MOZ_ASSERT(jumpImmediate > 0 && jumpImmediate <= 127);
+ if (enabled) {
+ MOZ_ASSERT(jump[0] == 0x66);
+ MOZ_ASSERT(jump[1] == 0x90);
+ jump[0] = 0xeb;
+ jump[1] = jumpImmediate;
+ } else {
+ MOZ_ASSERT(jump[0] == 0xeb);
+ MOZ_ASSERT(jump[1] == jumpImmediate);
+ jump[0] = 0x66;
+ jump[1] = 0x90;
+ }
+#elif defined(JS_CODEGEN_ARM)
+ if (enabled) {
+ MOZ_ASSERT(reinterpret_cast<Instruction*>(jump)->is<InstNOP>());
+ new (jump) InstBImm(BOffImm(profilingEpilogue - jump), Assembler::Always);
+ } else {
+ MOZ_ASSERT(reinterpret_cast<Instruction*>(jump)->is<InstBImm>());
+ new (jump) InstNOP();
+ }
+#elif defined(JS_CODEGEN_ARM64)
+ (void)jump;
+ (void)profilingEpilogue;
+ MOZ_CRASH();
+#elif defined(JS_CODEGEN_MIPS32)
+ Instruction* instr = (Instruction*)jump;
+ if (enabled) {
+ Assembler::WriteLuiOriInstructions(instr, instr->next(),
+ ScratchRegister, (uint32_t)profilingEpilogue);
+ instr[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr);
+ } else {
+ instr[0].makeNop();
+ instr[1].makeNop();
+ instr[2].makeNop();
+ }
+#elif defined(JS_CODEGEN_MIPS64)
+ Instruction* instr = (Instruction*)jump;
+ if (enabled) {
+ Assembler::WriteLoad64Instructions(instr, ScratchRegister, (uint64_t)profilingEpilogue);
+ instr[4] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr);
+ } else {
+ instr[0].makeNop();
+ instr[1].makeNop();
+ instr[2].makeNop();
+ instr[3].makeNop();
+ instr[4].makeNop();
+ }
+#elif defined(JS_CODEGEN_NONE)
+ MOZ_CRASH();
+#else
+# error "Missing architecture"
+#endif
+}
rename from js/src/asmjs/AsmJSFrameIterator.h
rename to js/src/asmjs/WasmFrameIterator.h
--- a/js/src/asmjs/AsmJSFrameIterator.h
+++ b/js/src/asmjs/WasmFrameIterator.h
@@ -11,150 +11,111 @@
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#ifndef asmjs_AsmJSFrameIterator_h
-#define asmjs_AsmJSFrameIterator_h
+#ifndef wasm_frame_iterator_h
+#define wasm_frame_iterator_h
-#include <stdint.h>
-
-#include "asmjs/Wasm.h"
#include "js/ProfilingFrameIterator.h"
class JSAtom;
namespace js {
class AsmJSActivation;
-class AsmJSModule;
namespace jit { class MacroAssembler; class Label; }
-namespace wasm { class CallSite; }
+
+namespace wasm {
+
+class CallSite;
+class CodeRange;
+class Module;
+struct FuncOffsets;
+struct ProfilingOffsets;
// Iterates over the frames of a single AsmJSActivation, called synchronously
// from C++ in the thread of the asm.js. The one exception is that this iterator
// may be called from the interrupt callback which may be called asynchronously
// from asm.js code; in this case, the backtrace may not be correct.
-class AsmJSFrameIterator
+class FrameIterator
{
- const AsmJSModule* module_;
- const wasm::CallSite* callsite_;
+ JSContext* cx_;
+ const Module* module_;
+ const CallSite* callsite_;
+ const CodeRange* codeRange_;
uint8_t* fp_;
- // Really, a const AsmJSModule::CodeRange*, but no forward declarations of
- // nested classes, so use void* to avoid pulling in all of AsmJSModule.h.
- const void* codeRange_;
-
void settle();
public:
- explicit AsmJSFrameIterator() : module_(nullptr) {}
- explicit AsmJSFrameIterator(const AsmJSActivation& activation);
+ explicit FrameIterator() : fp_(nullptr) { MOZ_ASSERT(done()); }
+ explicit FrameIterator(const AsmJSActivation& activation);
void operator++();
bool done() const { return !fp_; }
JSAtom* functionDisplayAtom() const;
unsigned computeLine(uint32_t* column) const;
};
+// An ExitReason describes the possible reasons for leaving compiled wasm code
+// or the state of not having left compiled wasm code (ExitReason::None).
+enum class ExitReason : uint32_t
+{
+ None, // default state, the pc is in wasm code
+ ImportJit, // fast-path call directly into JIT code
+ ImportInterp, // slow-path call into C++ Invoke()
+ Native // implementation-dependent call to native C++ code
+};
+
// Iterates over the frames of a single AsmJSActivation, given an
// asynchrously-interrupted thread's state. If the activation's
// module is not in profiling mode, the activation is skipped.
-class AsmJSProfilingFrameIterator
+class ProfilingFrameIterator
{
- const AsmJSModule* module_;
+ const Module* module_;
+ const CodeRange* codeRange_;
uint8_t* callerFP_;
void* callerPC_;
void* stackAddress_;
- wasm::ExitReason exitReason_;
-
- // Really, a const AsmJSModule::CodeRange*, but no forward declarations of
- // nested classes, so use void* to avoid pulling in all of AsmJSModule.h.
- const void* codeRange_;
+ ExitReason exitReason_;
void initFromFP(const AsmJSActivation& activation);
public:
- AsmJSProfilingFrameIterator() : codeRange_(nullptr) {}
- explicit AsmJSProfilingFrameIterator(const AsmJSActivation& activation);
- AsmJSProfilingFrameIterator(const AsmJSActivation& activation,
- const JS::ProfilingFrameIterator::RegisterState& state);
+ ProfilingFrameIterator() : codeRange_(nullptr) {}
+ explicit ProfilingFrameIterator(const AsmJSActivation& activation);
+ ProfilingFrameIterator(const AsmJSActivation& activation,
+ const JS::ProfilingFrameIterator::RegisterState& state);
void operator++();
bool done() const { return !codeRange_; }
void* stackAddress() const { MOZ_ASSERT(!done()); return stackAddress_; }
const char* label() const;
};
-/******************************************************************************/
-// Prologue/epilogue code generation.
-
-struct AsmJSOffsets
-{
- MOZ_IMPLICIT AsmJSOffsets(uint32_t begin = 0,
- uint32_t end = 0)
- : begin(begin), end(end)
- {}
-
- // These define a [begin, end) contiguous range of instructions compiled
- // into an AsmJSModule::CodeRange.
- uint32_t begin;
- uint32_t end;
-};
-
-struct AsmJSProfilingOffsets : AsmJSOffsets
-{
- MOZ_IMPLICIT AsmJSProfilingOffsets(uint32_t profilingReturn = 0)
- : AsmJSOffsets(), profilingReturn(profilingReturn)
- {}
-
- // For CodeRanges with AsmJSProfilingOffsets, 'begin' is the offset of the
- // profiling entry.
- uint32_t profilingEntry() const { return begin; }
+// Prologue/epilogue code generation
+void
+GenerateExitPrologue(jit::MacroAssembler& masm, unsigned framePushed, ExitReason reason,
+ ProfilingOffsets* offsets, jit::Label* maybeEntry = nullptr);
+void
+GenerateExitEpilogue(jit::MacroAssembler& masm, unsigned framePushed, ExitReason reason,
+ ProfilingOffsets* offsets);
+void
+GenerateFunctionPrologue(jit::MacroAssembler& masm, unsigned framePushed, FuncOffsets* offsets);
+void
+GenerateFunctionEpilogue(jit::MacroAssembler& masm, unsigned framePushed, FuncOffsets* offsets);
- // The profiling return is the offset of the return instruction, which
- // precedes the 'end' by a variable number of instructions due to
- // out-of-line codegen.
- uint32_t profilingReturn;
-};
-
-struct AsmJSFunctionOffsets : AsmJSProfilingOffsets
-{
- MOZ_IMPLICIT AsmJSFunctionOffsets(uint32_t nonProfilingEntry = 0,
- uint32_t profilingJump = 0,
- uint32_t profilingEpilogue = 0)
- : AsmJSProfilingOffsets(),
- nonProfilingEntry(nonProfilingEntry),
- profilingJump(profilingJump),
- profilingEpilogue(profilingEpilogue)
- {}
-
- // Function CodeRanges have an additional non-profiling entry that comes
- // after the profiling entry and a non-profiling epilogue that comes before
- // the profiling epilogue.
- uint32_t nonProfilingEntry;
-
- // When profiling is enabled, the 'nop' at offset 'profilingJump' is
- // overwritten to be a jump to 'profilingEpilogue'.
- uint32_t profilingJump;
- uint32_t profilingEpilogue;
-};
+// Runtime patching to enable/disable profiling
void
-GenerateAsmJSExitPrologue(jit::MacroAssembler& masm, unsigned framePushed, wasm::ExitReason reason,
- AsmJSProfilingOffsets* offsets, jit::Label* maybeEntry = nullptr);
-void
-GenerateAsmJSExitEpilogue(jit::MacroAssembler& masm, unsigned framePushed, wasm::ExitReason reason,
- AsmJSProfilingOffsets* offsets);
+EnableProfilingPrologue(Module& module, const CallSite& callSite, bool enabled);
void
-GenerateAsmJSFunctionPrologue(jit::MacroAssembler& masm, unsigned framePushed,
- AsmJSFunctionOffsets* offsets);
-void
-GenerateAsmJSFunctionEpilogue(jit::MacroAssembler& masm, unsigned framePushed,
- AsmJSFunctionOffsets* offsets);
+EnableProfilingEpilogue(Module& module, const CodeRange& codeRange, bool enabled);
+} // namespace wasm
} // namespace js
-#endif // asmjs_AsmJSFrameIterator_h
+#endif // wasm_frame_iterator_h
--- a/js/src/asmjs/WasmGenerator.cpp
+++ b/js/src/asmjs/WasmGenerator.cpp
@@ -13,76 +13,66 @@
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "asmjs/WasmGenerator.h"
-#include "asmjs/AsmJSModule.h"
+#include "asmjs/AsmJSValidate.h"
#include "asmjs/WasmStubs.h"
-#ifdef MOZ_VTUNE
-# include "vtune/VTuneWrapper.h"
-#endif
+
+#include "jit/MacroAssembler-inl.h"
using namespace js;
using namespace js::jit;
using namespace js::wasm;
-static bool
-ParallelCompilationEnabled(ExclusiveContext* cx)
-{
- // Since there are a fixed number of helper threads and one is already being
- // consumed by this parsing task, ensure that there another free thread to
- // avoid deadlock. (Note: there is at most one thread used for parsing so we
- // don't have to worry about general dining philosophers.)
- if (HelperThreadState().threadCount <= 1 || !CanUseExtraThreads())
- return false;
-
- // If 'cx' isn't a JSContext, then we are already off the main thread so
- // off-thread compilation must be enabled.
- return !cx->isJSContext() || cx->asJSContext()->runtime()->canUseOffthreadIonCompilation();
-}
-
// ****************************************************************************
// ModuleGenerator
static const unsigned GENERATOR_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024;
static const unsigned COMPILATION_LIFO_DEFAULT_CHUNK_SIZE = 64 * 1024;
ModuleGenerator::ModuleGenerator(ExclusiveContext* cx)
: cx_(cx),
+ args_(cx),
+ globalBytes_(InitialGlobalDataBytes),
+ slowFuncs_(cx),
lifo_(GENERATOR_LIFO_DEFAULT_CHUNK_SIZE),
+ jcx_(CompileRuntime::get(cx->compartment()->runtimeFromAnyThread())),
alloc_(&lifo_),
- masm_(MacroAssembler::AsmJSToken(), &alloc_),
+ masm_(MacroAssembler::AsmJSToken(), alloc_),
sigs_(cx),
parallel_(false),
outstanding_(0),
tasks_(cx),
freeTasks_(cx),
+ funcBytes_(0),
funcEntryOffsets_(cx),
- funcPtrTables_(cx),
- slowFuncs_(cx),
- active_(nullptr)
-{}
+ activeFunc_(nullptr),
+ finishedFuncs_(false)
+{
+ MOZ_ASSERT(IsCompilingAsmJS());
+}
ModuleGenerator::~ModuleGenerator()
{
if (parallel_) {
// Wait for any outstanding jobs to fail or complete.
if (outstanding_) {
AutoLockHelperThreadState lock;
while (true) {
- CompileTaskVector& worklist = HelperThreadState().wasmWorklist();
+ IonCompileTaskVector& worklist = HelperThreadState().wasmWorklist();
MOZ_ASSERT(outstanding_ >= worklist.length());
outstanding_ -= worklist.length();
worklist.clear();
- CompileTaskVector& finished = HelperThreadState().wasmFinishedList();
+ IonCompileTaskVector& finished = HelperThreadState().wasmFinishedList();
MOZ_ASSERT(outstanding_ >= finished.length());
outstanding_ -= finished.length();
finished.clear();
uint32_t numFailed = HelperThreadState().harvestFailedWasmJobs();
MOZ_ASSERT(outstanding_ >= numFailed);
outstanding_ -= numFailed;
@@ -95,24 +85,39 @@ ModuleGenerator::~ModuleGenerator()
MOZ_ASSERT(HelperThreadState().wasmCompilationInProgress);
HelperThreadState().wasmCompilationInProgress = false;
} else {
MOZ_ASSERT(!outstanding_);
}
}
-bool
-ModuleGenerator::init(ScriptSource* ss, uint32_t srcStart, uint32_t srcBodyStart, bool strict)
+static bool
+ParallelCompilationEnabled(ExclusiveContext* cx)
{
- if (!sigs_.init())
+ // Since there are a fixed number of helper threads and one is already being
+ // consumed by this parsing task, ensure that there is another free thread to
+ // avoid deadlock. (Note: there is at most one thread used for parsing so we
+ // don't have to worry about general dining philosophers.)
+ if (HelperThreadState().threadCount <= 1 || !CanUseExtraThreads())
return false;
- module_ = cx_->new_<AsmJSModule>(ss, srcStart, srcBodyStart, strict, cx_->canUseSignalHandlers());
- if (!module_)
+ // If 'cx' isn't a JSContext, then we are already off the main thread so
+ // off-thread compilation must be enabled.
+ return !cx->isJSContext() || cx->asJSContext()->runtime()->canUseOffthreadIonCompilation();
+}
+
+bool
+ModuleGenerator::init()
+{
+ staticLinkData_ = cx_->make_unique<StaticLinkData>();
+ if (!staticLinkData_)
+ return false;
+
+ if (!sigs_.init())
return false;
uint32_t numTasks;
if (ParallelCompilationEnabled(cx_) &&
HelperThreadState().wasmCompilationInProgress.compareExchange(false, true))
{
#ifdef DEBUG
{
@@ -126,132 +131,47 @@ ModuleGenerator::init(ScriptSource* ss,
parallel_ = true;
numTasks = HelperThreadState().maxWasmCompilationThreads();
} else {
numTasks = 1;
}
if (!tasks_.initCapacity(numTasks))
return false;
+ JSRuntime* runtime = cx_->compartment()->runtimeFromAnyThread();
for (size_t i = 0; i < numTasks; i++)
- tasks_.infallibleEmplaceBack(COMPILATION_LIFO_DEFAULT_CHUNK_SIZE, args());
+ tasks_.infallibleEmplaceBack(runtime, args_, COMPILATION_LIFO_DEFAULT_CHUNK_SIZE);
if (!freeTasks_.reserve(numTasks))
return false;
for (size_t i = 0; i < numTasks; i++)
freeTasks_.infallibleAppend(&tasks_[i]);
return true;
}
bool
-ModuleGenerator::startFunc(PropertyName* name, unsigned line, unsigned column,
- FunctionGenerator* fg)
+ModuleGenerator::allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOffset)
{
- MOZ_ASSERT(!active_);
-
- if (freeTasks_.empty() && !finishOutstandingTask())
- return false;
-
- CompileTask* task = freeTasks_.popCopy();
- FuncIR* func = task->lifo().new_<FuncIR>(task->lifo(), name, line, column);
- if (!func)
+ uint32_t pad = ComputeByteAlignment(globalBytes_, align);
+ if (UINT32_MAX - globalBytes_ < pad + bytes)
return false;
- task->init(*func);
- fg->m_ = this;
- fg->task_ = task;
- fg->func_ = func;
- active_ = fg;
- return true;
-}
-
-bool
-ModuleGenerator::finishFunc(uint32_t funcIndex, const LifoSig& sig, unsigned generateTime,
- FunctionGenerator* fg)
-{
- MOZ_ASSERT(active_ == fg);
-
- fg->func_->finish(funcIndex, sig, generateTime);
-
- if (parallel_) {
- if (!StartOffThreadWasmCompile(cx_, fg->task_))
- return false;
- outstanding_++;
- } else {
- if (!CompileFunction(fg->task_))
- return false;
- if (!finishTask(fg->task_))
- return false;
- }
-
- fg->m_ = nullptr;
- fg->task_ = nullptr;
- fg->func_ = nullptr;
- active_ = nullptr;
- return true;
-}
-
-bool
-ModuleGenerator::finish(frontend::TokenStream& ts, ScopedJSDeletePtr<AsmJSModule>* module,
- SlowFunctionVector* slowFuncs)
-{
- MOZ_ASSERT(!active_);
-
- while (outstanding_ > 0) {
- if (!finishOutstandingTask())
- return false;
- }
-
- module_->setFunctionBytes(masm_.size());
-
- JitContext jitContext(CompileRuntime::get(args().runtime));
-
- // Now that all function definitions have been compiled and their function-
- // entry offsets are all known, patch inter-function calls and fill in the
- // function-pointer table offsets.
-
- if (!GenerateStubs(masm_, *module_, funcEntryOffsets_))
- return false;
-
- for (auto& cs : masm_.callSites()) {
- if (!cs.isInternal())
- continue;
- MOZ_ASSERT(cs.kind() == CallSiteDesc::Relative);
- uint32_t callerOffset = cs.returnAddressOffset();
- uint32_t calleeOffset = funcEntryOffsets_[cs.targetIndex()];
- masm_.patchCall(callerOffset, calleeOffset);
- }
-
- for (unsigned tableIndex = 0; tableIndex < funcPtrTables_.length(); tableIndex++) {
- FuncPtrTable& table = funcPtrTables_[tableIndex];
- AsmJSModule::OffsetVector entryOffsets;
- for (uint32_t funcIndex : table.elems)
- entryOffsets.append(funcEntryOffsets_[funcIndex]);
- module_->funcPtrTable(tableIndex).define(Move(entryOffsets));
- }
-
- masm_.finish();
- if (masm_.oom())
- return false;
-
- if (!module_->finish(cx_, ts, masm_))
- return false;
-
- *module = module_.forget();
- *slowFuncs = Move(slowFuncs_);
+ globalBytes_ += pad;
+ *globalDataOffset = globalBytes_;
+ globalBytes_ += bytes;
return true;
}
bool
ModuleGenerator::finishOutstandingTask()
{
MOZ_ASSERT(parallel_);
- CompileTask* task = nullptr;
+ IonCompileTask* task = nullptr;
{
AutoLockHelperThreadState lock;
while (true) {
MOZ_ASSERT(outstanding_ > 0);
if (HelperThreadState().wasmFailed())
return false;
@@ -264,104 +184,434 @@ ModuleGenerator::finishOutstandingTask()
HelperThreadState().wait(GlobalHelperThreadState::CONSUMER);
}
}
return finishTask(task);
}
bool
-ModuleGenerator::finishTask(CompileTask* task)
+ModuleGenerator::finishTask(IonCompileTask* task)
{
const FuncIR& func = task->func();
- FunctionCompileResults& results = task->results();
-
- // Merge the compiled results into the whole-module masm.
- size_t offset = masm_.size();
- if (!masm_.asmMergeWith(results.masm()))
- return false;
+ FuncCompileResults& results = task->results();
- // Create the code range now that we know offset of results in whole masm.
- AsmJSModule::CodeRange codeRange(func.line(), results.offsets());
- codeRange.functionOffsetBy(offset);
- if (!module_->addFunctionCodeRange(func.name(), codeRange))
- return false;
+ // Offset the recorded FuncOffsets by the offset of the function in the
+ // whole module's code segment.
+ uint32_t offsetInWhole = masm_.size();
+ results.offsets().offsetBy(offsetInWhole);
- // Compilation may complete out of order, so cannot simply append().
+ // Record the non-profiling entry for whole-module linking later.
if (func.index() >= funcEntryOffsets_.length()) {
if (!funcEntryOffsets_.resize(func.index() + 1))
return false;
}
- funcEntryOffsets_[func.index()] = codeRange.entry();
+ funcEntryOffsets_[func.index()] = results.offsets().nonProfilingEntry;
+
+ // Merge the compiled results into the whole-module masm.
+ DebugOnly<size_t> sizeBefore = masm_.size();
+ if (!masm_.asmMergeWith(results.masm()))
+ return false;
+ MOZ_ASSERT(masm_.size() == offsetInWhole + results.masm().size());
+
+ // Add the CodeRange for this function.
+ CacheableChars funcName = StringToNewUTF8CharsZ(cx_, *func.name());
+ if (!funcName)
+ return false;
+ uint32_t nameIndex = funcNames_.length();
+ if (!funcNames_.emplaceBack(Move(funcName)))
+ return false;
+ if (!codeRanges_.emplaceBack(nameIndex, func.line(), results.offsets()))
+ return false;
// Keep a record of slow functions for printing in the final console message.
unsigned totalTime = func.generateTime() + results.compileTime();
if (totalTime >= SlowFunction::msThreshold) {
- if (!slowFuncs_.append(SlowFunction(func.name(), totalTime, func.line(), func.column())))
+ if (!slowFuncs_.emplaceBack(func.name(), totalTime, func.line(), func.column()))
return false;
}
-#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
- AsmJSModule::ProfiledFunction pf(func.name(), codeRange.entry(), codeRange.end(),
- func.line(), func.column());
- if (!module().addProfiledFunction(pf))
- return false;
-#endif
-
task->reset();
freeTasks_.infallibleAppend(task);
return true;
}
-CompileArgs
-ModuleGenerator::args() const
-{
- return CompileArgs(cx_->compartment()->runtimeFromAnyThread(),
- module().usesSignalHandlersForOOB());
-}
-
const LifoSig*
ModuleGenerator::newLifoSig(const MallocSig& sig)
{
SigSet::AddPtr p = sigs_.lookupForAdd(sig);
if (p)
return *p;
LifoSig* lifoSig = LifoSig::new_(lifo_, sig);
if (!lifoSig || !sigs_.add(p, lifoSig))
return nullptr;
return lifoSig;
}
bool
-ModuleGenerator::declareFuncPtrTable(uint32_t numElems, uint32_t* funcPtrTableIndex)
+ModuleGenerator::allocateGlobalVar(ValType type, uint32_t* globalDataOffset)
+{
+ unsigned width = 0;
+ switch (type) {
+ case ValType::I32: case ValType::F32: width = 4; break;
+ case ValType::I64: case ValType::F64: width = 8; break;
+ case ValType::I32x4: case ValType::F32x4: width = 16; break;
+ }
+ return allocateGlobalBytes(width, width, globalDataOffset);
+}
+
+bool
+ModuleGenerator::declareImport(MallocSig&& sig, unsigned* index)
+{
+ static_assert(Module::SizeOfImportExit % sizeof(void*) == 0, "word aligned");
+
+ uint32_t globalDataOffset;
+ if (!allocateGlobalBytes(Module::SizeOfImportExit, sizeof(void*), &globalDataOffset))
+ return false;
+
+ *index = unsigned(imports_.length());
+ return imports_.emplaceBack(Move(sig), globalDataOffset);
+}
+
+uint32_t
+ModuleGenerator::numDeclaredImports() const
+{
+ return imports_.length();
+}
+
+uint32_t
+ModuleGenerator::importExitGlobalDataOffset(uint32_t index) const
+{
+ return imports_[index].exitGlobalDataOffset();
+}
+
+const MallocSig&
+ModuleGenerator::importSig(uint32_t index) const
+{
+ return imports_[index].sig();
+}
+
+bool
+ModuleGenerator::defineImport(uint32_t index, ProfilingOffsets interpExit, ProfilingOffsets jitExit)
+{
+ Import& import = imports_[index];
+ import.initInterpExitOffset(interpExit.begin);
+ import.initJitExitOffset(jitExit.begin);
+ return codeRanges_.emplaceBack(CodeRange::ImportInterpExit, interpExit) &&
+ codeRanges_.emplaceBack(CodeRange::ImportJitExit, jitExit);
+}
+
+bool
+ModuleGenerator::declareExport(MallocSig&& sig, uint32_t funcIndex, uint32_t* index)
+{
+ *index = exports_.length();
+ return exports_.emplaceBack(Move(sig), funcIndex);
+}
+
+uint32_t
+ModuleGenerator::exportFuncIndex(uint32_t index) const
+{
+ return exports_[index].funcIndex();
+}
+
+const MallocSig&
+ModuleGenerator::exportSig(uint32_t index) const
+{
+ return exports_[index].sig();
+}
+
+uint32_t
+ModuleGenerator::numDeclaredExports() const
+{
+ return exports_.length();
+}
+
+bool
+ModuleGenerator::defineExport(uint32_t index, Offsets offsets)
+{
+ exports_[index].initStubOffset(offsets.begin);
+ return codeRanges_.emplaceBack(CodeRange::Entry, offsets);
+}
+
+bool
+ModuleGenerator::startFunc(PropertyName* name, unsigned line, unsigned column,
+ FunctionGenerator* fg)
+{
+ MOZ_ASSERT(!activeFunc_);
+ MOZ_ASSERT(!finishedFuncs_);
+
+ if (freeTasks_.empty() && !finishOutstandingTask())
+ return false;
+
+ IonCompileTask* task = freeTasks_.popCopy();
+ FuncIR* func = task->lifo().new_<FuncIR>(task->lifo(), name, line, column);
+ if (!func)
+ return false;
+
+ task->init(*func);
+ fg->m_ = this;
+ fg->task_ = task;
+ fg->func_ = func;
+ activeFunc_ = fg;
+ return true;
+}
+
+bool
+ModuleGenerator::finishFunc(uint32_t funcIndex, const LifoSig& sig, unsigned generateTime,
+ FunctionGenerator* fg)
+{
+ MOZ_ASSERT(activeFunc_ == fg);
+
+ fg->func_->finish(funcIndex, sig, generateTime);
+
+ if (parallel_) {
+ if (!StartOffThreadWasmCompile(cx_, fg->task_))
+ return false;
+ outstanding_++;
+ } else {
+ if (!IonCompileFunction(fg->task_))
+ return false;
+ if (!finishTask(fg->task_))
+ return false;
+ }
+
+ fg->m_ = nullptr;
+ fg->task_ = nullptr;
+ fg->func_ = nullptr;
+ activeFunc_ = nullptr;
+ return true;
+}
+
+bool
+ModuleGenerator::finishFuncs()
+{
+ MOZ_ASSERT(!activeFunc_);
+ MOZ_ASSERT(!finishedFuncs_);
+
+ while (outstanding_ > 0) {
+ if (!finishOutstandingTask())
+ return false;
+ }
+
+ // During codegen, all wasm->wasm (internal) calls use AsmJSInternalCallee
+ // as the call target, which contains the function-index of the target.
+ // These get recorded in a CallSiteAndTargetVector in the MacroAssembler
+ // so that we can patch them now that all the function entry offsets are
+ // known.
+
+ for (CallSiteAndTarget& cs : masm_.callSites()) {
+ if (!cs.isInternal())
+ continue;
+ MOZ_ASSERT(cs.kind() == CallSiteDesc::Relative);
+ uint32_t callerOffset = cs.returnAddressOffset();
+ uint32_t calleeOffset = funcEntryOffsets_[cs.targetIndex()];
+ masm_.patchCall(callerOffset, calleeOffset);
+ }
+
+ funcBytes_ = masm_.size();
+ finishedFuncs_ = true;
+ return true;
+}
+
+bool
+ModuleGenerator::declareFuncPtrTable(uint32_t numElems, uint32_t* index)
{
// Here just add an uninitialized FuncPtrTable and claim space in the global
// data section. Later, 'defineFuncPtrTable' will be called with function
// indices for all the elements of the table.
// Avoid easy way to OOM the process.
if (numElems > 1024 * 1024)
return false;
- if (!module_->declareFuncPtrTable(numElems, funcPtrTableIndex))
+ uint32_t globalDataOffset;
+ if (!allocateGlobalBytes(numElems * sizeof(void*), sizeof(void*), &globalDataOffset))
+ return false;
+
+ StaticLinkData::FuncPtrTableVector& tables = staticLinkData_->funcPtrTables;
+
+ *index = tables.length();
+ if (!tables.emplaceBack(globalDataOffset))
+ return false;
+
+ if (!tables.back().elemOffsets.resize(numElems))
return false;
- MOZ_ASSERT(*funcPtrTableIndex == funcPtrTables_.length());
- return funcPtrTables_.emplaceBack(numElems);
+ return true;
+}
+
+uint32_t
+ModuleGenerator::funcPtrTableGlobalDataOffset(uint32_t index) const
+{
+ return staticLinkData_->funcPtrTables[index].globalDataOffset;
+}
+
+void
+ModuleGenerator::defineFuncPtrTable(uint32_t index, const Vector<uint32_t>& elemFuncIndices)
+{
+ MOZ_ASSERT(finishedFuncs_);
+
+ StaticLinkData::FuncPtrTable& table = staticLinkData_->funcPtrTables[index];
+ MOZ_ASSERT(table.elemOffsets.length() == elemFuncIndices.length());
+
+ for (size_t i = 0; i < elemFuncIndices.length(); i++)
+ table.elemOffsets[i] = funcEntryOffsets_[elemFuncIndices[i]];
+}
+
+bool
+ModuleGenerator::defineInlineStub(Offsets offsets)
+{
+ MOZ_ASSERT(finishedFuncs_);
+ return codeRanges_.emplaceBack(CodeRange::Inline, offsets);
+}
+
+bool
+ModuleGenerator::defineSyncInterruptStub(ProfilingOffsets offsets)
+{
+ MOZ_ASSERT(finishedFuncs_);
+ return codeRanges_.emplaceBack(CodeRange::Interrupt, offsets);
+}
+
+bool
+ModuleGenerator::defineAsyncInterruptStub(Offsets offsets)
+{
+ MOZ_ASSERT(finishedFuncs_);
+ staticLinkData_->pod.interruptOffset = offsets.begin;
+ return codeRanges_.emplaceBack(CodeRange::Inline, offsets);
}
bool
-ModuleGenerator::defineFuncPtrTable(uint32_t funcPtrTableIndex, FuncIndexVector&& elems)
+ModuleGenerator::defineOutOfBoundsStub(Offsets offsets)
{
- // The AsmJSModule needs to know the offsets in the code section which won't
- // be known until 'finish'. So just remember the function indices for now
- // and wait until 'finish' to hand over the offsets to the AsmJSModule.
-
- FuncPtrTable& table = funcPtrTables_[funcPtrTableIndex];
- if (table.numDeclared != elems.length() || !table.elems.empty())
- return false;
-
- table.elems = Move(elems);
- return true;
+ MOZ_ASSERT(finishedFuncs_);
+ staticLinkData_->pod.outOfBoundsOffset = offsets.begin;
+ return codeRanges_.emplaceBack(CodeRange::Inline, offsets);
}
+Module*
+ModuleGenerator::finish(Module::HeapBool usesHeap,
+ Module::SharedBool sharedHeap,
+ UniqueChars filename,
+ UniqueStaticLinkData* staticLinkData,
+ SlowFunctionVector* slowFuncs)
+{
+ MOZ_ASSERT(!activeFunc_);
+ MOZ_ASSERT(finishedFuncs_);
+
+ if (!GenerateStubs(*this, usesHeap))
+ return nullptr;
+
+ masm_.finish();
+ if (masm_.oom())
+ return nullptr;
+
+ // Start global data on a new page so JIT code may be given independent
+ // protection flags. Note the assumption, below, that global data starts
+ // right after the code.
+ uint32_t codeBytes = AlignBytes(masm_.bytesNeeded(), AsmJSPageSize);
+
+ // Inflate the global bytes up to page size so that the total bytes are a
+ // multiple of the page size (as required by the allocator functions).
+ globalBytes_ = AlignBytes(globalBytes_, AsmJSPageSize);
+ uint32_t totalBytes = codeBytes + globalBytes_;
+
+ // Allocate the code (guarded by a UniquePtr until it is given to the Module).
+ UniqueCodePtr code = AllocateCode(cx_, totalBytes);
+ if (!code)
+ return nullptr;
+
+ // Delay flushing until Module::dynamicallyLink. The flush-inhibited range
+ // is set by executableCopy.
+ AutoFlushICache afc("ModuleGenerator::finish", /* inhibit = */ true);
+ masm_.executableCopy(code.get());
+
+ // c.f. JitCode::copyFrom
+ MOZ_ASSERT(masm_.jumpRelocationTableBytes() == 0);
+ MOZ_ASSERT(masm_.dataRelocationTableBytes() == 0);
+ MOZ_ASSERT(masm_.preBarrierTableBytes() == 0);
+ MOZ_ASSERT(!masm_.hasSelfReference());
+
+ // Convert the CallSiteAndTargetVector (needed during generation) to a
+ // CallSiteVector (what is stored in the Module).
+ CallSiteVector callSites;
+ if (!callSites.appendAll(masm_.callSites()))
+ return nullptr;
+
+ // Add links to absolute addresses identified symbolically.
+ StaticLinkData::SymbolicLinkArray& symbolicLinks = staticLinkData_->symbolicLinks;
+ for (size_t i = 0; i < masm_.numAsmJSAbsoluteAddresses(); i++) {
+ AsmJSAbsoluteAddress src = masm_.asmJSAbsoluteAddress(i);
+ if (!symbolicLinks[src.target].append(src.patchAt.offset()))
+ return nullptr;
+ }
+
+ // Relative link metadata: absolute addresses that refer to another point within
+ // the asm.js module.
+
+ // CodeLabels are used for switch cases and loads from floating-point /
+ // SIMD values in the constant pool.
+ for (size_t i = 0; i < masm_.numCodeLabels(); i++) {
+ CodeLabel cl = masm_.codeLabel(i);
+ StaticLinkData::InternalLink link(StaticLinkData::InternalLink::CodeLabel);
+ link.patchAtOffset = masm_.labelToPatchOffset(*cl.patchAt());
+ link.targetOffset = cl.target()->offset();
+ if (!staticLinkData_->internalLinks.append(link))
+ return nullptr;
+ }
+
+#if defined(JS_CODEGEN_X86)
+ // Global data accesses in x86 need to be patched with the absolute
+ // address of the global. Globals are allocated sequentially after the
+ // code section so we can just use an InternalLink.
+ for (size_t i = 0; i < masm_.numAsmJSGlobalAccesses(); i++) {
+ AsmJSGlobalAccess a = masm_.asmJSGlobalAccess(i);
+ StaticLinkData::InternalLink link(StaticLinkData::InternalLink::RawPointer);
+ link.patchAtOffset = masm_.labelToPatchOffset(a.patchAt);
+ link.targetOffset = codeBytes + a.globalDataOffset;
+ if (!staticLinkData_->internalLinks.append(link))
+ return nullptr;
+ }
+#endif
+
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ // On MIPS we need to update all the long jumps because they contain an
+ // absolute address. The values are correctly patched for the current address
+ // space, but not after serialization or profiling-mode toggling.
+ for (size_t i = 0; i < masm_.numLongJumps(); i++) {
+ size_t off = masm_.longJump(i);
+ StaticLinkData::InternalLink link(StaticLinkData::InternalLink::InstructionImmediate);
+ link.patchAtOffset = off;
+ link.targetOffset = Assembler::ExtractInstructionImmediate(code.get() + off) -
+ uintptr_t(code.get());
+ if (!staticLinkData_->internalLinks.append(link))
+ return nullptr;
+ }
+#endif
+
+#if defined(JS_CODEGEN_X64)
+ // Global data accesses on x64 use rip-relative addressing and thus do
+ // not need patching after deserialization.
+ uint8_t* globalData = code.get() + codeBytes;
+ for (size_t i = 0; i < masm_.numAsmJSGlobalAccesses(); i++) {
+ AsmJSGlobalAccess a = masm_.asmJSGlobalAccess(i);
+ masm_.patchAsmJSGlobalAccess(a.patchAt, code.get(), globalData, a.globalDataOffset);
+ }
+#endif
+
+ *staticLinkData = Move(staticLinkData_);
+ *slowFuncs = Move(slowFuncs_);
+ return cx_->new_<Module>(args_,
+ funcBytes_,
+ codeBytes,
+ globalBytes_,
+ usesHeap,
+ sharedHeap,
+ Move(code),
+ Move(imports_),
+ Move(exports_),
+ masm_.extractHeapAccesses(),
+ Move(codeRanges_),
+ Move(callSites),
+ Move(funcNames_),
+ Move(filename));
+}
--- a/js/src/asmjs/WasmGenerator.h
+++ b/js/src/asmjs/WasmGenerator.h
@@ -11,132 +11,168 @@
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#ifndef asmjs_wasm_generator_h
-#define asmjs_wasm_generator_h
+#ifndef wasm_generator_h
+#define wasm_generator_h
#include "asmjs/WasmIonCompile.h"
-#include "asmjs/WasmStubs.h"
+#include "asmjs/WasmIR.h"
+#include "asmjs/WasmModule.h"
#include "jit/MacroAssembler.h"
namespace js {
-
-class AsmJSModule;
-namespace fronted { class TokenStream; }
-
namespace wasm {
class FunctionGenerator;
+// A slow function describes a function that took longer than msThreshold to
+// validate and compile.
struct SlowFunction
{
SlowFunction(PropertyName* name, unsigned ms, unsigned line, unsigned column)
: name(name), ms(ms), line(line), column(column)
{}
static const unsigned msThreshold = 250;
PropertyName* name;
unsigned ms;
unsigned line;
unsigned column;
};
-
typedef Vector<SlowFunction> SlowFunctionVector;
// A ModuleGenerator encapsulates the creation of a wasm module. During the
// lifetime of a ModuleGenerator, a sequence of FunctionGenerators are created
// and destroyed to compile the individual function bodies. After generating all
// functions, ModuleGenerator::finish() must be called to complete the
// compilation and extract the resulting wasm module.
class MOZ_STACK_CLASS ModuleGenerator
{
- public:
- typedef Vector<uint32_t, 0, SystemAllocPolicy> FuncIndexVector;
-
- private:
- struct FuncPtrTable
- {
- uint32_t numDeclared;
- FuncIndexVector elems;
-
- explicit FuncPtrTable(uint32_t numDeclared) : numDeclared(numDeclared) {}
- FuncPtrTable(FuncPtrTable&& rhs) : numDeclared(rhs.numDeclared), elems(Move(rhs.elems)) {}
- };
- typedef Vector<FuncPtrTable> FuncPtrTableVector;
+ typedef Vector<uint32_t> FuncOffsetVector;
struct SigHashPolicy
{
typedef const MallocSig& Lookup;
static HashNumber hash(Lookup l) { return l.hash(); }
static bool match(const LifoSig* lhs, Lookup rhs) { return *lhs == rhs; }
};
typedef HashSet<const LifoSig*, SigHashPolicy> SigSet;
- ExclusiveContext* cx_;
- ScopedJSDeletePtr<AsmJSModule> module_;
+ ExclusiveContext* cx_;
+ CompileArgs args_;
- LifoAlloc lifo_;
- jit::TempAllocator alloc_;
- jit::MacroAssembler masm_;
- SigSet sigs_;
+ // Data handed over to the Module in finish()
+ uint32_t globalBytes_;
+ ImportVector imports_;
+ ExportVector exports_;
+ CodeRangeVector codeRanges_;
+ CacheableCharsVector funcNames_;
+
+ // Data handed back to the caller in finish()
+ UniqueStaticLinkData staticLinkData_;
+ SlowFunctionVector slowFuncs_;
- bool parallel_;
- uint32_t outstanding_;
- Vector<CompileTask> tasks_;
- Vector<CompileTask*> freeTasks_;
+ // Data scoped to the ModuleGenerator's lifetime
+ LifoAlloc lifo_;
+ jit::JitContext jcx_;
+ jit::TempAllocator alloc_;
+ jit::MacroAssembler masm_;
+ SigSet sigs_;
- FuncOffsetVector funcEntryOffsets_;
- FuncPtrTableVector funcPtrTables_;
+ // Parallel compilation
+ bool parallel_;
+ uint32_t outstanding_;
+ Vector<IonCompileTask> tasks_;
+ Vector<IonCompileTask*> freeTasks_;
- SlowFunctionVector slowFuncs_;
- mozilla::DebugOnly<FunctionGenerator*> active_;
+ // Function compilation
+ uint32_t funcBytes_;
+ FuncOffsetVector funcEntryOffsets_;
+ DebugOnly<FunctionGenerator*> activeFunc_;
+ DebugOnly<bool> finishedFuncs_;
+ bool allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOffset);
bool finishOutstandingTask();
- bool finishTask(CompileTask* task);
- CompileArgs args() const;
+ bool finishTask(IonCompileTask* task);
public:
explicit ModuleGenerator(ExclusiveContext* cx);
~ModuleGenerator();
- bool init(ScriptSource* ss, uint32_t srcStart, uint32_t srcBodyStart, bool strict);
- AsmJSModule& module() const { return *module_; }
+ bool init();
+
+ CompileArgs args() const { return args_; }
+ jit::MacroAssembler& masm() { return masm_; }
+ const FuncOffsetVector& funcEntryOffsets() const { return funcEntryOffsets_; }
const LifoSig* newLifoSig(const MallocSig& sig);
- bool declareFuncPtrTable(uint32_t numElems, uint32_t* funcPtrTableIndex);
- bool defineFuncPtrTable(uint32_t funcPtrTableIndex, FuncIndexVector&& elems);
+
+ // Global data:
+ bool allocateGlobalVar(ValType type, uint32_t* globalDataOffset);
+ // Imports:
+ bool declareImport(MallocSig&& sig, uint32_t* index);
+ uint32_t numDeclaredImports() const;
+ uint32_t importExitGlobalDataOffset(uint32_t index) const;
+ const MallocSig& importSig(uint32_t index) const;
+ bool defineImport(uint32_t index, ProfilingOffsets interpExit, ProfilingOffsets jitExit);
+
+ // Exports:
+ bool declareExport(MallocSig&& sig, uint32_t funcIndex, uint32_t* index);
+ uint32_t numDeclaredExports() const;
+ uint32_t exportFuncIndex(uint32_t index) const;
+ const MallocSig& exportSig(uint32_t index) const;
+ bool defineExport(uint32_t index, Offsets offsets);
+
+ // Functions:
bool startFunc(PropertyName* name, unsigned line, unsigned column, FunctionGenerator* fg);
bool finishFunc(uint32_t funcIndex, const LifoSig& sig, unsigned generateTime, FunctionGenerator* fg);
+ bool finishFuncs();
- bool finish(frontend::TokenStream& ts, ScopedJSDeletePtr<AsmJSModule>* module,
- SlowFunctionVector* slowFuncs);
+ // Function-pointer tables:
+ bool declareFuncPtrTable(uint32_t numElems, uint32_t* index);
+ uint32_t funcPtrTableGlobalDataOffset(uint32_t index) const;
+ void defineFuncPtrTable(uint32_t index, const Vector<uint32_t>& elemFuncIndices);
+
+ // Stubs:
+ bool defineInlineStub(Offsets offsets);
+ bool defineSyncInterruptStub(ProfilingOffsets offsets);
+ bool defineAsyncInterruptStub(Offsets offsets);
+ bool defineOutOfBoundsStub(Offsets offsets);
+
+ // Null return indicates failure. The caller must immediately root a
+ // non-null return value.
+ Module* finish(Module::HeapBool usesHeap,
+ Module::SharedBool sharedHeap,
+ UniqueChars filename,
+ UniqueStaticLinkData* staticLinkData,
+ SlowFunctionVector* slowFuncs);
};
// A FunctionGenerator encapsulates the generation of a single function body.
// ModuleGenerator::startFunc must be called after construction and before doing
// anything else. After the body is complete, ModuleGenerator::finishFunc must
// be called before the FunctionGenerator is destroyed and the next function is
// started.
class MOZ_STACK_CLASS FunctionGenerator
{
friend class ModuleGenerator;
ModuleGenerator* m_;
- CompileTask* task_;
+ IonCompileTask* task_;
FuncIR* func_;
public:
FunctionGenerator() : m_(nullptr), task_(nullptr), func_(nullptr) {}
FuncIR& func() const { MOZ_ASSERT(func_); return *func_; }
};
} // namespace wasm
} // namespace js
-#endif // asmjs_wasm_generator_h
+#endif // wasm_generator_h
--- a/js/src/asmjs/WasmIR.h
+++ b/js/src/asmjs/WasmIR.h
@@ -11,20 +11,20 @@
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#ifndef asmjs_wasm_ir_h
-#define asmjs_wasm_ir_h
+#ifndef wasm_ir_h
+#define wasm_ir_h
-#include "asmjs/Wasm.h"
+#include "asmjs/WasmTypes.h"
namespace js {
class PropertyName;
namespace wasm {
enum class Stmt : uint8_t
@@ -396,18 +396,17 @@ enum NeedsBoundsCheck : uint8_t
// associated with a LifoAlloc allocation which contains all the memory
// referenced by the FuncIR.
class FuncIR
{
typedef Vector<wasm::Val, 4, LifoAllocPolicy<Fallible>> VarInitVector;
typedef Vector<uint8_t, 4096, LifoAllocPolicy<Fallible>> Bytecode;
// Note: this unrooted field assumes AutoKeepAtoms via TokenStream via
- // asm.js compilation. Wasm compilation will require an alternative way to
- // name CodeRanges (index).
+ // asm.js compilation.
PropertyName* name_;
unsigned line_;
unsigned column_;
uint32_t index_;
const wasm::LifoSig* sig_;
VarInitVector varInits_;
Bytecode bytecode_;
@@ -534,9 +533,9 @@ class FuncIR
wasm::Val varInit(size_t i) const { return varInits_[i]; }
size_t numLocals() const { return sig_->args().length() + varInits_.length(); }
unsigned generateTime() const { MOZ_ASSERT(generateTime_ != UINT_MAX); return generateTime_; }
};
} // namespace wasm
} // namespace js
-#endif // asmjs_wasm_ir_h
+#endif // wasm_ir_h
--- a/js/src/asmjs/WasmIonCompile.cpp
+++ b/js/src/asmjs/WasmIonCompile.cpp
@@ -35,43 +35,40 @@ typedef Vector<MBasicBlock*, 8, SystemAl
class FunctionCompiler
{
private:
typedef HashMap<uint32_t, BlockVector, DefaultHasher<uint32_t>, SystemAllocPolicy> LabeledBlockMap;
typedef HashMap<size_t, BlockVector, DefaultHasher<uint32_t>, SystemAllocPolicy> UnlabeledBlockMap;
typedef Vector<size_t, 4, SystemAllocPolicy> PositionStack;
typedef Vector<ValType, 4, SystemAllocPolicy> LocalTypes;
- CompileArgs args_;
- const FuncIR& func_;
- size_t pc_;
-
- TempAllocator& alloc_;
- MIRGraph& graph_;
- const CompileInfo& info_;
- MIRGenerator& mirGen_;
-
- MBasicBlock* curBlock_;
-
- PositionStack loopStack_;
- PositionStack breakableStack_;
- UnlabeledBlockMap unlabeledBreaks_;
- UnlabeledBlockMap unlabeledContinues_;
- LabeledBlockMap labeledBreaks_;
- LabeledBlockMap labeledContinues_;
-
- LocalTypes localTypes_;
-
- FunctionCompileResults& compileResults_;
+ const FuncIR& func_;
+ size_t pc_;
+
+ TempAllocator& alloc_;
+ MIRGraph& graph_;
+ const CompileInfo& info_;
+ MIRGenerator& mirGen_;
+
+ MBasicBlock* curBlock_;
+
+ PositionStack loopStack_;
+ PositionStack breakableStack_;
+ UnlabeledBlockMap unlabeledBreaks_;
+ UnlabeledBlockMap unlabeledContinues_;
+ LabeledBlockMap labeledBreaks_;
+ LabeledBlockMap labeledContinues_;
+
+ LocalTypes localTypes_;
+
+ FuncCompileResults& compileResults_;
public:
- FunctionCompiler(CompileArgs args, const FuncIR& func, MIRGenerator& mirGen,
- FunctionCompileResults& compileResults)
- : args_(args),
- func_(func),
+ FunctionCompiler(const FuncIR& func, MIRGenerator& mirGen, FuncCompileResults& compileResults)
+ : func_(func),
pc_(0),
alloc_(mirGen.alloc()),
graph_(mirGen.graph()),
info_(mirGen.info()),
mirGen_(mirGen),
curBlock_(nullptr),
compileResults_(compileResults)
{}
@@ -754,17 +751,17 @@ class FunctionCompiler
}
MAsmJSLoadFFIFunc* ptrFun = MAsmJSLoadFFIFunc::New(alloc(), globalDataOffset);
curBlock_->add(ptrFun);
return callPrivate(MAsmJSCall::Callee(ptrFun), call, ret, def);
}
- bool builtinCall(Builtin builtin, const Call& call, ValType type, MDefinition** def)
+ bool builtinCall(SymbolicAddress builtin, const Call& call, ValType type, MDefinition** def)
{
return callPrivate(MAsmJSCall::Callee(builtin), call, ToExprType(type), def);
}
/*********************************************** Control flow generation */
inline bool inDeadCode() const {
return curBlock_ == nullptr;
@@ -1633,17 +1630,17 @@ EmitMathBuiltinCall(FunctionCompiler& f,
f.startCallArgs(&call);
MDefinition* firstArg;
if (!EmitF32Expr(f, &firstArg) || !f.passArg(firstArg, ValType::F32, &call))
return false;
f.finishCallArgs(&call);
- Builtin callee = f32 == F32::Ceil ? Builtin::CeilF : Builtin::FloorF;
+ SymbolicAddress callee = f32 == F32::Ceil ? SymbolicAddress::CeilF : SymbolicAddress::FloorF;
return f.builtinCall(callee, call, ValType::F32, def);
}
static bool
EmitMathBuiltinCall(FunctionCompiler& f, F64 f64, MDefinition** def)
{
uint32_t lineno, column;
ReadCallLineCol(f, &lineno, &column);
@@ -1656,30 +1653,30 @@ EmitMathBuiltinCall(FunctionCompiler& f,
return false;
if (f64 == F64::Pow || f64 == F64::Atan2) {
MDefinition* secondArg;
if (!EmitF64Expr(f, &secondArg) || !f.passArg(secondArg, ValType::F64, &call))
return false;
}
- Builtin callee;
+ SymbolicAddress callee;
switch (f64) {
- case F64::Ceil: callee = Builtin::CeilD; break;
- case F64::Floor: callee = Builtin::FloorD; break;
- case F64::Sin: callee = Builtin::SinD; break;
- case F64::Cos: callee = Builtin::CosD; break;
- case F64::Tan: callee = Builtin::TanD; break;
- case F64::Asin: callee = Builtin::ASinD; break;
- case F64::Acos: callee = Builtin::ACosD; break;
- case F64::Atan: callee = Builtin::ATanD; break;
- case F64::Exp: callee = Builtin::ExpD; break;
- case F64::Log: callee = Builtin::LogD; break;
- case F64::Pow: callee = Builtin::PowD; break;
- case F64::Atan2: callee = Builtin::ATan2D; break;
+ case F64::Ceil: callee = SymbolicAddress::CeilD; break;
+ case F64::Floor: callee = SymbolicAddress::FloorD; break;
+ case F64::Sin: callee = SymbolicAddress::SinD; break;
+ case F64::Cos: callee = SymbolicAddress::CosD; break;
+ case F64::Tan: callee = SymbolicAddress::TanD; break;
+ case F64::Asin: callee = SymbolicAddress::ASinD; break;
+ case F64::Acos: callee = SymbolicAddress::ACosD; break;
+ case F64::Atan: callee = SymbolicAddress::ATanD; break;
+ case F64::Exp: callee = SymbolicAddress::ExpD; break;
+ case F64::Log: callee = SymbolicAddress::LogD; break;
+ case F64::Pow: callee = SymbolicAddress::PowD; break;
+ case F64::Atan2: callee = SymbolicAddress::ATan2D; break;
default: MOZ_CRASH("unexpected double math builtin callee");
}
f.finishCallArgs(&call);
return f.builtinCall(callee, call, ValType::F64, def);
}
@@ -2927,36 +2924,35 @@ EmitF32X4Expr(FunctionCompiler& f, MDefi
return EmitSimdStore(f, ValType::F32x4, def);
case F32X4::Bad:
break;
}
MOZ_CRASH("unexpected float32x4 expression");
}
bool
-wasm::CompileFunction(CompileTask* task)
+wasm::IonCompileFunction(IonCompileTask* task)
{
int64_t before = PRMJ_Now();
- CompileArgs args = task->args();
const FuncIR& func = task->func();
- FunctionCompileResults& results = task->results();
-
- JitContext jitContext(CompileRuntime::get(args.runtime), &results.alloc());
+ FuncCompileResults& results = task->results();
+
+ JitContext jitContext(CompileRuntime::get(task->runtime()), &results.alloc());
const JitCompileOptions options;
MIRGraph graph(&results.alloc());
CompileInfo compileInfo(func.numLocals());
MIRGenerator mir(nullptr, options, &results.alloc(), &graph, &compileInfo,
IonOptimizations.get(Optimization_AsmJS),
- args.usesSignalHandlersForOOB);
+ task->args().useSignalHandlersForOOB);
// Build MIR graph
{
- FunctionCompiler f(args, func, mir, results);
+ FunctionCompiler f(func, mir, results);
if (!f.init())
return false;
while (!f.done()) {
if (!EmitStatement(f))
return false;
}
--- a/js/src/asmjs/WasmIonCompile.h
+++ b/js/src/asmjs/WasmIonCompile.h
@@ -11,93 +11,102 @@
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#ifndef asmjs_wasm_ion_compile_h
-#define asmjs_wasm_ion_compile_h
+#ifndef wasm_ion_compile_h
+#define wasm_ion_compile_h
-#include "asmjs/AsmJSFrameIterator.h"
-#include "asmjs/WasmCompileArgs.h"
#include "asmjs/WasmIR.h"
#include "jit/MacroAssembler.h"
namespace js {
namespace wasm {
-class FunctionCompileResults
+// The FuncCompileResults contains the results of compiling a single function
+// body, ready to be merged into the whole-module MacroAssembler.
+class FuncCompileResults
{
jit::TempAllocator alloc_;
jit::MacroAssembler masm_;
- AsmJSFunctionOffsets offsets_;
+ FuncOffsets offsets_;
unsigned compileTime_;
- FunctionCompileResults(const FunctionCompileResults&) = delete;
- FunctionCompileResults& operator=(const FunctionCompileResults&) = delete;
+ FuncCompileResults(const FuncCompileResults&) = delete;
+ FuncCompileResults& operator=(const FuncCompileResults&) = delete;
public:
- explicit FunctionCompileResults(LifoAlloc& lifo)
+ explicit FuncCompileResults(LifoAlloc& lifo)
: alloc_(&lifo),
- masm_(jit::MacroAssembler::AsmJSToken(), &alloc_),
+ masm_(jit::MacroAssembler::AsmJSToken(), alloc_),
compileTime_(0)
{}
jit::TempAllocator& alloc() { return alloc_; }
jit::MacroAssembler& masm() { return masm_; }
-
- AsmJSFunctionOffsets& offsets() { return offsets_; }
- const AsmJSFunctionOffsets& offsets() const { return offsets_; }
+ FuncOffsets& offsets() { return offsets_; }
void setCompileTime(unsigned t) { MOZ_ASSERT(!compileTime_); compileTime_ = t; }
unsigned compileTime() const { return compileTime_; }
};
-class CompileTask
+// An IonCompileTask represents the task of compiling a single function body. An
+// IonCompileTask is filled with the wasm code to be compiled on the main
+// validation thread, sent off to an Ion compilation helper thread which creates
+// the FuncCompileResults, and finally sent back to the validation thread. To
+// save time allocating and freeing memory, IonCompileTasks are reset() and
+// reused.
+class IonCompileTask
{
+ JSRuntime* const runtime_;
+ const CompileArgs args_;
LifoAlloc lifo_;
- const CompileArgs args_;
const FuncIR* func_;
- mozilla::Maybe<FunctionCompileResults> results_;
+ mozilla::Maybe<FuncCompileResults> results_;
- CompileTask(const CompileTask&) = delete;
- CompileTask& operator=(const CompileTask&) = delete;
+ IonCompileTask(const IonCompileTask&) = delete;
+ IonCompileTask& operator=(const IonCompileTask&) = delete;
public:
- CompileTask(size_t defaultChunkSize, CompileArgs args)
- : lifo_(defaultChunkSize),
+ IonCompileTask(JSRuntime* runtime, CompileArgs args, size_t defaultChunkSize)
+ : runtime_(runtime),
args_(args),
+ lifo_(defaultChunkSize),
func_(nullptr)
{}
+ JSRuntime* runtime() const {
+ return runtime_;
+ }
LifoAlloc& lifo() {
return lifo_;
}
CompileArgs args() const {
return args_;
}
void init(const FuncIR& func) {
func_ = &func;
results_.emplace(lifo_);
}
const FuncIR& func() const {
MOZ_ASSERT(func_);
return *func_;
}
- FunctionCompileResults& results() {
+ FuncCompileResults& results() {
return *results_;
}
void reset() {
func_ = nullptr;
results_.reset();
lifo_.releaseAll();
}
};
bool
-CompileFunction(CompileTask* task);
+IonCompileFunction(IonCompileTask* task);
} // namespace wasm
} // namespace js
-#endif // asmjs_wasm_ion_compile_h
+#endif // wasm_ion_compile_h
new file mode 100644
--- /dev/null
+++ b/js/src/asmjs/WasmModule.cpp
@@ -0,0 +1,1369 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asmjs/WasmModule.h"
+
+#include "mozilla/BinarySearch.h"
+#include "mozilla/EnumeratedRange.h"
+#include "mozilla/PodOperations.h"
+
+#include "jsprf.h"
+
+#include "asmjs/AsmJSValidate.h"
+#include "asmjs/WasmSerialize.h"
+#include "builtin/AtomicsObject.h"
+#ifdef JS_ION_PERF
+# include "jit/PerfSpewer.h"
+#endif
+#include "jit/BaselineJIT.h"
+#include "jit/ExecutableAllocator.h"
+#include "js/MemoryMetrics.h"
+#ifdef MOZ_VTUNE
+# include "vtune/VTuneWrapper.h"
+#endif
+
+#include "jit/MacroAssembler-inl.h"
+#include "vm/ArrayBufferObject-inl.h"
+#include "vm/TypeInference-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+using mozilla::BinarySearch;
+using mozilla::MakeEnumeratedRange;
+using mozilla::PodZero;
+using mozilla::Swap;
+using JS::GenericNaN;
+
+UniqueCodePtr
+wasm::AllocateCode(ExclusiveContext* cx, size_t bytes)
+{
+ // On most platforms, this will allocate RWX memory. On iOS, or when
+ // --non-writable-jitcode is used, this will allocate RW memory. In this
+ // case, DynamicallyLinkModule will reprotect the code as RX.
+ unsigned permissions =
+ ExecutableAllocator::initialProtectionFlags(ExecutableAllocator::Writable);
+
+ void* p = AllocateExecutableMemory(nullptr, bytes, permissions, "asm-js-code", AsmJSPageSize);
+ if (!p)
+ ReportOutOfMemory(cx);
+
+ MOZ_ASSERT(uintptr_t(p) % AsmJSPageSize == 0);
+
+ return UniqueCodePtr((uint8_t*)p, CodeDeleter(bytes));
+}
+
+void
+CodeDeleter::operator()(uint8_t* p)
+{
+ DeallocateExecutableMemory(p, bytes_, AsmJSPageSize);
+}
+
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+// On MIPS, CodeLabels are instruction immediates so InternalLinks only
+// patch instruction immediates.
+StaticLinkData::InternalLink::InternalLink(Kind kind)
+{
+ MOZ_ASSERT(kind == CodeLabel || kind == InstructionImmediate);
+}
+
+bool
+StaticLinkData::InternalLink::isRawPointerPatch()
+{
+ return false;
+}
+#else
+// On the rest, CodeLabels are raw pointers so InternalLinks only patch
+// raw pointers.
+StaticLinkData::InternalLink::InternalLink(Kind kind)
+{
+ MOZ_ASSERT(kind == CodeLabel || kind == RawPointer);
+}
+
+bool
+StaticLinkData::InternalLink::isRawPointerPatch()
+{
+ return true;
+}
+#endif
+
+size_t
+StaticLinkData::SymbolicLinkArray::serializedSize() const
+{
+ size_t size = 0;
+ for (const OffsetVector& offsets : *this)
+ size += SerializedPodVectorSize(offsets);
+ return size;
+}
+
+uint8_t*
+StaticLinkData::SymbolicLinkArray::serialize(uint8_t* cursor) const
+{
+ for (const OffsetVector& offsets : *this)
+ cursor = SerializePodVector(cursor, offsets);
+ return cursor;
+}
+
+const uint8_t*
+StaticLinkData::SymbolicLinkArray::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
+{
+ for (OffsetVector& offsets : *this) {
+ cursor = DeserializePodVector(cx, cursor, &offsets);
+ if (!cursor)
+ return nullptr;
+ }
+ return cursor;
+}
+
+bool
+StaticLinkData::SymbolicLinkArray::clone(JSContext* cx, SymbolicLinkArray* out) const
+{
+ for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
+ if (!ClonePodVector(cx, (*this)[imm], &(*out)[imm]))
+ return false;
+ }
+ return true;
+}
+
+size_t
+StaticLinkData::SymbolicLinkArray::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+ size_t size = 0;
+ for (const OffsetVector& offsets : *this)
+ size += offsets.sizeOfExcludingThis(mallocSizeOf);
+ return size;
+}
+
+size_t
+StaticLinkData::FuncPtrTable::serializedSize() const
+{
+ return sizeof(globalDataOffset) +
+ SerializedPodVectorSize(elemOffsets);
+}
+
+uint8_t*
+StaticLinkData::FuncPtrTable::serialize(uint8_t* cursor) const
+{
+ cursor = WriteBytes(cursor, &globalDataOffset, sizeof(globalDataOffset));
+ cursor = SerializePodVector(cursor, elemOffsets);
+ return cursor;
+}
+
+const uint8_t*
+StaticLinkData::FuncPtrTable::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
+{
+ (cursor = ReadBytes(cursor, &globalDataOffset, sizeof(globalDataOffset))) &&
+ (cursor = DeserializePodVector(cx, cursor, &elemOffsets));
+ return cursor;
+}
+
+bool
+StaticLinkData::FuncPtrTable::clone(JSContext* cx, FuncPtrTable* out) const
+{
+ out->globalDataOffset = globalDataOffset;
+ return ClonePodVector(cx, elemOffsets, &out->elemOffsets);
+}
+
+size_t
+StaticLinkData::FuncPtrTable::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+ return elemOffsets.sizeOfExcludingThis(mallocSizeOf);
+}
+
+size_t
+StaticLinkData::serializedSize() const
+{
+ return sizeof(pod) +
+ SerializedPodVectorSize(internalLinks) +
+ symbolicLinks.serializedSize() +
+ SerializedVectorSize(funcPtrTables);
+}
+
+uint8_t*
+StaticLinkData::serialize(uint8_t* cursor) const
+{
+ cursor = WriteBytes(cursor, &pod, sizeof(pod));
+ cursor = SerializePodVector(cursor, internalLinks);
+ cursor = symbolicLinks.serialize(cursor);
+ cursor = SerializeVector(cursor, funcPtrTables);
+ return cursor;
+}
+
+const uint8_t*
+StaticLinkData::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
+{
+ (cursor = ReadBytes(cursor, &pod, sizeof(pod))) &&
+ (cursor = DeserializePodVector(cx, cursor, &internalLinks)) &&
+ (cursor = symbolicLinks.deserialize(cx, cursor)) &&
+ (cursor = DeserializeVector(cx, cursor, &funcPtrTables));
+ return cursor;
+}
+
+bool
+StaticLinkData::clone(JSContext* cx, StaticLinkData* out) const
+{
+ out->pod = pod;
+ return ClonePodVector(cx, internalLinks, &out->internalLinks) &&
+ symbolicLinks.clone(cx, &out->symbolicLinks) &&
+ CloneVector(cx, funcPtrTables, &out->funcPtrTables);
+}
+
+size_t
+StaticLinkData::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+ size_t size = internalLinks.sizeOfExcludingThis(mallocSizeOf) +
+ symbolicLinks.sizeOfExcludingThis(mallocSizeOf) +
+ SizeOfVectorExcludingThis(funcPtrTables, mallocSizeOf);
+
+ for (const OffsetVector& offsets : symbolicLinks)
+ size += offsets.sizeOfExcludingThis(mallocSizeOf);
+
+ return size;
+}
+
+static size_t
+SerializedSigSize(const MallocSig& sig)
+{
+ return sizeof(ExprType) +
+ SerializedPodVectorSize(sig.args());
+}
+
+static uint8_t*
+SerializeSig(uint8_t* cursor, const MallocSig& sig)
+{
+ cursor = WriteScalar<ExprType>(cursor, sig.ret());
+ cursor = SerializePodVector(cursor, sig.args());
+ return cursor;
+}
+
+static const uint8_t*
+DeserializeSig(ExclusiveContext* cx, const uint8_t* cursor, MallocSig* sig)
+{
+ ExprType ret;
+ cursor = ReadScalar<ExprType>(cursor, &ret);
+
+ MallocSig::ArgVector args;
+ cursor = DeserializePodVector(cx, cursor, &args);
+ if (!cursor)
+ return nullptr;
+
+ sig->init(Move(args), ret);
+ return cursor;
+}
+
+static bool
+CloneSig(JSContext* cx, const MallocSig& sig, MallocSig* out)
+{
+ MallocSig::ArgVector args;
+ if (!ClonePodVector(cx, sig.args(), &args))
+ return false;
+
+ out->init(Move(args), sig.ret());
+ return true;
+}
+
+static size_t
+SizeOfSigExcludingThis(const MallocSig& sig, MallocSizeOf mallocSizeOf)
+{
+ return sig.args().sizeOfExcludingThis(mallocSizeOf);
+}
+
+size_t
+Export::serializedSize() const
+{
+ return SerializedSigSize(sig_) +
+ sizeof(pod);
+}
+
+uint8_t*
+Export::serialize(uint8_t* cursor) const
+{
+ cursor = SerializeSig(cursor, sig_);
+ cursor = WriteBytes(cursor, &pod, sizeof(pod));
+ return cursor;
+}
+
+const uint8_t*
+Export::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
+{
+ (cursor = DeserializeSig(cx, cursor, &sig_)) &&
+ (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
+ return cursor;
+}
+
+bool
+Export::clone(JSContext* cx, Export* out) const
+{
+ out->pod = pod;
+ return CloneSig(cx, sig_, &out->sig_);
+}
+
+size_t
+Export::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+ return SizeOfSigExcludingThis(sig_, mallocSizeOf);
+}
+
+size_t
+Import::serializedSize() const
+{
+ return SerializedSigSize(sig_) +
+ sizeof(pod);
+}
+
+uint8_t*
+Import::serialize(uint8_t* cursor) const
+{
+ cursor = SerializeSig(cursor, sig_);
+ cursor = WriteBytes(cursor, &pod, sizeof(pod));
+ return cursor;
+}
+
+const uint8_t*
+Import::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
+{
+ (cursor = DeserializeSig(cx, cursor, &sig_)) &&
+ (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
+ return cursor;
+}
+
+bool
+Import::clone(JSContext* cx, Import* out) const
+{
+ out->pod = pod;
+ return CloneSig(cx, sig_, &out->sig_);
+}
+
+size_t
+Import::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+ return SizeOfSigExcludingThis(sig_, mallocSizeOf);
+}
+
+CodeRange::CodeRange(Kind kind, Offsets offsets)
+ : nameIndex_(0),
+ lineNumber_(0),
+ begin_(offsets.begin),
+ profilingReturn_(0),
+ end_(offsets.end)
+{
+ PodZero(&u); // zero padding for Valgrind
+ u.kind_ = kind;
+
+ MOZ_ASSERT(begin_ <= end_);
+ MOZ_ASSERT(u.kind_ == Entry || u.kind_ == Inline);
+}
+
+CodeRange::CodeRange(Kind kind, ProfilingOffsets offsets)
+ : nameIndex_(0),
+ lineNumber_(0),
+ begin_(offsets.begin),
+ profilingReturn_(offsets.profilingReturn),
+ end_(offsets.end)
+{
+ PodZero(&u); // zero padding for Valgrind
+ u.kind_ = kind;
+
+ MOZ_ASSERT(begin_ < profilingReturn_);
+ MOZ_ASSERT(profilingReturn_ < end_);
+ MOZ_ASSERT(u.kind_ == ImportJitExit || u.kind_ == ImportInterpExit || u.kind_ == Interrupt);
+}
+
+CodeRange::CodeRange(uint32_t nameIndex, uint32_t lineNumber, FuncOffsets offsets)
+ : nameIndex_(nameIndex),
+ lineNumber_(lineNumber)
+{
+ PodZero(&u); // zero padding for Valgrind
+ u.kind_ = Function;
+
+ MOZ_ASSERT(offsets.nonProfilingEntry - offsets.begin <= UINT8_MAX);
+ begin_ = offsets.begin;
+ u.func.beginToEntry_ = offsets.nonProfilingEntry - begin_;
+
+ MOZ_ASSERT(offsets.nonProfilingEntry < offsets.profilingReturn);
+ MOZ_ASSERT(offsets.profilingReturn - offsets.profilingJump <= UINT8_MAX);
+ MOZ_ASSERT(offsets.profilingReturn - offsets.profilingEpilogue <= UINT8_MAX);
+ profilingReturn_ = offsets.profilingReturn;
+ u.func.profilingJumpToProfilingReturn_ = profilingReturn_ - offsets.profilingJump;
+ u.func.profilingEpilogueToProfilingReturn_ = profilingReturn_ - offsets.profilingEpilogue;
+
+ MOZ_ASSERT(offsets.nonProfilingEntry < offsets.end);
+ end_ = offsets.end;
+}
+
+size_t
+CacheableChars::serializedSize() const
+{
+ return sizeof(uint32_t) + strlen(get());
+}
+
+uint8_t*
+CacheableChars::serialize(uint8_t* cursor) const
+{
+ uint32_t length = strlen(get());
+ cursor = WriteBytes(cursor, &length, sizeof(uint32_t));
+ cursor = WriteBytes(cursor, get(), length);
+ return cursor;
+}
+
+const uint8_t*
+CacheableChars::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
+{
+ uint32_t length;
+ cursor = ReadBytes(cursor, &length, sizeof(uint32_t));
+
+ reset(js_pod_calloc<char>(length + 1));
+ if (!get())
+ return nullptr;
+
+ cursor = ReadBytes(cursor, get(), length);
+ return cursor;
+}
+
+bool
+CacheableChars::clone(JSContext* cx, CacheableChars* out) const
+{
+ *out = make_string_copy(get());
+ return !!*out;
+}
+
+class Module::AutoMutateCode
+{
+ AutoWritableJitCode awjc_;
+ AutoFlushICache afc_;
+
+ public:
+ AutoMutateCode(JSContext* cx, Module& module, const char* name)
+ : awjc_(cx->runtime(), module.code(), module.pod.codeBytes_),
+ afc_(name)
+ {
+ AutoFlushICache::setRange(uintptr_t(module.code()), module.pod.codeBytes_);
+ }
+};
+
+uint32_t
+Module::totalBytes() const
+{
+ return pod.codeBytes_ + pod.globalBytes_;
+}
+
+uint8_t*
+Module::rawHeapPtr() const
+{
+ return const_cast<Module*>(this)->rawHeapPtr();
+}
+
+uint8_t*&
+Module::rawHeapPtr()
+{
+ return *(uint8_t**)(globalData() + HeapGlobalDataOffset);
+}
+
+void
+Module::specializeToHeap(ArrayBufferObjectMaybeShared* heap)
+{
+ MOZ_ASSERT_IF(heap->is<ArrayBufferObject>(), heap->as<ArrayBufferObject>().isAsmJS());
+ MOZ_ASSERT(!maybeHeap_);
+ MOZ_ASSERT(!rawHeapPtr());
+
+ uint8_t* ptrBase = heap->dataPointerEither().unwrap(/*safe - protected by Module methods*/);
+ uint32_t heapLength = heap->byteLength();
+#if defined(JS_CODEGEN_X86)
+ // An access is out-of-bounds iff
+ // ptr + offset + data-type-byte-size > heapLength
+ // i.e. ptr > heapLength - data-type-byte-size - offset. data-type-byte-size
+ // and offset are already included in the addend so we
+ // just have to add the heap length here.
+ for (const HeapAccess& access : heapAccesses_) {
+ if (access.hasLengthCheck())
+ X86Encoding::AddInt32(access.patchLengthAt(code()), heapLength);
+ void* addr = access.patchHeapPtrImmAt(code());
+ uint32_t disp = reinterpret_cast<uint32_t>(X86Encoding::GetPointer(addr));
+ MOZ_ASSERT(disp <= INT32_MAX);
+ X86Encoding::SetPointer(addr, (void*)(ptrBase + disp));
+ }
+#elif defined(JS_CODEGEN_X64)
+ // Even with signal handling being used for most bounds checks, there may be
+ // atomic operations that depend on explicit checks.
+ //
+ // If we have any explicit bounds checks, we need to patch the heap length
+ // checks at the right places. All accesses that have been recorded are the
+ // only ones that need bound checks (see also
+ // CodeGeneratorX64::visitAsmJS{Load,Store,CompareExchange,Exchange,AtomicBinop}Heap)
+ for (const HeapAccess& access : heapAccesses_) {
+ // See comment above for x86 codegen.
+ if (access.hasLengthCheck())
+ X86Encoding::AddInt32(access.patchLengthAt(code()), heapLength);
+ }
+#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
+ defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ for (const HeapAccess& access : heapAccesses_)
+ Assembler::UpdateBoundsCheck(heapLength, (Instruction*)(access.insnOffset() + code()));
+#else
+# error "Missing architecture"
+#endif
+
+ maybeHeap_ = heap;
+ rawHeapPtr() = ptrBase;
+}
+
+void
+Module::despecializeFromHeap(ArrayBufferObjectMaybeShared* heap)
+{
+ MOZ_ASSERT_IF(maybeHeap_, maybeHeap_ == heap);
+ MOZ_ASSERT_IF(rawHeapPtr(), rawHeapPtr() == heap->dataPointerEither().unwrap());
+
+#if defined(JS_CODEGEN_X86)
+ uint32_t heapLength = heap->byteLength();
+ uint8_t* ptrBase = heap->dataPointerEither().unwrap(/*safe - used for value*/);
+ for (unsigned i = 0; i < heapAccesses_.length(); i++) {
+ const HeapAccess& access = heapAccesses_[i];
+ if (access.hasLengthCheck())
+ X86Encoding::AddInt32(access.patchLengthAt(code()), -heapLength);
+ void* addr = access.patchHeapPtrImmAt(code());
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(X86Encoding::GetPointer(addr));
+ MOZ_ASSERT(ptr >= ptrBase);
+ X86Encoding::SetPointer(addr, reinterpret_cast<void*>(ptr - ptrBase));
+ }
+#elif defined(JS_CODEGEN_X64)
+ uint32_t heapLength = heap->byteLength();
+ for (unsigned i = 0; i < heapAccesses_.length(); i++) {
+ const HeapAccess& access = heapAccesses_[i];
+ if (access.hasLengthCheck())
+ X86Encoding::AddInt32(access.patchLengthAt(code()), -heapLength);
+ }
+#endif
+
+ maybeHeap_ = nullptr;
+ rawHeapPtr() = nullptr;
+}
+
+void
+Module::sendCodeRangesToProfiler(JSContext* cx)
+{
+#ifdef JS_ION_PERF
+ if (PerfFuncEnabled()) {
+ for (const CodeRange& codeRange : codeRanges_) {
+ if (!codeRange.isFunction())
+ continue;
+
+ uintptr_t start = uintptr_t(code() + codeRange.begin());
+ uintptr_t end = uintptr_t(code() + codeRange.end());
+ uintptr_t size = end - start;
+ const char* file = filename_.get();
+ unsigned line = codeRange.funcLineNumber();
+ unsigned column = 0;
+ const char* name = funcNames_[codeRange.funcNameIndex()].get();
+
+ writePerfSpewerAsmJSFunctionMap(start, size, file, line, column, name);
+ }
+ }
+#endif
+#ifdef MOZ_VTUNE
+ if (IsVTuneProfilingActive()) {
+ for (const CodeRange& codeRange : codeRanges_) {
+ if (!codeRange.isFunction())
+ continue;
+
+ uintptr_t start = uintptr_t(code() + codeRange.begin());
+ uintptr_t end = uintptr_t(code() + codeRange.end());
+ uintptr_t size = end - start;
+ const char* name = funcNames_[codeRange.funcNameIndex()].get();
+
+ unsigned method_id = iJIT_GetNewMethodID();
+ if (method_id == 0)
+ return;
+ iJIT_Method_Load method;
+ method.method_id = method_id;
+ method.method_name = const_cast<char*>(name);
+ method.method_load_address = (void*)start;
+ method.method_size = size;
+ method.line_number_size = 0;
+ method.line_number_table = nullptr;
+ method.class_id = 0;
+ method.class_file_name = nullptr;
+ method.source_file_name = nullptr;
+ iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED, (void*)&method);
+ }
+ }
+#endif
+}
+
+Module::ImportExit&
+Module::importToExit(const Import& import)
+{
+ return *reinterpret_cast<ImportExit*>(globalData() + import.exitGlobalDataOffset());
+}
+
+/* static */ Module::CacheablePod
+Module::zeroPod()
+{
+ CacheablePod pod = {0, 0, 0, false, false, false, false};
+ return pod;
+}
+
+void
+Module::init()
+{
+ staticallyLinked_ = false;
+ interrupt_ = nullptr;
+ outOfBounds_ = nullptr;
+ dynamicallyLinked_ = false;
+ prev_ = nullptr;
+ next_ = nullptr;
+ interrupted_ = false;
+
+ *(double*)(globalData() + NaN64GlobalDataOffset) = GenericNaN();
+ *(float*)(globalData() + NaN32GlobalDataOffset) = GenericNaN();
+}
+
+// Private constructor used for deserialization and cloning.
+Module::Module(const CacheablePod& pod,
+ UniqueCodePtr code,
+ ImportVector&& imports,
+ ExportVector&& exports,
+ HeapAccessVector&& heapAccesses,
+ CodeRangeVector&& codeRanges,
+ CallSiteVector&& callSites,
+ CacheableCharsVector&& funcNames,
+ CacheableChars filename,
+ CacheBool loadedFromCache,
+ ProfilingBool profilingEnabled,
+ FuncLabelVector&& funcLabels)
+ : pod(pod),
+ code_(Move(code)),
+ imports_(Move(imports)),
+ exports_(Move(exports)),
+ heapAccesses_(Move(heapAccesses)),
+ codeRanges_(Move(codeRanges)),
+ callSites_(Move(callSites)),
+ funcNames_(Move(funcNames)),
+ filename_(Move(filename)),
+ loadedFromCache_(loadedFromCache),
+ profilingEnabled_(profilingEnabled),
+ funcLabels_(Move(funcLabels))
+{
+ MOZ_ASSERT_IF(!profilingEnabled, funcLabels_.empty());
+ MOZ_ASSERT_IF(profilingEnabled, funcNames_.length() == funcLabels_.length());
+ init();
+}
+
+// Public constructor for compilation.
+Module::Module(CompileArgs args,
+ uint32_t functionBytes,
+ uint32_t codeBytes,
+ uint32_t globalBytes,
+ HeapBool usesHeap,
+ SharedBool sharedHeap,
+ UniqueCodePtr code,
+ ImportVector&& imports,
+ ExportVector&& exports,
+ HeapAccessVector&& heapAccesses,
+ CodeRangeVector&& codeRanges,
+ CallSiteVector&& callSites,
+ CacheableCharsVector&& funcNames,
+ CacheableChars filename)
+ : pod(zeroPod()),
+ code_(Move(code)),
+ imports_(Move(imports)),
+ exports_(Move(exports)),
+ heapAccesses_(Move(heapAccesses)),
+ codeRanges_(Move(codeRanges)),
+ callSites_(Move(callSites)),
+ funcNames_(Move(funcNames)),
+ filename_(Move(filename)),
+ loadedFromCache_(false),
+ profilingEnabled_(false)
+{
+ // Work around MSVC 2013 bug around {} member initialization.
+ const_cast<uint32_t&>(pod.functionBytes_) = functionBytes;
+ const_cast<uint32_t&>(pod.codeBytes_) = codeBytes;
+ const_cast<uint32_t&>(pod.globalBytes_) = globalBytes;
+ const_cast<bool&>(pod.usesHeap_) = bool(usesHeap);
+ const_cast<bool&>(pod.sharedHeap_) = bool(sharedHeap);
+ const_cast<bool&>(pod.usesSignalHandlersForOOB_) = args.useSignalHandlersForOOB;
+ const_cast<bool&>(pod.usesSignalHandlersForInterrupt_) = args.useSignalHandlersForInterrupt;
+
+ MOZ_ASSERT_IF(sharedHeap, usesHeap);
+ init();
+}
+
+Module::~Module()
+{
+ MOZ_ASSERT(!interrupted_);
+
+ if (code_) {
+ for (unsigned i = 0; i < imports_.length(); i++) {
+ ImportExit& exit = importToExit(imports_[i]);
+ if (exit.baselineScript)
+ exit.baselineScript->removeDependentWasmModule(*this, i);
+ }
+ }
+
+ if (prev_)
+ *prev_ = next_;
+ if (next_)
+ next_->prev_ = prev_;
+}
+
+void
+Module::trace(JSTracer* trc)
+{
+ for (const Import& import : imports_) {
+ if (importToExit(import).fun)
+ TraceEdge(trc, &importToExit(import).fun, "wasm function import");
+ }
+
+ if (maybeHeap_)
+ TraceEdge(trc, &maybeHeap_, "wasm buffer");
+}
+
+CompileArgs
+Module::compileArgs() const
+{
+ CompileArgs args;
+ args.useSignalHandlersForOOB = pod.usesSignalHandlersForOOB_;
+ args.useSignalHandlersForInterrupt = pod.usesSignalHandlersForInterrupt_;
+ return args;
+}
+
+bool
+Module::containsFunctionPC(void* pc) const
+{
+ return pc >= code() && pc < (code() + pod.functionBytes_);
+}
+
+bool
+Module::containsCodePC(void* pc) const
+{
+ return pc >= code() && pc < (code() + pod.codeBytes_);
+}
+
+struct CallSiteRetAddrOffset
+{
+ const CallSiteVector& callSites;
+ explicit CallSiteRetAddrOffset(const CallSiteVector& callSites) : callSites(callSites) {}
+ uint32_t operator[](size_t index) const {
+ return callSites[index].returnAddressOffset();
+ }
+};
+
+const CallSite*
+Module::lookupCallSite(void* returnAddress) const
+{
+ uint32_t target = ((uint8_t*)returnAddress) - code();
+ size_t lowerBound = 0;
+ size_t upperBound = callSites_.length();
+
+ size_t match;
+ if (!BinarySearch(CallSiteRetAddrOffset(callSites_), lowerBound, upperBound, target, &match))
+ return nullptr;
+
+ return &callSites_[match];
+}
+
+const CodeRange*
+Module::lookupCodeRange(void* pc) const
+{
+ auto target = CodeRange::PC((uint8_t*)pc - code());
+ size_t lowerBound = 0;
+ size_t upperBound = codeRanges_.length();
+
+ size_t match;
+ if (!BinarySearch(codeRanges_, lowerBound, upperBound, target, &match))
+ return nullptr;
+
+ return &codeRanges_[match];
+}
+
+struct HeapAccessOffset
+{
+ const HeapAccessVector& accesses;
+ explicit HeapAccessOffset(const HeapAccessVector& accesses) : accesses(accesses) {}
+ uintptr_t operator[](size_t index) const {
+ return accesses[index].insnOffset();
+ }
+};
+
+const HeapAccess*
+Module::lookupHeapAccess(void* pc) const
+{
+ MOZ_ASSERT(containsFunctionPC(pc));
+
+ uint32_t target = ((uint8_t*)pc) - code();
+ size_t lowerBound = 0;
+ size_t upperBound = heapAccesses_.length();
+
+ size_t match;
+ if (!BinarySearch(HeapAccessOffset(heapAccesses_), lowerBound, upperBound, target, &match))
+ return nullptr;
+
+ return &heapAccesses_[match];
+}
+
+bool
+Module::staticallyLink(ExclusiveContext* cx, const StaticLinkData& linkData)
+{
+ MOZ_ASSERT(!dynamicallyLinked_);
+ MOZ_ASSERT(!staticallyLinked_);
+ staticallyLinked_ = true;
+
+ // Push a JitContext for benefit of IsCompilingAsmJS and delay flushing
+ // until Module::dynamicallyLink.
+ JitContext jcx(CompileRuntime::get(cx->compartment()->runtimeFromAnyThread()));
+ MOZ_ASSERT(IsCompilingAsmJS());
+ AutoFlushICache afc("Module::staticallyLink", /* inhibit = */ true);
+ AutoFlushICache::setRange(uintptr_t(code()), pod.codeBytes_);
+
+ interrupt_ = code() + linkData.pod.interruptOffset;
+ outOfBounds_ = code() + linkData.pod.outOfBoundsOffset;
+
+ for (StaticLinkData::InternalLink link : linkData.internalLinks) {
+ uint8_t* patchAt = code() + link.patchAtOffset;
+ void* target = code() + link.targetOffset;
+ if (profilingEnabled_) {
+ const CodeRange* codeRange = lookupCodeRange(target);
+ if (codeRange && codeRange->isFunction())
+ target = code() + codeRange->funcProfilingEntry();
+ }
+ if (link.isRawPointerPatch())
+ *(void**)(patchAt) = target;
+ else
+ Assembler::PatchInstructionImmediate(patchAt, PatchedImmPtr(target));
+ }
+
+ for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
+ const StaticLinkData::OffsetVector& offsets = linkData.symbolicLinks[imm];
+ for (size_t i = 0; i < offsets.length(); i++) {
+ uint8_t* patchAt = code() + offsets[i];
+ void* target = AddressOf(imm, cx);
+ Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt),
+ PatchedImmPtr(target),
+ PatchedImmPtr((void*)-1));
+ }
+ }
+
+ for (const StaticLinkData::FuncPtrTable& table : linkData.funcPtrTables) {
+ auto array = reinterpret_cast<void**>(globalData() + table.globalDataOffset);
+ for (size_t i = 0; i < table.elemOffsets.length(); i++) {
+ uint8_t* elem = code() + table.elemOffsets[i];
+ if (profilingEnabled_)
+ elem = code() + lookupCodeRange(elem)->funcProfilingEntry();
+ array[i] = elem;
+ }
+ }
+
+ // CodeRangeVector, CallSiteVector and the code technically have all the
+ // necessary info to do all the updates necessary in setProfilingEnabled.
+ // However, to simplify the finding of function-pointer table sizes and
+ // global-data offsets, save just that information here.
+
+ if (!funcPtrTables_.appendAll(linkData.funcPtrTables)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+bool
+Module::dynamicallyLink(JSContext* cx, Handle<ArrayBufferObjectMaybeShared*> heap,
+ const AutoVectorRooter<JSFunction*>& imports)
+{
+ MOZ_ASSERT(staticallyLinked_);
+ MOZ_ASSERT(!dynamicallyLinked_);
+ dynamicallyLinked_ = true;
+
+ // Add this module to the JSRuntime-wide list of dynamically-linked modules.
+ next_ = cx->runtime()->linkedWasmModules;
+ prev_ = &cx->runtime()->linkedWasmModules;
+ cx->runtime()->linkedWasmModules = this;
+ if (next_)
+ next_->prev_ = &next_;
+
+ // Push a JitContext for benefit of IsCompilingAsmJS and flush the ICache.
+ // We've been inhibiting flushing up to this point so flush it all now.
+ JitContext jcx(CompileRuntime::get(cx->compartment()->runtimeFromAnyThread()));
+ MOZ_ASSERT(IsCompilingAsmJS());
+ AutoFlushICache afc("Module::dynamicallyLink");
+ AutoFlushICache::setRange(uintptr_t(code()), pod.codeBytes_);
+
+ // Initialize imports with actual imported values.
+ MOZ_ASSERT(imports.length() == imports_.length());
+ for (size_t i = 0; i < imports_.length(); i++) {
+ const Import& import = imports_[i];
+ ImportExit& exit = importToExit(import);
+ exit.code = code() + import.interpExitCodeOffset();
+ exit.fun = imports[i];
+ exit.baselineScript = nullptr;
+ }
+
+ // Specialize code to the actual heap.
+ if (heap)
+ specializeToHeap(heap);
+
+ // See AllocateCode comment above.
+ ExecutableAllocator::makeExecutable(code(), pod.codeBytes_);
+
+ sendCodeRangesToProfiler(cx);
+ return true;
+}
+
+ArrayBufferObjectMaybeShared*
+Module::maybeBuffer() const
+{
+ MOZ_ASSERT(dynamicallyLinked_);
+ return maybeHeap_;
+}
+
+SharedMem<uint8_t*>
+Module::maybeHeap() const
+{
+ MOZ_ASSERT(dynamicallyLinked_);
+ MOZ_ASSERT_IF(!pod.usesHeap_, rawHeapPtr() == nullptr);
+ return pod.sharedHeap_
+ ? SharedMem<uint8_t*>::shared(rawHeapPtr())
+ : SharedMem<uint8_t*>::unshared(rawHeapPtr());
+}
+
+size_t
+Module::heapLength() const
+{
+ MOZ_ASSERT(dynamicallyLinked_);
+ return maybeHeap_ ? maybeHeap_->byteLength() : 0;
+}
+
+Module*
+Module::nextLinked() const
+{
+ MOZ_ASSERT(dynamicallyLinked_);
+ return next_;
+}
+
+void
+Module::deoptimizeImportExit(uint32_t importIndex)
+{
+ MOZ_ASSERT(dynamicallyLinked_);
+ const Import& import = imports_[importIndex];
+ ImportExit& exit = importToExit(import);
+ exit.code = code() + import.interpExitCodeOffset();
+ exit.baselineScript = nullptr;
+}
+
+bool
+Module::hasDetachedHeap() const
+{
+ MOZ_ASSERT(dynamicallyLinked_);
+ return pod.usesHeap_ && !maybeHeap_;
+}
+
+bool
+Module::changeHeap(Handle<ArrayBufferObject*> newHeap, JSContext* cx)
+{
+ MOZ_ASSERT(dynamicallyLinked_);
+ MOZ_ASSERT(pod.usesHeap_);
+
+ // Content JS should not be able to run (and change heap) from within an
+ // interrupt callback, but in case it does, fail to change heap. Otherwise,
+ // the heap can change at every single instruction which would prevent
+ // future optimizations like heap-base hoisting.
+ if (interrupted_)
+ return false;
+
+ AutoMutateCode amc(cx, *this, "Module::changeHeap");
+ if (maybeHeap_)
+ despecializeFromHeap(maybeHeap_);
+ specializeToHeap(newHeap);
+ return true;
+}
+
+bool
+Module::detachHeap(JSContext* cx)
+{
+ MOZ_ASSERT(dynamicallyLinked_);
+ MOZ_ASSERT(pod.usesHeap_);
+
+ // Content JS should not be able to run (and detach heap) from within an
+ // interrupt callback, but in case it does, fail. Otherwise, the heap can
+ // change at an arbitrary instruction and break the assumption below.
+ if (interrupted_) {
+ JS_ReportError(cx, "attempt to detach from inside interrupt handler");
+ return false;
+ }
+
+ // Even if this->active(), to reach here, the activation must have called
+ // out via an import exit stub. FFI stubs check if heapDatum() is null on
+ // reentry and throw an exception if so.
+ MOZ_ASSERT_IF(active(), activation()->exitReason() == ExitReason::ImportJit ||
+ activation()->exitReason() == ExitReason::ImportInterp);
+
+ AutoMutateCode amc(cx, *this, "Module::detachHeap");
+ despecializeFromHeap(maybeHeap_);
+ return true;
+}
+
+void
+Module::setInterrupted(bool interrupted)
+{
+ MOZ_ASSERT(dynamicallyLinked_);
+ interrupted_ = interrupted;
+}
+
+AsmJSActivation*&
+Module::activation()
+{
+ MOZ_ASSERT(dynamicallyLinked_);
+ return *reinterpret_cast<AsmJSActivation**>(globalData() + ActivationGlobalDataOffset);
+}
+
+Module::EntryFuncPtr
+Module::entryTrampoline(const Export& func) const
+{
+ MOZ_ASSERT(dynamicallyLinked_);
+ return JS_DATA_TO_FUNC_PTR(EntryFuncPtr, code() + func.stubOffset());
+}
+
+bool
+Module::callImport(JSContext* cx, uint32_t importIndex, unsigned argc, const Value* argv,
+ MutableHandleValue rval)
+{
+ MOZ_ASSERT(dynamicallyLinked_);
+
+ const Import& import = imports_[importIndex];
+
+ RootedValue fval(cx, ObjectValue(*importToExit(import).fun));
+ if (!Invoke(cx, UndefinedValue(), fval, argc, argv, rval))
+ return false;
+
+ ImportExit& exit = importToExit(import);
+
+ // The exit may already have become optimized.
+ void* jitExitCode = code() + import.jitExitCodeOffset();
+ if (exit.code == jitExitCode)
+ return true;
+
+ // Test if the function is JIT compiled.
+ if (!exit.fun->hasScript())
+ return true;
+ JSScript* script = exit.fun->nonLazyScript();
+ if (!script->hasBaselineScript()) {
+ MOZ_ASSERT(!script->hasIonScript());
+ return true;
+ }
+
+ // Don't enable jit entry when we have a pending ion builder.
+ // Take the interpreter path which will link it and enable
+ // the fast path on the next call.
+ if (script->baselineScript()->hasPendingIonBuilder())
+ return true;
+
+ // Currently we can't rectify arguments. Therefore disable if argc is too low.
+ if (exit.fun->nargs() > import.sig().args().length())
+ return true;
+
+ // Ensure the argument types are included in the argument TypeSets stored in
+ // the TypeScript. This is necessary for Ion, because the import exit will
+ // use the skip-arg-checks entry point.
+ //
+ // Note that the TypeScript is never discarded while the script has a
+ // BaselineScript, so if those checks hold now they must hold at least until
+ // the BaselineScript is discarded and when that happens the import exit is
+ // patched back.
+ if (!TypeScript::ThisTypes(script)->hasType(TypeSet::UndefinedType()))
+ return true;
+ for (uint32_t i = 0; i < exit.fun->nargs(); i++) {
+ TypeSet::Type type = TypeSet::UnknownType();
+ switch (import.sig().args()[i]) {
+ case ValType::I32: type = TypeSet::Int32Type(); break;
+ case ValType::I64: MOZ_CRASH("NYI");
+ case ValType::F32: type = TypeSet::DoubleType(); break;
+ case ValType::F64: type = TypeSet::DoubleType(); break;
+ case ValType::I32x4: MOZ_CRASH("NYI");
+ case ValType::F32x4: MOZ_CRASH("NYI");
+ }
+ if (!TypeScript::ArgTypes(script, i)->hasType(type))
+ return true;
+ }
+
+ // Let's optimize it!
+ if (!script->baselineScript()->addDependentWasmModule(cx, *this, importIndex))
+ return false;
+
+ exit.code = jitExitCode;
+ exit.baselineScript = script->baselineScript();
+ return true;
+}
+
+void
+Module::setProfilingEnabled(bool enabled, JSContext* cx)
+{
+ MOZ_ASSERT(dynamicallyLinked_);
+ MOZ_ASSERT(!active());
+
+ if (profilingEnabled_ == enabled)
+ return;
+
+ // When enabled, generate profiling labels for every name in funcNames_
+ // that is the name of some Function CodeRange. This involves malloc() so
+    // do it now since, once we start sampling, we'll be in a signal-handling
+ // context where we cannot malloc.
+ if (enabled) {
+ funcLabels_.resize(funcNames_.length());
+ for (const CodeRange& codeRange : codeRanges_) {
+ if (!codeRange.isFunction())
+ continue;
+ unsigned lineno = codeRange.funcLineNumber();
+ const char* name = funcNames_[codeRange.funcNameIndex()].get();
+ funcLabels_[codeRange.funcNameIndex()] =
+ UniqueChars(JS_smprintf("%s (%s:%u)", name, filename_.get(), lineno));
+ }
+ } else {
+ funcLabels_.clear();
+ }
+
+    // Patch callsites and returns to execute profiling prologues/epilogues.
+ {
+ AutoMutateCode amc(cx, *this, "Module::setProfilingEnabled");
+
+ for (const CallSite& callSite : callSites_)
+ EnableProfilingPrologue(*this, callSite, enabled);
+
+ for (const CodeRange& codeRange : codeRanges_)
+ EnableProfilingEpilogue(*this, codeRange, enabled);
+ }
+
+ // Update the function-pointer tables to point to profiling prologues.
+ for (FuncPtrTable& funcPtrTable : funcPtrTables_) {
+ auto array = reinterpret_cast<void**>(globalData() + funcPtrTable.globalDataOffset);
+ for (size_t i = 0; i < funcPtrTable.numElems; i++) {
+ const CodeRange* codeRange = lookupCodeRange(array[i]);
+ void* from = code() + codeRange->funcNonProfilingEntry();
+ void* to = code() + codeRange->funcProfilingEntry();
+ if (!enabled)
+ Swap(from, to);
+ MOZ_ASSERT(array[i] == from);
+ array[i] = to;
+ }
+ }
+
+ profilingEnabled_ = enabled;
+}
+
+const char*
+Module::profilingLabel(uint32_t funcIndex) const
+{
+ MOZ_ASSERT(dynamicallyLinked_);
+ MOZ_ASSERT(profilingEnabled_);
+ return funcLabels_[funcIndex].get();
+}
+
+size_t
+Module::serializedSize() const
+{
+ return sizeof(pod) +
+ pod.codeBytes_ +
+ SerializedVectorSize(imports_) +
+ SerializedVectorSize(exports_) +
+ SerializedPodVectorSize(heapAccesses_) +
+ SerializedPodVectorSize(codeRanges_) +
+ SerializedPodVectorSize(callSites_) +
+ SerializedVectorSize(funcNames_) +
+ filename_.serializedSize();
+}
+
+uint8_t*
+Module::serialize(uint8_t* cursor) const
+{
+ MOZ_ASSERT(!profilingEnabled_, "assumed by Module::deserialize");
+
+ cursor = WriteBytes(cursor, &pod, sizeof(pod));
+ cursor = WriteBytes(cursor, code(), pod.codeBytes_);
+ cursor = SerializeVector(cursor, imports_);
+ cursor = SerializeVector(cursor, exports_);
+ cursor = SerializePodVector(cursor, heapAccesses_);
+ cursor = SerializePodVector(cursor, codeRanges_);
+ cursor = SerializePodVector(cursor, callSites_);
+ cursor = SerializeVector(cursor, funcNames_);
+ cursor = filename_.serialize(cursor);
+ return cursor;
+}
+
+/* static */ const uint8_t*
+Module::deserialize(ExclusiveContext* cx, const uint8_t* cursor, UniqueModule* out)
+{
+ CacheablePod pod = zeroPod();
+ cursor = ReadBytes(cursor, &pod, sizeof(pod));
+ if (!cursor)
+ return nullptr;
+
+ UniqueCodePtr code = AllocateCode(cx, pod.codeBytes_ + pod.globalBytes_);
+ if (!code)
+ return nullptr;
+
+ cursor = ReadBytes(cursor, code.get(), pod.codeBytes_);
+
+ ImportVector imports;
+ cursor = DeserializeVector(cx, cursor, &imports);
+ if (!cursor)
+ return nullptr;
+
+ ExportVector exports;
+ cursor = DeserializeVector(cx, cursor, &exports);
+ if (!cursor)
+ return nullptr;
+
+ HeapAccessVector heapAccesses;
+ cursor = DeserializePodVector(cx, cursor, &heapAccesses);
+ if (!cursor)
+ return nullptr;
+
+ CodeRangeVector codeRanges;
+ cursor = DeserializePodVector(cx, cursor, &codeRanges);
+ if (!cursor)
+ return nullptr;
+
+ CallSiteVector callSites;
+ cursor = DeserializePodVector(cx, cursor, &callSites);
+ if (!cursor)
+ return nullptr;
+
+ CacheableCharsVector funcNames;
+ cursor = DeserializeVector(cx, cursor, &funcNames);
+ if (!cursor)
+ return nullptr;
+
+ CacheableChars filename;
+ cursor = filename.deserialize(cx, cursor);
+ if (!cursor)
+ return nullptr;
+
+ *out = cx->make_unique<Module>(pod,
+ Move(code),
+ Move(imports),
+ Move(exports),
+ Move(heapAccesses),
+ Move(codeRanges),
+ Move(callSites),
+ Move(funcNames),
+ Move(filename),
+ Module::LoadedFromCache,
+ Module::ProfilingDisabled,
+ FuncLabelVector());
+
+ return cursor;
+}
+
+Module::UniqueModule
+Module::clone(JSContext* cx, const StaticLinkData& linkData) const
+{
+ MOZ_ASSERT(dynamicallyLinked_);
+
+ UniqueCodePtr code = AllocateCode(cx, totalBytes());
+ if (!code)
+ return nullptr;
+
+ memcpy(code.get(), this->code(), pod.codeBytes_);
+
+#ifdef DEBUG
+ // Put the symbolic links back to -1 so PatchDataWithValueCheck assertions
+ // in Module::staticallyLink are valid.
+ for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
+ void* callee = AddressOf(imm, cx);
+ const StaticLinkData::OffsetVector& offsets = linkData.symbolicLinks[imm];
+ for (uint32_t offset : offsets) {
+ jit::Assembler::PatchDataWithValueCheck(jit::CodeLocationLabel(code.get() + offset),
+ jit::PatchedImmPtr((void*)-1),
+ jit::PatchedImmPtr(callee));
+ }
+ }
+#endif
+
+ ImportVector imports;
+ if (!CloneVector(cx, imports_, &imports))
+ return nullptr;
+
+ ExportVector exports;
+ if (!CloneVector(cx, exports_, &exports))
+ return nullptr;
+
+ HeapAccessVector heapAccesses;
+ if (!ClonePodVector(cx, heapAccesses_, &heapAccesses))
+ return nullptr;
+
+ CodeRangeVector codeRanges;
+ if (!ClonePodVector(cx, codeRanges_, &codeRanges))
+ return nullptr;
+
+ CallSiteVector callSites;
+ if (!ClonePodVector(cx, callSites_, &callSites))
+ return nullptr;
+
+ CacheableCharsVector funcNames;
+ if (!CloneVector(cx, funcNames_, &funcNames))
+ return nullptr;
+
+ CacheableChars filename;
+ if (!filename_.clone(cx, &filename))
+ return nullptr;
+
+ FuncLabelVector funcLabels;
+ if (!CloneVector(cx, funcLabels_, &funcLabels))
+ return nullptr;
+
+ // Must not GC between Module allocation and (successful) return.
+ auto out = cx->make_unique<Module>(pod,
+ Move(code),
+ Move(imports),
+ Move(exports),
+ Move(heapAccesses),
+ Move(codeRanges),
+ Move(callSites),
+ Move(funcNames),
+ Move(filename),
+ CacheBool::NotLoadedFromCache,
+ ProfilingBool(profilingEnabled_),
+ Move(funcLabels));
+ if (!out)
+ return nullptr;
+
+ // If the copied machine code has been specialized to the heap, it must be
+ // unspecialized in the copy.
+ if (maybeHeap_)
+ out->despecializeFromHeap(maybeHeap_);
+
+ if (!out->staticallyLink(cx, linkData))
+ return nullptr;
+
+ return Move(out);
+}
+
+void
+Module::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* asmJSModuleCode, size_t* asmJSModuleData)
+{
+ *asmJSModuleCode += pod.codeBytes_;
+ *asmJSModuleData += mallocSizeOf(this) +
+ pod.globalBytes_ +
+ SizeOfVectorExcludingThis(imports_, mallocSizeOf) +
+ SizeOfVectorExcludingThis(exports_, mallocSizeOf) +
+ heapAccesses_.sizeOfExcludingThis(mallocSizeOf) +
+ codeRanges_.sizeOfExcludingThis(mallocSizeOf) +
+ callSites_.sizeOfExcludingThis(mallocSizeOf) +
+ funcNames_.sizeOfExcludingThis(mallocSizeOf) +
+ funcPtrTables_.sizeOfExcludingThis(mallocSizeOf);
+}
+
new file mode 100644
--- /dev/null
+++ b/js/src/asmjs/WasmModule.h
@@ -0,0 +1,569 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_module_h
+#define wasm_module_h
+
+#include "asmjs/WasmTypes.h"
+#include "gc/Barrier.h"
+#include "vm/MallocProvider.h"
+
+namespace js {
+
+class AsmJSActivation;
+namespace jit { struct BaselineScript; }
+
+namespace wasm {
+
+// A wasm Module and everything it contains must support serialization,
+// deserialization and cloning. Some data can be simply copied as raw bytes and,
+// as a convention, is stored in an inline CacheablePod struct. Everything else
+// should implement the below methods which are called recursively by the
+// containing Module. The implementation of all these methods are grouped
+// together in WasmSerialize.cpp.
+
+#define WASM_DECLARE_SERIALIZABLE(Type) \
+ size_t serializedSize() const; \
+ uint8_t* serialize(uint8_t* cursor) const; \
+ const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor); \
+ bool clone(JSContext* cx, Type* out) const; \
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+// The StaticLinkData contains all the metadata necessary to perform
+// Module::staticallyLink but is not necessary afterwards.
+
+struct StaticLinkData
+{
+ struct InternalLink {
+ enum Kind {
+ RawPointer,
+ CodeLabel,
+ InstructionImmediate
+ };
+ uint32_t patchAtOffset;
+ uint32_t targetOffset;
+
+ InternalLink() = default;
+ explicit InternalLink(Kind kind);
+ bool isRawPointerPatch();
+ };
+ typedef Vector<InternalLink, 0, SystemAllocPolicy> InternalLinkVector;
+
+ typedef Vector<uint32_t, 0, SystemAllocPolicy> OffsetVector;
+ struct SymbolicLinkArray : mozilla::EnumeratedArray<SymbolicAddress,
+ SymbolicAddress::Limit,
+ OffsetVector> {
+ WASM_DECLARE_SERIALIZABLE(SymbolicLinkArray)
+ };
+
+ struct FuncPtrTable {
+ uint32_t globalDataOffset;
+ OffsetVector elemOffsets;
+ explicit FuncPtrTable(uint32_t globalDataOffset) : globalDataOffset(globalDataOffset) {}
+ FuncPtrTable() = default;
+ FuncPtrTable(FuncPtrTable&& rhs)
+ : globalDataOffset(rhs.globalDataOffset), elemOffsets(Move(rhs.elemOffsets))
+ {}
+ WASM_DECLARE_SERIALIZABLE(FuncPtrTable)
+ };
+ typedef Vector<FuncPtrTable, 0, SystemAllocPolicy> FuncPtrTableVector;
+
+ struct CacheablePod {
+ uint32_t interruptOffset;
+ uint32_t outOfBoundsOffset;
+ } pod;
+ InternalLinkVector internalLinks;
+ SymbolicLinkArray symbolicLinks;
+ FuncPtrTableVector funcPtrTables;
+
+ WASM_DECLARE_SERIALIZABLE(StaticLinkData)
+};
+
+typedef UniquePtr<StaticLinkData, JS::DeletePolicy<StaticLinkData>> UniqueStaticLinkData;
+
+// An Export describes an export from a wasm module. Currently only functions
+// can be exported.
+
+class Export
+{
+ MallocSig sig_;
+ struct CacheablePod {
+ uint32_t funcIndex_;
+ uint32_t stubOffset_;
+ } pod;
+
+ public:
+ Export() = default;
+ Export(MallocSig&& sig, uint32_t funcIndex)
+ : sig_(Move(sig))
+ {
+ pod.funcIndex_ = funcIndex;
+ pod.stubOffset_ = UINT32_MAX;
+ }
+ Export(Export&& rhs)
+ : sig_(Move(rhs.sig_)),
+ pod(rhs.pod)
+ {}
+
+ void initStubOffset(uint32_t stubOffset) {
+ MOZ_ASSERT(pod.stubOffset_ == UINT32_MAX);
+ pod.stubOffset_ = stubOffset;
+ }
+
+ uint32_t funcIndex() const {
+ return pod.funcIndex_;
+ }
+ uint32_t stubOffset() const {
+ return pod.stubOffset_;
+ }
+ const MallocSig& sig() const {
+ return sig_;
+ }
+
+ WASM_DECLARE_SERIALIZABLE(Export)
+};
+
+typedef Vector<Export, 0, SystemAllocPolicy> ExportVector;
+
+// An Import describes a wasm module import. Currently, only functions can be
+// imported in wasm and a function import also includes the signature used
+// within the module to call that import. An import is slightly different than
+// an asm.js FFI function: a single asm.js FFI function can be called with many
+// different signatures. When compiled to wasm, each unique FFI function paired
+// with signature generates a wasm import.
+
+class Import
+{
+ MallocSig sig_;
+ struct CacheablePod {
+ uint32_t exitGlobalDataOffset_;
+ uint32_t interpExitCodeOffset_;
+ uint32_t jitExitCodeOffset_;
+ } pod;
+
+ public:
+ Import() {}
+ Import(Import&& rhs) : sig_(Move(rhs.sig_)), pod(rhs.pod) {}
+ Import(MallocSig&& sig, uint32_t exitGlobalDataOffset)
+ : sig_(Move(sig))
+ {
+ pod.exitGlobalDataOffset_ = exitGlobalDataOffset;
+ pod.interpExitCodeOffset_ = 0;
+ pod.jitExitCodeOffset_ = 0;
+ }
+
+ void initInterpExitOffset(uint32_t off) {
+ MOZ_ASSERT(!pod.interpExitCodeOffset_);
+ pod.interpExitCodeOffset_ = off;
+ }
+ void initJitExitOffset(uint32_t off) {
+ MOZ_ASSERT(!pod.jitExitCodeOffset_);
+ pod.jitExitCodeOffset_ = off;
+ }
+
+ const MallocSig& sig() const {
+ return sig_;
+ }
+ uint32_t exitGlobalDataOffset() const {
+ return pod.exitGlobalDataOffset_;
+ }
+ uint32_t interpExitCodeOffset() const {
+ MOZ_ASSERT(pod.interpExitCodeOffset_);
+ return pod.interpExitCodeOffset_;
+ }
+ uint32_t jitExitCodeOffset() const {
+ MOZ_ASSERT(pod.jitExitCodeOffset_);
+ return pod.jitExitCodeOffset_;
+ }
+
+ WASM_DECLARE_SERIALIZABLE(Import)
+};
+
+typedef Vector<Import, 0, SystemAllocPolicy> ImportVector;
+
+// A CodeRange describes a single contiguous range of code within a wasm
+// module's code segment. A CodeRange describes what the code does and, for
+// function bodies, the name and source coordinates of the function.
+
+class CodeRange
+{
+ uint32_t nameIndex_;
+ uint32_t lineNumber_;
+ uint32_t begin_;
+ uint32_t profilingReturn_;
+ uint32_t end_;
+ union {
+ struct {
+ uint8_t kind_;
+ uint8_t beginToEntry_;
+ uint8_t profilingJumpToProfilingReturn_;
+ uint8_t profilingEpilogueToProfilingReturn_;
+ } func;
+ uint8_t kind_;
+ } u;
+
+ void assertValid();
+
+ public:
+ enum Kind { Function, Entry, ImportJitExit, ImportInterpExit, Interrupt, Inline };
+
+ CodeRange() {}
+ CodeRange(Kind kind, Offsets offsets);
+ CodeRange(Kind kind, ProfilingOffsets offsets);
+ CodeRange(uint32_t nameIndex, uint32_t lineNumber, FuncOffsets offsets);
+
+ // All CodeRanges have a begin and end.
+
+ uint32_t begin() const {
+ return begin_;
+ }
+ uint32_t end() const {
+ return end_;
+ }
+
+ // Other fields are only available for certain CodeRange::Kinds.
+
+ Kind kind() const { return Kind(u.kind_); }
+
+ // Every CodeRange except entry and inline stubs has a profiling return
+ // which is used for asynchronous profiling to determine the frame pointer.
+
+ uint32_t profilingReturn() const {
+ MOZ_ASSERT(kind() != Entry && kind() != Inline);
+ return profilingReturn_;
+ }
+
+ // Functions have offsets which allow patching to selectively execute
+ // profiling prologues/epilogues.
+
+ bool isFunction() const {
+ return kind() == Function;
+ }
+ uint32_t funcProfilingEntry() const {
+ MOZ_ASSERT(isFunction());
+ return begin();
+ }
+ uint32_t funcNonProfilingEntry() const {
+ MOZ_ASSERT(isFunction());
+ return begin_ + u.func.beginToEntry_;
+ }
+ uint32_t functionProfilingJump() const {
+ MOZ_ASSERT(isFunction());
+ return profilingReturn_ - u.func.profilingJumpToProfilingReturn_;
+ }
+ uint32_t funcProfilingEpilogue() const {
+ MOZ_ASSERT(isFunction());
+ return profilingReturn_ - u.func.profilingEpilogueToProfilingReturn_;
+ }
+ uint32_t funcNameIndex() const {
+ MOZ_ASSERT(isFunction());
+ return nameIndex_;
+ }
+ uint32_t funcLineNumber() const {
+ MOZ_ASSERT(isFunction());
+ return lineNumber_;
+ }
+
+ // A sorted array of CodeRanges can be looked up via BinarySearch and PC.
+
+ struct PC {
+ size_t offset;
+ explicit PC(size_t offset) : offset(offset) {}
+ bool operator==(const CodeRange& rhs) const {
+ return offset >= rhs.begin() && offset < rhs.end();
+ }
+ bool operator<(const CodeRange& rhs) const {
+ return offset < rhs.begin();
+ }
+ };
+};
+
+typedef Vector<CodeRange, 0, SystemAllocPolicy> CodeRangeVector;
+
+// A CacheableChars is used to cacheably store UniqueChars in Module.
+
+struct CacheableChars : public UniqueChars
+{
+ explicit CacheableChars(char* ptr) : UniqueChars(ptr) {}
+ MOZ_IMPLICIT CacheableChars(UniqueChars&& rhs) : UniqueChars(Move(rhs)) {}
+ CacheableChars() = default;
+ CacheableChars(CacheableChars&& rhs) : UniqueChars(Move(rhs)) {}
+ void operator=(CacheableChars&& rhs) { UniqueChars& base = *this; base = Move(rhs); }
+ WASM_DECLARE_SERIALIZABLE(CacheableChars)
+};
+typedef Vector<CacheableChars, 0, SystemAllocPolicy> CacheableCharsVector;
+
+// A UniqueCodePtr owns allocated executable code. Code passed to the Module
+// constructor must be allocated via AllocateCode.
+
+class CodeDeleter
+{
+ uint32_t bytes_;
+ public:
+ explicit CodeDeleter(uint32_t bytes) : bytes_(bytes) {}
+ void operator()(uint8_t* p);
+};
+typedef JS::UniquePtr<uint8_t, CodeDeleter> UniqueCodePtr;
+
+UniqueCodePtr
+AllocateCode(ExclusiveContext* cx, size_t bytes);
+
+// Module represents a compiled WebAssembly module which lives until the last
+// reference to any exported functions is dropped. Modules must be wrapped by a
+// rooted JSObject immediately after creation so that Module::trace() is called
+// during GC. Modules are created after compilation completes and start in a
+// fully unlinked state. After creation, a module must be first statically
+// linked and then dynamically linked:
+//
+// - Static linking patches code or global data that relies on absolute
+// addresses. Static linking should happen after a module is serialized into
+// a cache file so that the cached code is stored unlinked and ready to be
+// statically linked after deserialization.
+//
+// - Dynamic linking patches code or global data that relies on the address of
+// the heap and imports of a module. A module may only be dynamically linked
+// once. However, a dynamically-linked module may be cloned so that the clone
+// can be independently dynamically linked.
+//
+// Once fully dynamically linked, a module can have its exports invoked (via
+// entryTrampoline). While executing, profiling may be enabled/disabled (when
+// the Module is not active()) via setProfilingEnabled(). When profiling is
+// enabled, a module's frames will be visible to wasm::ProfilingFrameIterator.
+
+class Module
+{
+ struct ImportExit {
+ void* code;
+ jit::BaselineScript* baselineScript;
+ HeapPtrFunction fun;
+ static_assert(sizeof(HeapPtrFunction) == sizeof(void*), "for JIT access");
+ };
+ struct FuncPtrTable {
+ uint32_t globalDataOffset;
+ uint32_t numElems;
+ explicit FuncPtrTable(const StaticLinkData::FuncPtrTable& table)
+ : globalDataOffset(table.globalDataOffset),
+ numElems(table.elemOffsets.length())
+ {}
+ };
+ typedef Vector<FuncPtrTable, 0, SystemAllocPolicy> FuncPtrTableVector;
+ typedef Vector<CacheableChars, 0, SystemAllocPolicy> FuncLabelVector;
+ typedef RelocatablePtrArrayBufferObjectMaybeShared BufferPtr;
+
+ // Initialized when constructed:
+ struct CacheablePod {
+ const uint32_t functionBytes_;
+ const uint32_t codeBytes_;
+ const uint32_t globalBytes_;
+ const bool usesHeap_;
+ const bool sharedHeap_;
+ const bool usesSignalHandlersForOOB_;
+ const bool usesSignalHandlersForInterrupt_;
+ } pod;
+ const UniqueCodePtr code_;
+ const ImportVector imports_;
+ const ExportVector exports_;
+ const HeapAccessVector heapAccesses_;
+ const CodeRangeVector codeRanges_;
+ const CallSiteVector callSites_;
+ const CacheableCharsVector funcNames_;
+ const CacheableChars filename_;
+ const bool loadedFromCache_;
+
+ // Initialized during staticallyLink:
+ bool staticallyLinked_;
+ uint8_t* interrupt_;
+ uint8_t* outOfBounds_;
+ FuncPtrTableVector funcPtrTables_;
+
+ // Initialized during dynamicallyLink:
+ bool dynamicallyLinked_;
+ BufferPtr maybeHeap_;
+ Module** prev_;
+ Module* next_;
+
+ // Mutated after dynamicallyLink:
+ bool profilingEnabled_;
+ FuncLabelVector funcLabels_;
+ bool interrupted_;
+
+ class AutoMutateCode;
+
+ uint32_t totalBytes() const;
+ uint8_t* rawHeapPtr() const;
+ uint8_t*& rawHeapPtr();
+ void specializeToHeap(ArrayBufferObjectMaybeShared* heap);
+ void despecializeFromHeap(ArrayBufferObjectMaybeShared* heap);
+ void sendCodeRangesToProfiler(JSContext* cx);
+ ImportExit& importToExit(const Import& import);
+
+ enum CacheBool { NotLoadedFromCache = false, LoadedFromCache = true };
+ enum ProfilingBool { ProfilingDisabled = false, ProfilingEnabled = true };
+
+ static CacheablePod zeroPod();
+ void init();
+ Module(const CacheablePod& pod,
+ UniqueCodePtr code,
+ ImportVector&& imports,
+ ExportVector&& exports,
+ HeapAccessVector&& heapAccesses,
+ CodeRangeVector&& codeRanges,
+ CallSiteVector&& callSites,
+ CacheableCharsVector&& funcNames,
+ CacheableChars filename,
+ CacheBool loadedFromCache,
+ ProfilingBool profilingEnabled,
+ FuncLabelVector&& funcLabels);
+
+ template <class> friend struct js::MallocProvider;
+
+ public:
+ static const unsigned SizeOfImportExit = sizeof(ImportExit);
+ static const unsigned OffsetOfImportExitFun = offsetof(ImportExit, fun);
+
+ enum HeapBool { DoesntUseHeap = false, UsesHeap = true };
+ enum SharedBool { UnsharedHeap = false, SharedHeap = true };
+
+ Module(CompileArgs args,
+ uint32_t functionBytes,
+ uint32_t codeBytes,
+ uint32_t globalBytes,
+ HeapBool usesHeap,
+ SharedBool sharedHeap,
+ UniqueCodePtr code,
+ ImportVector&& imports,
+ ExportVector&& exports,
+ HeapAccessVector&& heapAccesses,
+ CodeRangeVector&& codeRanges,
+ CallSiteVector&& callSites,
+ CacheableCharsVector&& funcNames,
+ CacheableChars filename);
+ ~Module();
+ void trace(JSTracer* trc);
+
+ uint8_t* code() const { return code_.get(); }
+ uint8_t* globalData() const { return code() + pod.codeBytes_; }
+ uint32_t globalBytes() const { return pod.globalBytes_; }
+ bool usesHeap() const { return pod.usesHeap_; }
+ bool sharedHeap() const { return pod.sharedHeap_; }
+ CompileArgs compileArgs() const;
+ const ImportVector& imports() const { return imports_; }
+ const ExportVector& exports() const { return exports_; }
+ const char* functionName(uint32_t i) const { return funcNames_[i].get(); }
+ const char* filename() const { return filename_.get(); }
+ bool loadedFromCache() const { return loadedFromCache_; }
+ bool staticallyLinked() const { return staticallyLinked_; }
+ bool dynamicallyLinked() const { return dynamicallyLinked_; }
+ bool profilingEnabled() const { return profilingEnabled_; }
+
+ // The range [0, functionBytes) is a subrange of [0, codeBytes) that
+ // contains only function body code, not the stub code. This distinction is
+ // used by the async interrupt handler to only interrupt when the pc is in
+ // function code which, in turn, simplifies reasoning about how stubs
+ // enter/exit.
+
+ bool containsFunctionPC(void* pc) const;
+ bool containsCodePC(void* pc) const;
+ const CallSite* lookupCallSite(void* returnAddress) const;
+ const CodeRange* lookupCodeRange(void* pc) const;
+ const HeapAccess* lookupHeapAccess(void* pc) const;
+
+ // This function transitions the module from an unlinked state to a
+ // statically-linked state. The given StaticLinkData must have come from the
+ // compilation of this module.
+
+ bool staticallyLink(ExclusiveContext* cx, const StaticLinkData& linkData);
+
+ // This function transitions the module from a statically-linked state to a
+ // dynamically-linked state. If this module usesHeap(), a non-null heap
+ // buffer must be given. The given import vector must match the module's
+ // ImportVector.
+
+ bool dynamicallyLink(JSContext* cx, Handle<ArrayBufferObjectMaybeShared*> heap,
+ const AutoVectorRooter<JSFunction*>& imports);
+
+ // The wasm heap, established by dynamicallyLink.
+
+ ArrayBufferObjectMaybeShared* maybeBuffer() const;
+ SharedMem<uint8_t*> maybeHeap() const;
+ size_t heapLength() const;
+ Module* nextLinked() const;
+
+ // asm.js may detach and change the heap at any time. As an internal detail,
+ // the heap may not be changed while the module has been asynchronously
+ // interrupted.
+
+ bool hasDetachedHeap() const;
+ bool changeHeap(Handle<ArrayBufferObject*> newBuffer, JSContext* cx);
+ bool detachHeap(JSContext* cx);
+ void setInterrupted(bool interrupted);
+
+ // The exports of a wasm module are called by preparing an array of
+ // arguments (coerced to the corresponding types of the Export signature)
+ // and calling the export's entry trampoline. All such calls must be
+ // associated with a containing AsmJSActivation. The innermost
+ // AsmJSActivation must be maintained in the Module::activation field.
+
+ struct EntryArg {
+ uint64_t lo;
+ uint64_t hi;
+ };
+ typedef int32_t (*EntryFuncPtr)(EntryArg* args, uint8_t* global);
+ EntryFuncPtr entryTrampoline(const Export& func) const;
+ AsmJSActivation*& activation();
+
+ // Initially, calls to imports in wasm code call out through the generic
+ // callImport method. If the imported callee gets JIT compiled and the types
+ // match up, callImport will patch the code to instead call through a thunk
+ // directly into the JIT code. If the JIT code is released, the Module must
+ // be notified so it can go back to the generic callImport.
+
+ bool callImport(JSContext* cx, uint32_t importIndex, unsigned argc, const Value* argv,
+ MutableHandleValue rval);
+ void deoptimizeImportExit(uint32_t importIndex);
+
+ // At runtime, when $pc is in wasm function code (containsFunctionPC($pc)),
+ // $pc may be moved abruptly to interrupt() or outOfBounds() by a signal
+ // handler or SetContext() from another thread.
+
+ uint8_t* interrupt() const { MOZ_ASSERT(staticallyLinked_); return interrupt_; }
+ uint8_t* outOfBounds() const { MOZ_ASSERT(staticallyLinked_); return outOfBounds_; }
+
+ // When a module is inactive (no live activations), the profiling mode
+ // can be toggled. WebAssembly frames only show up in the
+    // ProfilingFrameIterator when profiling is enabled.
+
+ bool active() { return !!activation(); }
+ void setProfilingEnabled(bool enabled, JSContext* cx);
+ const char* profilingLabel(uint32_t funcIndex) const;
+
+ // See WASM_DECLARE_SERIALIZABLE.
+ size_t serializedSize() const;
+ uint8_t* serialize(uint8_t* cursor) const;
+ typedef UniquePtr<Module, JS::DeletePolicy<Module>> UniqueModule;
+ static const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor,
+ UniqueModule* out);
+ UniqueModule clone(JSContext* cx, const StaticLinkData& linkData) const;
+ void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* asmJSModuleCode,
+ size_t* asmJSModuleData);
+};
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_module_h
new file mode 100644
--- /dev/null
+++ b/js/src/asmjs/WasmSerialize.h
@@ -0,0 +1,350 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_serialize_h
+#define wasm_serialize_h
+
+#include "jit/MacroAssembler.h"
+
+namespace js {
+namespace wasm {
+
+// Factor out common serialization, cloning and about:memory size-computation
+// functions for reuse when serializing wasm and asm.js modules.
+
+static inline uint8_t*
+WriteBytes(uint8_t* dst, const void* src, size_t nbytes)
+{
+ memcpy(dst, src, nbytes);
+ return dst + nbytes;
+}
+
+static inline const uint8_t*
+ReadBytes(const uint8_t* src, void* dst, size_t nbytes)
+{
+ memcpy(dst, src, nbytes);
+ return src + nbytes;
+}
+
+template <class T>
+static inline uint8_t*
+WriteScalar(uint8_t* dst, T t)
+{
+ memcpy(dst, &t, sizeof(t));
+ return dst + sizeof(t);
+}
+
+template <class T>
+static inline const uint8_t*
+ReadScalar(const uint8_t* src, T* dst)
+{
+ memcpy(dst, src, sizeof(*dst));
+ return src + sizeof(*dst);
+}
+
+static inline size_t
+SerializedNameSize(PropertyName* name)
+{
+ size_t s = sizeof(uint32_t);
+ if (name)
+ s += name->length() * (name->hasLatin1Chars() ? sizeof(Latin1Char) : sizeof(char16_t));
+ return s;
+}
+
+static inline uint8_t*
+SerializeName(uint8_t* cursor, PropertyName* name)
+{
+ MOZ_ASSERT_IF(name, !name->empty());
+ if (name) {
+ static_assert(JSString::MAX_LENGTH <= INT32_MAX, "String length must fit in 31 bits");
+ uint32_t length = name->length();
+ uint32_t lengthAndEncoding = (length << 1) | uint32_t(name->hasLatin1Chars());
+ cursor = WriteScalar<uint32_t>(cursor, lengthAndEncoding);
+ JS::AutoCheckCannotGC nogc;
+ if (name->hasLatin1Chars())
+ cursor = WriteBytes(cursor, name->latin1Chars(nogc), length * sizeof(Latin1Char));
+ else
+ cursor = WriteBytes(cursor, name->twoByteChars(nogc), length * sizeof(char16_t));
+ } else {
+ cursor = WriteScalar<uint32_t>(cursor, 0);
+ }
+ return cursor;
+}
+
+template <typename CharT>
+static inline const uint8_t*
+DeserializeChars(ExclusiveContext* cx, const uint8_t* cursor, size_t length, PropertyName** name)
+{
+ Vector<CharT> tmp(cx);
+ CharT* src;
+ if ((size_t(cursor) & (sizeof(CharT) - 1)) != 0) {
+ // Align 'src' for AtomizeChars.
+ if (!tmp.resize(length))
+ return nullptr;
+ memcpy(tmp.begin(), cursor, length * sizeof(CharT));
+ src = tmp.begin();
+ } else {
+ src = (CharT*)cursor;
+ }
+
+ JSAtom* atom = AtomizeChars(cx, src, length);
+ if (!atom)
+ return nullptr;
+
+ *name = atom->asPropertyName();
+ return cursor + length * sizeof(CharT);
+}
+
+static inline const uint8_t*
+DeserializeName(ExclusiveContext* cx, const uint8_t* cursor, PropertyName** name)
+{
+ uint32_t lengthAndEncoding;
+ cursor = ReadScalar<uint32_t>(cursor, &lengthAndEncoding);
+
+ uint32_t length = lengthAndEncoding >> 1;
+ if (length == 0) {
+ *name = nullptr;
+ return cursor;
+ }
+
+ bool latin1 = lengthAndEncoding & 0x1;
+ return latin1
+ ? DeserializeChars<Latin1Char>(cx, cursor, length, name)
+ : DeserializeChars<char16_t>(cx, cursor, length, name);
+}
+
+template <class T, size_t N>
+static inline size_t
+SerializedVectorSize(const mozilla::Vector<T, N, SystemAllocPolicy>& vec)
+{
+ size_t size = sizeof(uint32_t);
+ for (size_t i = 0; i < vec.length(); i++)
+ size += vec[i].serializedSize();
+ return size;
+}
+
+template <class T, size_t N>
+static inline uint8_t*
+SerializeVector(uint8_t* cursor, const mozilla::Vector<T, N, SystemAllocPolicy>& vec)
+{
+ cursor = WriteScalar<uint32_t>(cursor, vec.length());
+ for (size_t i = 0; i < vec.length(); i++)
+ cursor = vec[i].serialize(cursor);
+ return cursor;
+}
+
+template <class T, size_t N>
+static inline const uint8_t*
+DeserializeVector(ExclusiveContext* cx, const uint8_t* cursor,
+ mozilla::Vector<T, N, SystemAllocPolicy>* vec)
+{
+ uint32_t length;
+ cursor = ReadScalar<uint32_t>(cursor, &length);
+ if (!vec->resize(length))
+ return nullptr;
+ for (size_t i = 0; i < vec->length(); i++) {
+ if (!(cursor = (*vec)[i].deserialize(cx, cursor)))
+ return nullptr;
+ }
+ return cursor;
+}
+
+template <class T, size_t N>
+static inline bool
+CloneVector(JSContext* cx, const mozilla::Vector<T, N, SystemAllocPolicy>& in,
+ mozilla::Vector<T, N, SystemAllocPolicy>* out)
+{
+ if (!out->resize(in.length()))
+ return false;
+ for (size_t i = 0; i < in.length(); i++) {
+ if (!in[i].clone(cx, &(*out)[i]))
+ return false;
+ }
+ return true;
+}
+
+template <class T, size_t N>
+static inline size_t
+SizeOfVectorExcludingThis(const mozilla::Vector<T, N, SystemAllocPolicy>& vec,
+ MallocSizeOf mallocSizeOf)
+{
+ size_t size = vec.sizeOfExcludingThis(mallocSizeOf);
+ for (const T& t : vec)
+ size += t.sizeOfExcludingThis(mallocSizeOf);
+ return size;
+}
+
+template <class T, size_t N>
+static inline size_t
+SerializedPodVectorSize(const mozilla::Vector<T, N, SystemAllocPolicy>& vec)
+{
+ return sizeof(uint32_t) +
+ vec.length() * sizeof(T);
+}
+
+template <class T, size_t N>
+static inline uint8_t*
+SerializePodVector(uint8_t* cursor, const mozilla::Vector<T, N, SystemAllocPolicy>& vec)
+{
+ cursor = WriteScalar<uint32_t>(cursor, vec.length());
+ cursor = WriteBytes(cursor, vec.begin(), vec.length() * sizeof(T));
+ return cursor;
+}
+
+template <class T, size_t N>
+static inline const uint8_t*
+DeserializePodVector(ExclusiveContext* cx, const uint8_t* cursor,
+ mozilla::Vector<T, N, SystemAllocPolicy>* vec)
+{
+ uint32_t length;
+ cursor = ReadScalar<uint32_t>(cursor, &length);
+ if (!vec->resize(length))
+ return nullptr;
+ cursor = ReadBytes(cursor, vec->begin(), length * sizeof(T));
+ return cursor;
+}
+
+template <class T, size_t N>
+static inline bool
+ClonePodVector(JSContext* cx, const mozilla::Vector<T, N, SystemAllocPolicy>& in,
+ mozilla::Vector<T, N, SystemAllocPolicy>* out)
+{
+ if (!out->resize(in.length()))
+ return false;
+ mozilla::PodCopy(out->begin(), in.begin(), in.length());
+ return true;
+}
+
+static inline bool
+GetCPUID(uint32_t* cpuId)
+{
+ enum Arch {
+ X86 = 0x1,
+ X64 = 0x2,
+ ARM = 0x3,
+ MIPS = 0x4,
+ MIPS64 = 0x5,
+ ARCH_BITS = 3
+ };
+
+#if defined(JS_CODEGEN_X86)
+ MOZ_ASSERT(uint32_t(jit::CPUInfo::GetSSEVersion()) <= (UINT32_MAX >> ARCH_BITS));
+ *cpuId = X86 | (uint32_t(jit::CPUInfo::GetSSEVersion()) << ARCH_BITS);
+ return true;
+#elif defined(JS_CODEGEN_X64)
+ MOZ_ASSERT(uint32_t(jit::CPUInfo::GetSSEVersion()) <= (UINT32_MAX >> ARCH_BITS));
+ *cpuId = X64 | (uint32_t(jit::CPUInfo::GetSSEVersion()) << ARCH_BITS);
+ return true;
+#elif defined(JS_CODEGEN_ARM)
+ MOZ_ASSERT(jit::GetARMFlags() <= (UINT32_MAX >> ARCH_BITS));
+ *cpuId = ARM | (jit::GetARMFlags() << ARCH_BITS);
+ return true;
+#elif defined(JS_CODEGEN_MIPS32)
+ MOZ_ASSERT(jit::GetMIPSFlags() <= (UINT32_MAX >> ARCH_BITS));
+ *cpuId = MIPS | (jit::GetMIPSFlags() << ARCH_BITS);
+ return true;
+#elif defined(JS_CODEGEN_MIPS64)
+ MOZ_ASSERT(jit::GetMIPSFlags() <= (UINT32_MAX >> ARCH_BITS));
+ *cpuId = MIPS64 | (jit::GetMIPSFlags() << ARCH_BITS);
+ return true;
+#else
+ return false;
+#endif
+}
+
+class MachineId
+{
+ uint32_t cpuId_;
+ JS::BuildIdCharVector buildId_;
+
+ public:
+ bool extractCurrentState(ExclusiveContext* cx) {
+ if (!cx->asmJSCacheOps().buildId)
+ return false;
+ if (!cx->asmJSCacheOps().buildId(&buildId_))
+ return false;
+ if (!GetCPUID(&cpuId_))
+ return false;
+ return true;
+ }
+
+ size_t serializedSize() const {
+ return sizeof(uint32_t) +
+ SerializedPodVectorSize(buildId_);
+ }
+
+ uint8_t* serialize(uint8_t* cursor) const {
+ cursor = WriteScalar<uint32_t>(cursor, cpuId_);
+ cursor = SerializePodVector(cursor, buildId_);
+ return cursor;
+ }
+
+ const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor) {
+ (cursor = ReadScalar<uint32_t>(cursor, &cpuId_)) &&
+ (cursor = DeserializePodVector(cx, cursor, &buildId_));
+ return cursor;
+ }
+
+ bool operator==(const MachineId& rhs) const {
+ return cpuId_ == rhs.cpuId_ &&
+ buildId_.length() == rhs.buildId_.length() &&
+ mozilla::PodEqual(buildId_.begin(), rhs.buildId_.begin(), buildId_.length());
+ }
+ bool operator!=(const MachineId& rhs) const {
+ return !(*this == rhs);
+ }
+};
+
+struct ScopedCacheEntryOpenedForWrite
+{
+ ExclusiveContext* cx;
+ const size_t serializedSize;
+ uint8_t* memory;
+ intptr_t handle;
+
+ ScopedCacheEntryOpenedForWrite(ExclusiveContext* cx, size_t serializedSize)
+ : cx(cx), serializedSize(serializedSize), memory(nullptr), handle(-1)
+ {}
+
+ ~ScopedCacheEntryOpenedForWrite() {
+ if (memory)
+ cx->asmJSCacheOps().closeEntryForWrite(serializedSize, memory, handle);
+ }
+};
+
+struct ScopedCacheEntryOpenedForRead
+{
+ ExclusiveContext* cx;
+ size_t serializedSize;
+ const uint8_t* memory;
+ intptr_t handle;
+
+ explicit ScopedCacheEntryOpenedForRead(ExclusiveContext* cx)
+ : cx(cx), serializedSize(0), memory(nullptr), handle(0)
+ {}
+
+ ~ScopedCacheEntryOpenedForRead() {
+ if (memory)
+ cx->asmJSCacheOps().closeEntryForRead(serializedSize, memory, handle);
+ }
+};
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_serialize_h
rename from js/src/asmjs/AsmJSSignalHandlers.cpp
rename to js/src/asmjs/WasmSignalHandlers.cpp
--- a/js/src/asmjs/AsmJSSignalHandlers.cpp
+++ b/js/src/asmjs/WasmSignalHandlers.cpp
@@ -11,28 +11,30 @@
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#include "asmjs/AsmJSSignalHandlers.h"
+#include "asmjs/WasmSignalHandlers.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/PodOperations.h"
#include "asmjs/AsmJSModule.h"
+#include "asmjs/AsmJSValidate.h"
#include "jit/AtomicOperations.h"
#include "jit/Disassembler.h"
#include "vm/Runtime.h"
using namespace js;
using namespace js::jit;
+using namespace js::wasm;
using JS::GenericNaN;
using mozilla::DebugOnly;
using mozilla::PodArrayZero;
#if defined(ANDROID)
# include <sys/system_properties.h>
# if defined(MOZ_LINKER)
@@ -595,22 +597,22 @@ ComputeAccessAddress(EMULATOR_CONTEXT* c
result += index * (uintptr_t(1) << address.scale());
}
return reinterpret_cast<uint8_t*>(result);
}
MOZ_COLD static uint8_t*
EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
- const HeapAccess* heapAccess, const AsmJSModule& module)
+ const HeapAccess* heapAccess, const Module& module)
{
MOZ_RELEASE_ASSERT(module.containsFunctionPC(pc));
- MOZ_RELEASE_ASSERT(module.usesSignalHandlersForOOB());
+ MOZ_RELEASE_ASSERT(module.compileArgs().useSignalHandlersForOOB);
MOZ_RELEASE_ASSERT(!heapAccess->hasLengthCheck());
- MOZ_RELEASE_ASSERT(heapAccess->insnOffset() == (pc - module.codeBase()));
+ MOZ_RELEASE_ASSERT(heapAccess->insnOffset() == (pc - module.code()));
// Disassemble the instruction which caused the trap so that we can extract
// information about it and decide what to do.
Disassembler::HeapAccess access;
uint8_t* end = Disassembler::DisassembleHeapAccess(pc, &access);
const Disassembler::ComplexAddress& address = access.address();
MOZ_RELEASE_ASSERT(end > pc);
MOZ_RELEASE_ASSERT(module.containsFunctionPC(end));
@@ -699,17 +701,17 @@ EmulateHeapAccess(EMULATOR_CONTEXT* cont
case Disassembler::HeapAccess::Unknown:
MOZ_CRASH("Failed to disassemble instruction");
}
} else {
// We now know that this is an out-of-bounds access made by an asm.js
// load/store that we should handle.
if (heapAccess->throwOnOOB())
- return module.outOfBoundsExit();
+ return module.outOfBounds();
switch (access.kind()) {
case Disassembler::HeapAccess::Load:
case Disassembler::HeapAccess::LoadSext32:
// Assign the JS-defined result value to the destination register
// (ToInt32(undefined) or ToNumber(undefined), determined by the
// type of the destination register). Very conveniently, we can
// infer the type from the register class, since all SIMD accesses
@@ -750,39 +752,39 @@ HandleFault(PEXCEPTION_POINTERS exceptio
if (!rt || rt->handlingSignal)
return false;
AutoSetHandlingSignal handling(rt);
AsmJSActivation* activation = rt->asmJSActivationStack();
if (!activation)
return false;
- const AsmJSModule& module = activation->module();
+ const Module& module = activation->module().wasm();
// These checks aren't necessary, but, since we can, check anyway to make
// sure we aren't covering up a real bug.
uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(record->ExceptionInformation[1]);
if (!module.maybeHeap() ||
faultingAddress < module.maybeHeap() ||
faultingAddress >= module.maybeHeap() + AsmJSMappedSize)
{
return false;
}
if (!module.containsFunctionPC(pc)) {
// On Windows, it is possible for InterruptRunningCode to execute
// between a faulting heap access and the handling of the fault due
// to InterruptRunningCode's use of SuspendThread. When this happens,
// after ResumeThread, the exception handler is called with pc equal to
- // module.interruptExit, which is logically wrong. The Right Thing would
+ // module.interrupt, which is logically wrong. The Right Thing would
// be for the OS to make fault-handling atomic (so that CONTEXT.pc was
// always the logically-faulting pc). Fortunately, we can detect this
// case and silence the exception ourselves (the exception will
// retrigger after the interrupt jumps back to resumePC).
- if (pc == module.interruptExit() &&
+ if (pc == module.interrupt() &&
module.containsFunctionPC(activation->resumePC()) &&
module.lookupHeapAccess(activation->resumePC()))
{
return true;
}
return false;
}
@@ -897,17 +899,17 @@ HandleMachException(JSRuntime* rt, const
if (request.body.exception != EXC_BAD_ACCESS || request.body.codeCnt != 2)
return false;
AsmJSActivation* activation = rt->asmJSActivationStack();
if (!activation)
return false;
- const AsmJSModule& module = activation->module();
+ const Module& module = activation->module().wasm();
if (!module.containsFunctionPC(pc))
return false;
// These checks aren't necessary, but, since we can, check anyway to make
// sure we aren't covering up a real bug.
uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(request.body.code[1]);
if (!module.maybeHeap() ||
faultingAddress < module.maybeHeap() ||
@@ -934,32 +936,32 @@ HandleMachException(JSRuntime* rt, const
}
// Taken from mach_exc in /usr/include/mach/mach_exc.defs.
static const mach_msg_id_t sExceptionId = 2405;
// The choice of id here is arbitrary, the only constraint is that sQuitId != sExceptionId.
static const mach_msg_id_t sQuitId = 42;
-void
-AsmJSMachExceptionHandlerThread(void* threadArg)
+static void
+MachExceptionHandlerThread(void* threadArg)
{
JSRuntime* rt = reinterpret_cast<JSRuntime*>(threadArg);
- mach_port_t port = rt->asmJSMachExceptionHandler.port();
+ mach_port_t port = rt->wasmMachExceptionHandler.port();
kern_return_t kret;
while(true) {
ExceptionRequest request;
kret = mach_msg(&request.body.Head, MACH_RCV_MSG, 0, sizeof(request),
port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
// If we fail even receiving the message, we can't even send a reply!
// Rather than hanging the faulting thread (hanging the browser), crash.
if (kret != KERN_SUCCESS) {
- fprintf(stderr, "AsmJSMachExceptionHandlerThread: mach_msg failed with %d\n", (int)kret);
+ fprintf(stderr, "MachExceptionHandlerThread: mach_msg failed with %d\n", (int)kret);
MOZ_CRASH();
}
// There are only two messages we should be receiving: an exception
// message that occurs when the runtime's thread faults and the quit
// message sent when the runtime is shutting down.
if (request.body.Head.msgh_id == sQuitId)
break;
@@ -987,24 +989,24 @@ AsmJSMachExceptionHandlerThread(void* th
reply.Head.msgh_id = request.body.Head.msgh_id + 100;
reply.NDR = NDR_record;
reply.RetCode = replyCode;
mach_msg(&reply.Head, MACH_SEND_MSG, sizeof(reply), 0, MACH_PORT_NULL,
MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
}
}
-AsmJSMachExceptionHandler::AsmJSMachExceptionHandler()
+MachExceptionHandler::MachExceptionHandler()
: installed_(false),
thread_(nullptr),
port_(MACH_PORT_NULL)
{}
void
-AsmJSMachExceptionHandler::uninstall()
+MachExceptionHandler::uninstall()
{
if (installed_) {
thread_port_t thread = mach_thread_self();
kern_return_t kret = thread_set_exception_ports(thread,
EXC_MASK_BAD_ACCESS,
MACH_PORT_NULL,
EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
THREAD_STATE_NONE);
@@ -1020,48 +1022,48 @@ AsmJSMachExceptionHandler::uninstall()
msg.msgh_size = sizeof(msg);
msg.msgh_remote_port = port_;
msg.msgh_local_port = MACH_PORT_NULL;
msg.msgh_reserved = 0;
msg.msgh_id = sQuitId;
kern_return_t kret = mach_msg(&msg, MACH_SEND_MSG, sizeof(msg), 0, MACH_PORT_NULL,
MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
if (kret != KERN_SUCCESS) {
- fprintf(stderr, "AsmJSMachExceptionHandler: failed to send quit message: %d\n", (int)kret);
+ fprintf(stderr, "MachExceptionHandler: failed to send quit message: %d\n", (int)kret);
MOZ_CRASH();
}
// Wait for the handler thread to complete before deallocating the port.
PR_JoinThread(thread_);
thread_ = nullptr;
}
if (port_ != MACH_PORT_NULL) {
DebugOnly<kern_return_t> kret = mach_port_destroy(mach_task_self(), port_);
MOZ_ASSERT(kret == KERN_SUCCESS);
port_ = MACH_PORT_NULL;
}
}
bool
-AsmJSMachExceptionHandler::install(JSRuntime* rt)
+MachExceptionHandler::install(JSRuntime* rt)
{
MOZ_ASSERT(!installed());
kern_return_t kret;
mach_port_t thread;
// Get a port which can send and receive data.
kret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port_);
if (kret != KERN_SUCCESS)
goto error;
kret = mach_port_insert_right(mach_task_self(), port_, port_, MACH_MSG_TYPE_MAKE_SEND);
if (kret != KERN_SUCCESS)
goto error;
// Create a thread to block on reading port_.
- thread_ = PR_CreateThread(PR_USER_THREAD, AsmJSMachExceptionHandlerThread, rt,
+ thread_ = PR_CreateThread(PR_USER_THREAD, MachExceptionHandlerThread, rt,
PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, PR_JOINABLE_THREAD, 0);
if (!thread_)
goto error;
// Direct exceptions on this thread to port_ (and thus our handler thread).
// Note: we are totally clobbering any existing *thread* exception ports and
// not even attempting to forward. Breakpad and gdb both use the *process*
// exception ports which are only called if the thread doesn't handle the
@@ -1107,17 +1109,17 @@ HandleFault(int signum, siginfo_t* info,
if (!rt || rt->handlingSignal)
return false;
AutoSetHandlingSignal handling(rt);
AsmJSActivation* activation = rt->asmJSActivationStack();
if (!activation)
return false;
- const AsmJSModule& module = activation->module();
+ const Module& module = activation->module().wasm();
if (!module.containsFunctionPC(pc))
return false;
// These checks aren't necessary, but, since we can, check anyway to make
// sure we aren't covering up a real bug.
uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(info->si_addr);
if (!module.maybeHeap() ||
faultingAddress < module.maybeHeap() ||
@@ -1180,28 +1182,28 @@ RedirectIonBackedgesToInterruptCheck(JSR
}
static bool
RedirectJitCodeToInterruptCheck(JSRuntime* rt, CONTEXT* context)
{
RedirectIonBackedgesToInterruptCheck(rt);
if (AsmJSActivation* activation = rt->asmJSActivationStack()) {
- const AsmJSModule& module = activation->module();
+ const Module& module = activation->module().wasm();
#ifdef JS_SIMULATOR
if (module.containsFunctionPC(rt->simulator()->get_pc_as<void*>()))
- rt->simulator()->set_resume_pc(module.interruptExit());
+ rt->simulator()->set_resume_pc(module.interrupt());
#endif
uint8_t** ppc = ContextToPC(context);
uint8_t* pc = *ppc;
if (module.containsFunctionPC(pc)) {
activation->setResumePC(pc);
- *ppc = module.interruptExit();
+ *ppc = module.interrupt();
return true;
}
}
return false;
}
#if !defined(XP_WIN)
@@ -1216,21 +1218,21 @@ static void
JitInterruptHandler(int signum, siginfo_t* info, void* context)
{
if (JSRuntime* rt = RuntimeForCurrentThread())
RedirectJitCodeToInterruptCheck(rt, (CONTEXT*)context);
}
#endif
bool
-js::EnsureSignalHandlersInstalled(JSRuntime* rt)
+wasm::EnsureSignalHandlersInstalled(JSRuntime* rt)
{
#if defined(XP_DARWIN) && defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
// On OSX, each JSRuntime gets its own handler thread.
- if (!rt->asmJSMachExceptionHandler.installed() && !rt->asmJSMachExceptionHandler.install(rt))
+ if (!rt->wasmMachExceptionHandler.installed() && !rt->wasmMachExceptionHandler.install(rt))
return false;
#endif
// All the rest of the handlers are process-wide and thus must only be
// installed once. We assume that there are no races creating the first
// JSRuntime of the process.
static bool sTried = false;
static bool sResult = false;
rename from js/src/asmjs/AsmJSSignalHandlers.h
rename to js/src/asmjs/WasmSignalHandlers.h
--- a/js/src/asmjs/AsmJSSignalHandlers.h
+++ b/js/src/asmjs/WasmSignalHandlers.h
@@ -11,58 +11,61 @@
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#ifndef asmjs_AsmJSSignalHandlers_h
-#define asmjs_AsmJSSignalHandlers_h
+#ifndef wasm_signal_handlers_h
+#define wasm_signal_handlers_h
#if defined(XP_DARWIN) && defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
# include <mach/mach.h>
# include "jslock.h"
#endif
struct JSRuntime;
namespace js {
+// Force any currently-executing asm.js code to call HandleExecutionInterrupt.
+extern void
+InterruptRunningJitCode(JSRuntime* rt);
+
+namespace wasm {
+
// Set up any signal/exception handlers needed to execute code in the given
// runtime. Return whether runtime can:
// - rely on fault handler support for avoiding asm.js heap bounds checks
// - rely on InterruptRunningJitCode to halt running Ion/asm.js from any thread
bool
EnsureSignalHandlersInstalled(JSRuntime* rt);
-// Force any currently-executing asm.js code to call HandleExecutionInterrupt.
-extern void
-InterruptRunningJitCode(JSRuntime* rt);
-
#if defined(XP_DARWIN) && defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
// On OSX we are forced to use the lower-level Mach exception mechanism instead
// of Unix signals. Mach exceptions are not handled on the victim's stack but
// rather require an extra thread. For simplicity, we create one such thread
// per JSRuntime (upon the first use of asm.js in the JSRuntime). This thread
// and related resources are owned by AsmJSMachExceptionHandler which is owned
// by JSRuntime.
-class AsmJSMachExceptionHandler
+class MachExceptionHandler
{
bool installed_;
PRThread* thread_;
mach_port_t port_;
void uninstall();
public:
- AsmJSMachExceptionHandler();
- ~AsmJSMachExceptionHandler() { uninstall(); }
+ MachExceptionHandler();
+ ~MachExceptionHandler() { uninstall(); }
mach_port_t port() const { return port_; }
bool installed() const { return installed_; }
bool install(JSRuntime* rt);
};
#endif
+} // namespace wasm
} // namespace js
-#endif // asmjs_AsmJSSignalHandlers_h
+#endif // wasm_signal_handlers_h
--- a/js/src/asmjs/WasmStubs.cpp
+++ b/js/src/asmjs/WasmStubs.cpp
@@ -16,18 +16,16 @@
* limitations under the License.
*/
#include "asmjs/WasmStubs.h"
#include "mozilla/ArrayUtils.h"
#include "mozilla/EnumeratedRange.h"
-#include "asmjs/AsmJSModule.h"
-
#include "jit/MacroAssembler-inl.h"
using namespace js;
using namespace js::jit;
using namespace js::wasm;
using mozilla::ArrayLength;
using mozilla::MakeEnumeratedRange;
@@ -92,30 +90,28 @@ static const unsigned FramePushedAfterSa
static const unsigned FramePushedAfterSave = 0;
#else
static const unsigned FramePushedAfterSave = NonVolatileRegs.gprs().size() * sizeof(intptr_t)
+ NonVolatileRegs.fpus().getPushSizeInBytes();
#endif
static const unsigned FramePushedForEntrySP = FramePushedAfterSave + sizeof(void*);
// Generate a stub that enters wasm from a C++ caller via the native ABI.
-// The signature of the entry point is AsmJSModule::CodePtr. The exported wasm
+// The signature of the entry point is Module::CodePtr. The exported wasm
// function has an ABI derived from its specific signature, so this function
// must map from the ABI of CodePtr to the export's signature's ABI.
static bool
-GenerateEntry(MacroAssembler& masm, AsmJSModule& module, unsigned exportIndex,
- const FuncOffsetVector& funcOffsets)
+GenerateEntry(ModuleGenerator& mg, unsigned exportIndex, Module::HeapBool usesHeap)
{
- AsmJSModule::ExportedFunction& exp = module.exportedFunction(exportIndex);
- if (exp.isChangeHeap())
- return true;
+ MacroAssembler& masm = mg.masm();
+ const MallocSig& sig = mg.exportSig(exportIndex);
masm.haltingAlign(CodeAlignment);
- AsmJSOffsets offsets;
+ Offsets offsets;
offsets.begin = masm.currentOffset();
// Save the return address if it wasn't already saved by the call insn.
#if defined(JS_CODEGEN_ARM)
masm.push(lr);
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
masm.push(ra);
#elif defined(JS_CODEGEN_X86)
@@ -163,35 +159,35 @@ GenerateEntry(MacroAssembler& masm, AsmJ
masm.storeStackPtr(Address(scratch, AsmJSActivation::offsetOfEntrySP()));
// Dynamically align the stack since ABIStackAlignment is not necessarily
// AsmJSStackAlignment. We'll use entrySP to recover the original stack
// pointer on return.
masm.andToStackPtr(Imm32(~(AsmJSStackAlignment - 1)));
// Bump the stack for the call.
- masm.reserveStack(AlignBytes(StackArgBytes(exp.sig().args()), AsmJSStackAlignment));
+ masm.reserveStack(AlignBytes(StackArgBytes(sig.args()), AsmJSStackAlignment));
// Copy parameters out of argv and into the registers/stack-slots specified by
// the system ABI.
- for (ABIArgValTypeIter iter(exp.sig().args()); !iter.done(); iter++) {
- unsigned argOffset = iter.index() * sizeof(AsmJSModule::EntryArg);
+ for (ABIArgValTypeIter iter(sig.args()); !iter.done(); iter++) {
+ unsigned argOffset = iter.index() * sizeof(Module::EntryArg);
Address src(argv, argOffset);
MIRType type = iter.mirType();
switch (iter->kind()) {
case ABIArg::GPR:
masm.load32(src, iter->gpr());
break;
#ifdef JS_CODEGEN_REGISTER_PAIR
case ABIArg::GPR_PAIR:
MOZ_CRASH("AsmJS uses hardfp for function calls.");
break;
#endif
case ABIArg::FPU: {
- static_assert(sizeof(AsmJSModule::EntryArg) >= jit::Simd128DataSize,
+ static_assert(sizeof(Module::EntryArg) >= jit::Simd128DataSize,
"EntryArg must be big enough to store SIMD values");
switch (type) {
case MIRType_Int32x4:
masm.loadUnalignedInt32x4(src, iter->fpu());
break;
case MIRType_Float32x4:
masm.loadUnalignedFloat32x4(src, iter->fpu());
break;
@@ -236,29 +232,29 @@ GenerateEntry(MacroAssembler& masm, AsmJ
}
break;
}
}
// Call into the real function.
masm.assertStackAlignment(AsmJSStackAlignment);
Label target;
- target.bind(funcOffsets[exp.funcIndex()]);
+ target.bind(mg.funcEntryOffsets()[mg.exportFuncIndex(exportIndex)]);
masm.call(CallSiteDesc(CallSiteDesc::Relative), &target);
// Recover the stack pointer value before dynamic alignment.
masm.loadAsmJSActivation(scratch);
masm.loadStackPtr(Address(scratch, AsmJSActivation::offsetOfEntrySP()));
masm.setFramePushed(FramePushedForEntrySP);
// Recover the 'argv' pointer which was saved before aligning the stack.
masm.Pop(argv);
// Store the return value in argv[0]
- switch (exp.sig().ret()) {
+ switch (sig.ret()) {
case ExprType::Void:
break;
case ExprType::I32:
masm.storeValue(JSVAL_TYPE_INT32, ReturnReg, Address(argv, 0));
break;
case ExprType::I64:
MOZ_CRASH("no int64 in asm.js");
case ExprType::F32:
@@ -283,127 +279,18 @@ GenerateEntry(MacroAssembler& masm, AsmJ
MOZ_ASSERT(masm.framePushed() == 0);
masm.move32(Imm32(true), ReturnReg);
masm.ret();
if (masm.oom())
return false;
- exp.initCodeOffset(offsets.begin);
offsets.end = masm.currentOffset();
- return module.addCodeRange(AsmJSModule::CodeRange::Entry, offsets);
-}
-
-// Generate a thunk that updates fp before calling the given builtin so that
-// both the builtin and the calling function show up in profiler stacks. (This
-// thunk is dynamically patched in when profiling is enabled.) Since the thunk
-// pushes an AsmJSFrame on the stack, that means we must rebuild the stack
-// frame. Fortunately, these are low arity functions and everything is passed in
-// regs on everything but x86 anyhow.
-//
-// NB: Since this thunk is being injected at system ABI callsites, it must
-// preserve the argument registers (going in) and the return register
-// (coming out) and preserve non-volatile registers.
-static bool
-GenerateBuiltinThunk(MacroAssembler& masm, AsmJSModule& module, Builtin builtin)
-{
- MIRTypeVector args;
- switch (builtin) {
- case Builtin::ToInt32:
- MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
- break;
-#if defined(JS_CODEGEN_ARM)
- case Builtin::aeabi_idivmod:
- case Builtin::aeabi_uidivmod:
- MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
- MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
- break;
- case Builtin::AtomicCmpXchg:
- MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
- MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
- MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
- MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
- break;
- case Builtin::AtomicXchg:
- case Builtin::AtomicFetchAdd:
- case Builtin::AtomicFetchSub:
- case Builtin::AtomicFetchAnd:
- case Builtin::AtomicFetchOr:
- case Builtin::AtomicFetchXor:
- MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
- MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
- MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
- break;
-#endif
- case Builtin::SinD:
- case Builtin::CosD:
- case Builtin::TanD:
- case Builtin::ASinD:
- case Builtin::ACosD:
- case Builtin::ATanD:
- case Builtin::CeilD:
- case Builtin::FloorD:
- case Builtin::ExpD:
- case Builtin::LogD:
- MOZ_ALWAYS_TRUE(args.append(MIRType_Double));
- break;
- case Builtin::ModD:
- case Builtin::PowD:
- case Builtin::ATan2D:
- MOZ_ALWAYS_TRUE(args.append(MIRType_Double));
- MOZ_ALWAYS_TRUE(args.append(MIRType_Double));
- break;
- case Builtin::CeilF:
- case Builtin::FloorF:
- MOZ_ALWAYS_TRUE(args.append(MIRType_Float32));
- break;
- case Builtin::Limit:
- MOZ_CRASH("Bad builtin");
- }
-
- MOZ_ASSERT(args.length() <= 4);
- static_assert(MIRTypeVector::InlineLength >= 4, "infallibility of append");
-
- MOZ_ASSERT(masm.framePushed() == 0);
- uint32_t framePushed = StackDecrementForCall(masm, ABIStackAlignment, args);
-
- AsmJSProfilingOffsets offsets;
- GenerateAsmJSExitPrologue(masm, framePushed, ExitReason(builtin), &offsets);
-
- for (ABIArgMIRTypeIter i(args); !i.done(); i++) {
- if (i->kind() != ABIArg::Stack)
- continue;
-#if !defined(JS_CODEGEN_ARM)
- unsigned offsetToCallerStackArgs = sizeof(AsmJSFrame) + masm.framePushed();
- Address srcAddr(masm.getStackPointer(), offsetToCallerStackArgs + i->offsetFromArgBase());
- Address dstAddr(masm.getStackPointer(), i->offsetFromArgBase());
- if (i.mirType() == MIRType_Int32 || i.mirType() == MIRType_Float32) {
- masm.load32(srcAddr, ABIArgGenerator::NonArg_VolatileReg);
- masm.store32(ABIArgGenerator::NonArg_VolatileReg, dstAddr);
- } else {
- MOZ_ASSERT(i.mirType() == MIRType_Double);
- masm.loadDouble(srcAddr, ScratchDoubleReg);
- masm.storeDouble(ScratchDoubleReg, dstAddr);
- }
-#else
- MOZ_CRASH("Architecture should have enough registers for all builtin calls");
-#endif
- }
-
- AssertStackAlignment(masm, ABIStackAlignment);
- masm.call(BuiltinToImmediate(builtin));
-
- GenerateAsmJSExitEpilogue(masm, framePushed, ExitReason(builtin), &offsets);
-
- if (masm.oom())
- return false;
-
- offsets.end = masm.currentOffset();
- return module.addBuiltinThunkCodeRange(builtin, offsets);
+ return mg.defineExport(exportIndex, offsets);
}
static void
FillArgumentArray(MacroAssembler& masm, const MallocSig::ArgVector& args, unsigned argOffset,
unsigned offsetToCallerStackArgs, Register scratch)
{
for (ABIArgValTypeIter i(args); !i.done(); i++) {
Address dstAddr(masm.getStackPointer(), argOffset + i.index() * sizeof(Value));
@@ -436,84 +323,80 @@ FillArgumentArray(MacroAssembler& masm,
masm.canonicalizeDouble(ScratchDoubleReg);
masm.storeDouble(ScratchDoubleReg, dstAddr);
}
break;
}
}
}
-// If an FFI detaches its heap (viz., via ArrayBuffer.transfer), it must
+// If an import call detaches its heap (viz., via ArrayBuffer.transfer), it must
// call change-heap to another heap (viz., the new heap returned by transfer)
// before returning to asm.js code. If the application fails to do this (if the
// heap pointer is null), jump to a stub.
static void
-CheckForHeapDetachment(MacroAssembler& masm, const AsmJSModule& module, Register scratch,
- Label* onDetached)
+CheckForHeapDetachment(MacroAssembler& masm, Register scratch, Label* onDetached)
{
- if (!module.hasArrayView())
- return;
-
MOZ_ASSERT(int(masm.framePushed()) >= int(ShadowStackSpace));
AssertStackAlignment(masm, ABIStackAlignment);
#if defined(JS_CODEGEN_X86)
CodeOffset offset = masm.movlWithPatch(PatchedAbsoluteAddress(), scratch);
masm.append(AsmJSGlobalAccess(offset, HeapGlobalDataOffset));
masm.branchTestPtr(Assembler::Zero, scratch, scratch, onDetached);
#else
masm.branchTestPtr(Assembler::Zero, HeapReg, HeapReg, onDetached);
#endif
}
// Generate a stub that is called via the internal ABI derived from the
-// signature of the exit and calls into an appropriate InvokeFromAsmJS_* C++
+// signature of the import and calls into an appropriate InvokeImport C++
// function, having boxed all the ABI arguments into a homogeneous Value array.
static bool
-GenerateInterpExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex,
- Label* throwLabel, Label* onDetached)
+GenerateInterpExitStub(ModuleGenerator& mg, unsigned importIndex, Module::HeapBool usesHeap,
+ Label* throwLabel, Label* onDetached, ProfilingOffsets* offsets)
{
- AsmJSModule::Exit& exit = module.exit(exitIndex);
+ MacroAssembler& masm = mg.masm();
+ const MallocSig& sig = mg.importSig(importIndex);
masm.setFramePushed(0);
- // Argument types for InvokeFromAsmJS_*:
- static const MIRType typeArray[] = { MIRType_Pointer, // exitDatum
+ // Argument types for InvokeImport_*:
+ static const MIRType typeArray[] = { MIRType_Pointer, // ImportExit
MIRType_Int32, // argc
MIRType_Pointer }; // argv
MIRTypeVector invokeArgTypes;
MOZ_ALWAYS_TRUE(invokeArgTypes.append(typeArray, ArrayLength(typeArray)));
// At the point of the call, the stack layout shall be (sp grows to the left):
// | stack args | padding | Value argv[] | padding | retaddr | caller stack args |
// The padding between stack args and argv ensures that argv is aligned. The
// padding between argv and retaddr ensures that sp is aligned.
unsigned argOffset = AlignBytes(StackArgBytes(invokeArgTypes), sizeof(double));
- unsigned argBytes = Max<size_t>(1, exit.sig().args().length()) * sizeof(Value);
+ unsigned argBytes = Max<size_t>(1, sig.args().length()) * sizeof(Value);
unsigned framePushed = StackDecrementForCall(masm, ABIStackAlignment, argOffset + argBytes);
- AsmJSProfilingOffsets offsets;
- GenerateAsmJSExitPrologue(masm, framePushed, ExitReason::Slow, &offsets);
+ GenerateExitPrologue(masm, framePushed, ExitReason::ImportInterp, offsets);
// Fill the argument array.
unsigned offsetToCallerStackArgs = sizeof(AsmJSFrame) + masm.framePushed();
Register scratch = ABIArgGenerator::NonArgReturnReg0;
- FillArgumentArray(masm, exit.sig().args(), argOffset, offsetToCallerStackArgs, scratch);
+ FillArgumentArray(masm, sig.args(), argOffset, offsetToCallerStackArgs, scratch);
- // Prepare the arguments for the call to InvokeFromAsmJS_*.
+ // Prepare the arguments for the call to InvokeImport_*.
ABIArgMIRTypeIter i(invokeArgTypes);
- // argument 0: exitIndex
+ // argument 0: importIndex
if (i->kind() == ABIArg::GPR)
- masm.mov(ImmWord(exitIndex), i->gpr());
+ masm.mov(ImmWord(importIndex), i->gpr());
else
- masm.store32(Imm32(exitIndex), Address(masm.getStackPointer(), i->offsetFromArgBase()));
+ masm.store32(Imm32(importIndex), Address(masm.getStackPointer(), i->offsetFromArgBase()));
i++;
// argument 1: argc
- unsigned argc = exit.sig().args().length();
+ unsigned argc = sig.args().length();
if (i->kind() == ABIArg::GPR)
masm.mov(ImmWord(argc), i->gpr());
else
masm.store32(Imm32(argc), Address(masm.getStackPointer(), i->offsetFromArgBase()));
i++;
// argument 2: argv
Address argv(masm.getStackPointer(), argOffset);
@@ -523,142 +406,144 @@ GenerateInterpExit(MacroAssembler& masm,
masm.computeEffectiveAddress(argv, scratch);
masm.storePtr(scratch, Address(masm.getStackPointer(), i->offsetFromArgBase()));
}
i++;
MOZ_ASSERT(i.done());
// Make the call, test whether it succeeded, and extract the return value.
AssertStackAlignment(masm, ABIStackAlignment);
- switch (exit.sig().ret()) {
+ switch (sig.ret()) {
case ExprType::Void:
- masm.call(SymbolicAddress::InvokeFromAsmJS_Ignore);
+ masm.call(SymbolicAddress::InvokeImport_Void);
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
break;
case ExprType::I32:
- masm.call(SymbolicAddress::InvokeFromAsmJS_ToInt32);
+ masm.call(SymbolicAddress::InvokeImport_I32);
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
masm.unboxInt32(argv, ReturnReg);
break;
case ExprType::I64:
MOZ_CRASH("no int64 in asm.js");
case ExprType::F32:
MOZ_CRASH("Float32 shouldn't be returned from a FFI");
case ExprType::F64:
- masm.call(SymbolicAddress::InvokeFromAsmJS_ToNumber);
+ masm.call(SymbolicAddress::InvokeImport_F64);
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
masm.loadDouble(argv, ReturnDoubleReg);
break;
case ExprType::I32x4:
case ExprType::F32x4:
MOZ_CRASH("SIMD types shouldn't be returned from a FFI");
}
// The heap pointer may have changed during the FFI, so reload it and test
// for detachment.
masm.loadAsmJSHeapRegisterFromGlobalData();
- CheckForHeapDetachment(masm, module, ABIArgGenerator::NonReturn_VolatileReg0, onDetached);
+ if (usesHeap)
+ CheckForHeapDetachment(masm, ABIArgGenerator::NonReturn_VolatileReg0, onDetached);
- GenerateAsmJSExitEpilogue(masm, framePushed, ExitReason::Slow, &offsets);
+ GenerateExitEpilogue(masm, framePushed, ExitReason::ImportInterp, offsets);
if (masm.oom())
return false;
- offsets.end = masm.currentOffset();
- exit.initInterpOffset(offsets.begin);
- return module.addCodeRange(AsmJSModule::CodeRange::SlowFFI, offsets);
+ offsets->end = masm.currentOffset();
+ return true;
}
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
static const unsigned MaybeSavedGlobalReg = sizeof(void*);
#else
static const unsigned MaybeSavedGlobalReg = 0;
#endif
// Generate a stub that is called via the internal ABI derived from the
-// signature of the exit and calls into a compatible Ion-compiled JIT function,
-// having boxed all the ABI arguments into the Ion stack frame layout.
+// signature of the import and calls into a compatible JIT function,
+// having boxed all the ABI arguments into the JIT stack frame layout.
static bool
-GenerateIonExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex,
- Label* throwLabel, Label* onDetached)
+GenerateJitExitStub(ModuleGenerator& mg, unsigned importIndex, Module::HeapBool usesHeap,
+ Label* throwLabel, Label* onDetached, ProfilingOffsets* offsets)
{
- AsmJSModule::Exit& exit = module.exit(exitIndex);
+ MacroAssembler& masm = mg.masm();
+ const MallocSig& sig = mg.importSig(importIndex);
masm.setFramePushed(0);
- // Ion calls use the following stack layout (sp grows to the left):
+ // JIT calls use the following stack layout (sp grows to the left):
// | retaddr | descriptor | callee | argc | this | arg1..N |
- // After the Ion frame, the global register (if present) is saved since Ion
- // does not preserve non-volatile regs. Also, unlike most ABIs, Ion requires
- // that sp be JitStackAlignment-aligned *after* pushing the return address.
+ // After the JIT frame, the global register (if present) is saved since the
+ // JIT's ABI does not preserve non-volatile regs. Also, unlike most ABIs,
+ // the JIT ABI requires that sp be JitStackAlignment-aligned *after* pushing
+ // the return address.
static_assert(AsmJSStackAlignment >= JitStackAlignment, "subsumes");
unsigned sizeOfRetAddr = sizeof(void*);
- unsigned ionFrameBytes = 3 * sizeof(void*) + (1 + exit.sig().args().length()) * sizeof(Value);
- unsigned totalIonBytes = sizeOfRetAddr + ionFrameBytes + MaybeSavedGlobalReg;
- unsigned ionFramePushed = StackDecrementForCall(masm, JitStackAlignment, totalIonBytes) -
+ unsigned jitFrameBytes = 3 * sizeof(void*) + (1 + sig.args().length()) * sizeof(Value);
+ unsigned totalJitFrameBytes = sizeOfRetAddr + jitFrameBytes + MaybeSavedGlobalReg;
+ unsigned jitFramePushed = StackDecrementForCall(masm, JitStackAlignment, totalJitFrameBytes) -
sizeOfRetAddr;
- AsmJSProfilingOffsets offsets;
- GenerateAsmJSExitPrologue(masm, ionFramePushed, ExitReason::Jit, &offsets);
+ GenerateExitPrologue(masm, jitFramePushed, ExitReason::ImportJit, offsets);
// 1. Descriptor
size_t argOffset = 0;
- uint32_t descriptor = MakeFrameDescriptor(ionFramePushed, JitFrame_Entry);
+ uint32_t descriptor = MakeFrameDescriptor(jitFramePushed, JitFrame_Entry);
masm.storePtr(ImmWord(uintptr_t(descriptor)), Address(masm.getStackPointer(), argOffset));
argOffset += sizeof(size_t);
// 2. Callee
Register callee = ABIArgGenerator::NonArgReturnReg0; // live until call
Register scratch = ABIArgGenerator::NonArgReturnReg1; // repeatedly clobbered
// 2.1. Get ExitDatum
- unsigned globalDataOffset = module.exit(exitIndex).globalDataOffset();
+ unsigned globalDataOffset = mg.importExitGlobalDataOffset(importIndex);
#if defined(JS_CODEGEN_X64)
masm.append(AsmJSGlobalAccess(masm.leaRipRelative(callee), globalDataOffset));
#elif defined(JS_CODEGEN_X86)
masm.append(AsmJSGlobalAccess(masm.movlWithPatch(Imm32(0), callee), globalDataOffset));
-#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
+ defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
masm.computeEffectiveAddress(Address(GlobalReg, globalDataOffset - AsmJSGlobalRegBias), callee);
#endif
// 2.2. Get callee
- masm.loadPtr(Address(callee, offsetof(AsmJSModule::ExitDatum, fun)), callee);
+ masm.loadPtr(Address(callee, Module::OffsetOfImportExitFun), callee);
// 2.3. Save callee
masm.storePtr(callee, Address(masm.getStackPointer(), argOffset));
argOffset += sizeof(size_t);
// 2.4. Load callee executable entry point
masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), callee);
masm.loadBaselineOrIonNoArgCheck(callee, callee, nullptr);
// 3. Argc
- unsigned argc = exit.sig().args().length();
+ unsigned argc = sig.args().length();
masm.storePtr(ImmWord(uintptr_t(argc)), Address(masm.getStackPointer(), argOffset));
argOffset += sizeof(size_t);
// 4. |this| value
masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), argOffset));
argOffset += sizeof(Value);
// 5. Fill the arguments
- unsigned offsetToCallerStackArgs = ionFramePushed + sizeof(AsmJSFrame);
- FillArgumentArray(masm, exit.sig().args(), argOffset, offsetToCallerStackArgs, scratch);
- argOffset += exit.sig().args().length() * sizeof(Value);
- MOZ_ASSERT(argOffset == ionFrameBytes);
+ unsigned offsetToCallerStackArgs = jitFramePushed + sizeof(AsmJSFrame);
+ FillArgumentArray(masm, sig.args(), argOffset, offsetToCallerStackArgs, scratch);
+ argOffset += sig.args().length() * sizeof(Value);
+ MOZ_ASSERT(argOffset == jitFrameBytes);
// 6. Jit code will clobber all registers, even non-volatiles. GlobalReg and
// HeapReg are removed from the general register set for asm.js code, so
// these will not have been saved by the caller like all other registers,
// so they must be explicitly preserved. Only save GlobalReg since
// HeapReg must be reloaded (from global data) after the call since the
// heap may change during the FFI call.
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
static_assert(MaybeSavedGlobalReg == sizeof(void*), "stack frame accounting");
- masm.storePtr(GlobalReg, Address(masm.getStackPointer(), ionFrameBytes));
+ masm.storePtr(GlobalReg, Address(masm.getStackPointer(), jitFrameBytes));
#endif
{
// Enable Activation.
//
// This sequence requires four registers, and needs to preserve the 'callee'
// register, so there are five live registers.
MOZ_ASSERT(callee == AsmJSIonExitRegCallee);
@@ -761,62 +646,63 @@ GenerateIonExit(MacroAssembler& masm, As
masm.loadPtr(Address(reg1, JitActivation::offsetOfPrevJitJSContext()), reg2);
masm.storePtr(reg2, Address(reg0, offsetOfJitJSContext));
// rt->jitActivation = prevJitActivation_;
masm.loadPtr(Address(reg1, JitActivation::offsetOfPrevJitActivation()), reg2);
masm.storePtr(reg2, Address(reg0, offsetOfJitActivation));
}
- // Reload the global register since Ion code can clobber any register.
+ // Reload the global register since JIT code can clobber any register.
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
static_assert(MaybeSavedGlobalReg == sizeof(void*), "stack frame accounting");
- masm.loadPtr(Address(masm.getStackPointer(), ionFrameBytes), GlobalReg);
+ masm.loadPtr(Address(masm.getStackPointer(), jitFrameBytes), GlobalReg);
#endif
- // As explained above, the frame was aligned for Ion such that
+ // As explained above, the frame was aligned for the JIT ABI such that
// (sp + sizeof(void*)) % JitStackAlignment == 0
// But now we possibly want to call one of several different C++ functions,
// so subtract the sizeof(void*) so that sp is aligned for an ABI call.
static_assert(ABIStackAlignment <= JitStackAlignment, "subsumes");
masm.reserveStack(sizeOfRetAddr);
unsigned nativeFramePushed = masm.framePushed();
AssertStackAlignment(masm, ABIStackAlignment);
masm.branchTestMagic(Assembler::Equal, JSReturnOperand, throwLabel);
Label oolConvert;
- switch (exit.sig().ret()) {
+ switch (sig.ret()) {
case ExprType::Void:
break;
case ExprType::I32:
masm.convertValueToInt32(JSReturnOperand, ReturnDoubleReg, ReturnReg, &oolConvert,
/* -0 check */ false);
break;
case ExprType::I64:
MOZ_CRASH("no int64 in asm.js");
case ExprType::F32:
- MOZ_CRASH("Float shouldn't be returned from a FFI");
+ MOZ_CRASH("Float shouldn't be returned from an import");
case ExprType::F64:
masm.convertValueToDouble(JSReturnOperand, ReturnDoubleReg, &oolConvert);
break;
case ExprType::I32x4:
case ExprType::F32x4:
- MOZ_CRASH("SIMD types shouldn't be returned from a FFI");
+ MOZ_CRASH("SIMD types shouldn't be returned from an import");
}
Label done;
masm.bind(&done);
- // The heap pointer has to be reloaded anyway since Ion could have clobbered
- // it. Additionally, the FFI may have detached the heap buffer.
+ // The heap pointer has to be reloaded anyway since JIT code could have
+ // clobbered it. Additionally, the import may have detached the heap buffer.
masm.loadAsmJSHeapRegisterFromGlobalData();
- CheckForHeapDetachment(masm, module, ABIArgGenerator::NonReturn_VolatileReg0, onDetached);
+ if (usesHeap)
+ CheckForHeapDetachment(masm, ABIArgGenerator::NonReturn_VolatileReg0, onDetached);
- GenerateAsmJSExitEpilogue(masm, masm.framePushed(), ExitReason::Jit, &offsets);
+ GenerateExitEpilogue(masm, masm.framePushed(), ExitReason::ImportJit, offsets);
if (oolConvert.used()) {
masm.bind(&oolConvert);
masm.setFramePushed(nativeFramePushed);
// Coercion calls use the following stack layout (sp grows to the left):
// | args | padding | Value argv[1] | padding | exit AsmJSFrame |
MIRTypeVector coerceArgTypes;
@@ -837,17 +723,17 @@ GenerateIonExit(MacroAssembler& masm, As
masm.computeEffectiveAddress(argv, scratch);
masm.storePtr(scratch, Address(masm.getStackPointer(), i->offsetFromArgBase()));
}
i++;
MOZ_ASSERT(i.done());
// Call coercion function
AssertStackAlignment(masm, ABIStackAlignment);
- switch (exit.sig().ret()) {
+ switch (sig.ret()) {
case ExprType::I32:
masm.call(SymbolicAddress::CoerceInPlace_ToInt32);
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
masm.unboxInt32(Address(masm.getStackPointer(), offsetToCoerceArgv), ReturnReg);
break;
case ExprType::F64:
masm.call(SymbolicAddress::CoerceInPlace_ToNumber);
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
@@ -861,54 +747,56 @@ GenerateIonExit(MacroAssembler& masm, As
masm.setFramePushed(0);
}
MOZ_ASSERT(masm.framePushed() == 0);
if (masm.oom())
return false;
- offsets.end = masm.currentOffset();
- exit.initJitOffset(offsets.begin);
- return module.addCodeRange(AsmJSModule::CodeRange::JitFFI, offsets);
+ offsets->end = masm.currentOffset();
+ return true;
}
// Generate a stub that is called when returning from an exit where the module's
// buffer has been detached. This stub first calls a C++ function to report an
// exception and then jumps to the generic throw stub to pop everything off the
// stack.
static bool
-GenerateOnDetachedExit(MacroAssembler& masm, AsmJSModule& module, Label* onDetached,
- Label* throwLabel)
+GenerateOnDetachedStub(ModuleGenerator& mg, Label* onDetached, Label* throwLabel)
{
+ MacroAssembler& masm = mg.masm();
+
masm.haltingAlign(CodeAlignment);
- AsmJSOffsets offsets;
+ Offsets offsets;
offsets.begin = masm.currentOffset();
masm.bind(onDetached);
// For now, OnDetached always throws (see OnDetached comment).
masm.assertStackAlignment(ABIStackAlignment);
masm.call(SymbolicAddress::OnDetached);
masm.jump(throwLabel);
if (masm.oom())
return false;
offsets.end = masm.currentOffset();
- return module.addCodeRange(AsmJSModule::CodeRange::Inline, offsets);
+ return mg.defineInlineStub(offsets);
}
// Generate a stub that is called immediately after the prologue when there is a
// stack overflow. This stub calls a C++ function to report the error and then
// jumps to the throw stub to pop the activation.
static bool
-GenerateStackOverflowExit(MacroAssembler& masm, AsmJSModule& module, Label* throwLabel)
+GenerateStackOverflowStub(ModuleGenerator& mg, Label* throwLabel)
{
+ MacroAssembler& masm = mg.masm();
+
masm.haltingAlign(CodeAlignment);
- AsmJSOffsets offsets;
+ Offsets offsets;
offsets.begin = masm.currentOffset();
masm.bind(masm.asmStackOverflowLabel());
// If we reach here via the non-profiling prologue, AsmJSActivation::fp has
// not been updated. To enable stack unwinding from C++, store to it now. If
// we reached here via the profiling prologue, we'll just store the same
// value again. Do not update AsmJSFrame::callerFP as it is not necessary in
// the non-profiling case (there is no return path from this point) and, in
@@ -925,119 +813,125 @@ GenerateStackOverflowExit(MacroAssembler
masm.assertStackAlignment(ABIStackAlignment);
masm.call(SymbolicAddress::ReportOverRecursed);
masm.jump(throwLabel);
if (masm.oom())
return false;
offsets.end = masm.currentOffset();
- return module.addCodeRange(AsmJSModule::CodeRange::Inline, offsets);
+ return mg.defineInlineStub(offsets);
}
// Generate a stub that is called from the synchronous, inline interrupt checks
// when the interrupt flag is set. This stub calls the C++ function to handle
// the interrupt which returns whether execution has been interrupted.
static bool
-GenerateSyncInterruptExit(MacroAssembler& masm, AsmJSModule& module, Label* throwLabel)
+GenerateSyncInterruptStub(ModuleGenerator& mg, Label* throwLabel)
{
+ MacroAssembler& masm = mg.masm();
+
masm.setFramePushed(0);
unsigned framePushed = StackDecrementForCall(masm, ABIStackAlignment, ShadowStackSpace);
- AsmJSProfilingOffsets offsets;
- GenerateAsmJSExitPrologue(masm, framePushed, ExitReason::Interrupt, &offsets,
- masm.asmSyncInterruptLabel());
+ ProfilingOffsets offsets;
+ GenerateExitPrologue(masm, framePushed, ExitReason::Native, &offsets,
+ masm.asmSyncInterruptLabel());
AssertStackAlignment(masm, ABIStackAlignment);
masm.call(SymbolicAddress::HandleExecutionInterrupt);
masm.branchIfFalseBool(ReturnReg, throwLabel);
- GenerateAsmJSExitEpilogue(masm, framePushed, ExitReason::Interrupt, &offsets);
+ GenerateExitEpilogue(masm, framePushed, ExitReason::Native, &offsets);
if (masm.oom())
return false;
offsets.end = masm.currentOffset();
- return module.addCodeRange(AsmJSModule::CodeRange::Interrupt, offsets);
+ return mg.defineSyncInterruptStub(offsets);
}
// Generate a stub that is jumped to from an out-of-bounds heap access when
// there are throwing semantics. This stub calls a C++ function to report an
// error and then jumps to the throw stub to pop the activation.
static bool
-GenerateConversionErrorExit(MacroAssembler& masm, AsmJSModule& module, Label* throwLabel)
+GenerateConversionErrorStub(ModuleGenerator& mg, Label* throwLabel)
{
+ MacroAssembler& masm = mg.masm();
+
masm.haltingAlign(CodeAlignment);
- AsmJSOffsets offsets;
+ Offsets offsets;
offsets.begin = masm.currentOffset();
masm.bind(masm.asmOnConversionErrorLabel());
// sp can be anything at this point, so ensure it is aligned when calling
// into C++. We unconditionally jump to throw so don't worry about restoring sp.
masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
- // OnOutOfBounds always throws.
+ // OnImpreciseConversion always throws.
masm.assertStackAlignment(ABIStackAlignment);
masm.call(SymbolicAddress::OnImpreciseConversion);
masm.jump(throwLabel);
if (masm.oom())
return false;
offsets.end = masm.currentOffset();
- module.setOnOutOfBoundsExitOffset(offsets.begin);
- return module.addCodeRange(AsmJSModule::CodeRange::Inline, offsets);
+ return mg.defineInlineStub(offsets);
}
// Generate a stub that is jumped to from an out-of-bounds heap access when
// there are throwing semantics. This stub calls a C++ function to report an
// error and then jumps to the throw stub to pop the activation.
static bool
-GenerateOutOfBoundsExit(MacroAssembler& masm, AsmJSModule& module, Label* throwLabel)
+GenerateOutOfBoundsStub(ModuleGenerator& mg, Label* throwLabel)
{
+ MacroAssembler& masm = mg.masm();
+
masm.haltingAlign(CodeAlignment);
- AsmJSOffsets offsets;
+ Offsets offsets;
offsets.begin = masm.currentOffset();
masm.bind(masm.asmOnOutOfBoundsLabel());
// sp can be anything at this point, so ensure it is aligned when calling
// into C++. We unconditionally jump to throw so don't worry about restoring sp.
masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
// OnOutOfBounds always throws.
masm.assertStackAlignment(ABIStackAlignment);
masm.call(SymbolicAddress::OnOutOfBounds);
masm.jump(throwLabel);
if (masm.oom())
return false;
offsets.end = masm.currentOffset();
- module.setOnOutOfBoundsExitOffset(offsets.begin);
- return module.addCodeRange(AsmJSModule::CodeRange::Inline, offsets);
+ return mg.defineOutOfBoundsStub(offsets);
}
static const LiveRegisterSet AllRegsExceptSP(
GeneralRegisterSet(Registers::AllMask&
~(uint32_t(1) << Registers::StackPointer)),
FloatRegisterSet(FloatRegisters::AllMask));
// The async interrupt-callback exit is called from arbitrarily-interrupted asm.js
// code. That means we must first save *all* registers and restore *all*
// registers (except the stack pointer) when we resume. The address to resume to
// (assuming that js::HandleExecutionInterrupt doesn't indicate that the
// execution should be aborted) is stored in AsmJSActivation::resumePC_.
// Unfortunately, loading this requires a scratch register which we don't have
// after restoring all registers. To hack around this, push the resumePC on the
// stack so that it can be popped directly into PC.
static bool
-GenerateAsyncInterruptExit(MacroAssembler& masm, AsmJSModule& module, Label* throwLabel)
+GenerateAsyncInterruptStub(ModuleGenerator& mg, Module::HeapBool usesHeap, Label* throwLabel)
{
+ MacroAssembler& masm = mg.masm();
+
masm.haltingAlign(CodeAlignment);
- AsmJSOffsets offsets;
+ Offsets offsets;
offsets.begin = masm.currentOffset();
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
// Be very careful here not to perturb the machine state before saving it
// to the stack. In particular, add/sub instructions may set conditions in
// the flags register.
masm.push(Imm32(0)); // space for resumePC
masm.pushFlags(); // after this we are safe to use sub
@@ -1177,30 +1071,31 @@ GenerateAsyncInterruptExit(MacroAssemble
#else
# error "Unknown architecture!"
#endif
if (masm.oom())
return false;
offsets.end = masm.currentOffset();
- module.setAsyncInterruptOffset(offsets.begin);
- return module.addCodeRange(AsmJSModule::CodeRange::Inline, offsets);
+ return mg.defineAsyncInterruptStub(offsets);
}
// If an exception is thrown, simply pop all frames (since asm.js does not
// contain try/catch). To do this:
// 1. Restore 'sp' to it's value right after the PushRegsInMask in GenerateEntry.
// 2. PopRegsInMask to restore the caller's non-volatile registers.
// 3. Return (to CallAsmJS).
static bool
-GenerateThrowStub(MacroAssembler& masm, AsmJSModule& module, Label* throwLabel)
+GenerateThrowStub(ModuleGenerator& mg, Label* throwLabel)
{
+ MacroAssembler& masm = mg.masm();
+
masm.haltingAlign(CodeAlignment);
- AsmJSOffsets offsets;
+ Offsets offsets;
offsets.begin = masm.currentOffset();
masm.bind(throwLabel);
// We are about to pop all frames in this AsmJSActivation. Set fp to null to
// maintain the invariant that fp is either null or pointing to a valid
// frame.
Register scratch = ABIArgGenerator::NonArgReturnReg0;
masm.loadAsmJSActivation(scratch);
@@ -1214,74 +1109,75 @@ GenerateThrowStub(MacroAssembler& masm,
masm.mov(ImmWord(0), ReturnReg);
masm.ret();
if (masm.oom())
return false;
offsets.end = masm.currentOffset();
- return module.addCodeRange(AsmJSModule::CodeRange::Inline, offsets);
+ return mg.defineInlineStub(offsets);
}
bool
-wasm::GenerateStubs(MacroAssembler& masm, AsmJSModule& module, const FuncOffsetVector& funcOffsets)
+wasm::GenerateStubs(ModuleGenerator& mg, Module::HeapBool usesHeap)
{
- for (unsigned i = 0; i < module.numExportedFunctions(); i++) {
- if (!GenerateEntry(masm, module, i, funcOffsets))
- return false;
- }
-
- for (auto builtin : MakeEnumeratedRange(Builtin::Limit)) {
- if (!GenerateBuiltinThunk(masm, module, builtin))
+ for (unsigned i = 0; i < mg.numDeclaredExports(); i++) {
+ if (!GenerateEntry(mg, i, usesHeap))
return false;
}
Label onThrow;
{
Label onDetached;
- for (size_t i = 0; i < module.numExits(); i++) {
- if (!GenerateInterpExit(masm, module, i, &onThrow, &onDetached))
+ for (size_t i = 0; i < mg.numDeclaredImports(); i++) {
+ ProfilingOffsets interp;
+ if (!GenerateInterpExitStub(mg, i, usesHeap, &onThrow, &onDetached, &interp))
return false;
- if (!GenerateIonExit(masm, module, i, &onThrow, &onDetached))
+
+ ProfilingOffsets jit;
+ if (!GenerateJitExitStub(mg, i, usesHeap, &onThrow, &onDetached, &jit))
+ return false;
+
+ if (!mg.defineImport(i, interp, jit))
return false;
}
if (onDetached.used()) {
- if (!GenerateOnDetachedExit(masm, module, &onDetached, &onThrow))
+ if (!GenerateOnDetachedStub(mg, &onDetached, &onThrow))
return false;
}
}
- if (masm.asmStackOverflowLabel()->used()) {
- if (!GenerateStackOverflowExit(masm, module, &onThrow))
+ if (mg.masm().asmStackOverflowLabel()->used()) {
+ if (!GenerateStackOverflowStub(mg, &onThrow))
return false;
}
- if (masm.asmSyncInterruptLabel()->used()) {
- if (!GenerateSyncInterruptExit(masm, module, &onThrow))
+ if (mg.masm().asmSyncInterruptLabel()->used()) {
+ if (!GenerateSyncInterruptStub(mg, &onThrow))
return false;
}
- if (masm.asmOnConversionErrorLabel()->used()) {
- if (!GenerateConversionErrorExit(masm, module, &onThrow))
+ if (mg.masm().asmOnConversionErrorLabel()->used()) {
+ if (!GenerateConversionErrorStub(mg, &onThrow))
return false;
}
// Generate unconditionally: the out-of-bounds exit may be used later even
// if signal handling isn't used for out-of-bounds at the moment.
- if (!GenerateOutOfBoundsExit(masm, module, &onThrow))
+ if (!GenerateOutOfBoundsStub(mg, &onThrow))
return false;
// Generate unconditionally: the async interrupt may be taken at any time.
- if (!GenerateAsyncInterruptExit(masm, module, &onThrow))
+ if (!GenerateAsyncInterruptStub(mg, usesHeap, &onThrow))
return false;
if (onThrow.used()) {
- if (!GenerateThrowStub(masm, module, &onThrow))
+ if (!GenerateThrowStub(mg, &onThrow))
return false;
}
return true;
}
--- a/js/src/asmjs/WasmStubs.h
+++ b/js/src/asmjs/WasmStubs.h
@@ -11,28 +11,23 @@
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#ifndef asmjs_wasm_stubs_h
-#define asmjs_wasm_stubs_h
+#ifndef wasm_stubs_h
+#define wasm_stubs_h
-#include "asmjs/Wasm.h"
+#include "asmjs/WasmGenerator.h"
namespace js {
-
-class AsmJSModule;
-namespace jit { class MacroAssembler; }
-
namespace wasm {
-typedef Vector<uint32_t> FuncOffsetVector;
-
bool
-GenerateStubs(jit::MacroAssembler& masm, AsmJSModule& module, const FuncOffsetVector& funcOffsets);
+GenerateStubs(ModuleGenerator& mg, Module::HeapBool usesHeap);
} // namespace wasm
} // namespace js
-#endif // asmjs_wasm_stubs_h
+
+#endif // wasm_stubs_h
new file mode 100644
--- /dev/null
+++ b/js/src/asmjs/WasmTypes.cpp
@@ -0,0 +1,292 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asmjs/WasmTypes.h"
+
+#include "jslibmath.h"
+#include "jsmath.h"
+
+#include "asmjs/AsmJSModule.h"
+#include "js/Conversions.h"
+#include "vm/Interpreter.h"
+
+#include "vm/Stack-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+#if defined(JS_CODEGEN_ARM)
+extern "C" {
+
+extern MOZ_EXPORT int64_t
+__aeabi_idivmod(int, int);
+
+extern MOZ_EXPORT int64_t
+__aeabi_uidivmod(int, int);
+
+}
+#endif
+
+namespace js {
+namespace wasm {
+
+void
+ReportOverRecursed()
+{
+ JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx();
+ ReportOverRecursed(cx);
+}
+
+bool
+HandleExecutionInterrupt()
+{
+ AsmJSActivation* act = JSRuntime::innermostAsmJSActivation();
+ act->module().wasm().setInterrupted(true);
+ bool ret = CheckForInterrupt(act->cx());
+ act->module().wasm().setInterrupted(false);
+ return ret;
+}
+
+} // namespace wasm
+} // namespace js
+
+static void
+OnDetached()
+{
+ // See hasDetachedHeap comment in LinkAsmJS.
+ JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx();
+ JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_OUT_OF_MEMORY);
+}
+
+static void
+OnOutOfBounds()
+{
+ JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx();
+ JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_BAD_INDEX);
+}
+
+static void
+OnImpreciseConversion()
+{
+ JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx();
+ JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_SIMD_FAILED_CONVERSION);
+}
+
+static int32_t
+CoerceInPlace_ToInt32(MutableHandleValue val)
+{
+ JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx();
+
+ int32_t i32;
+ if (!ToInt32(cx, val, &i32))
+ return false;
+ val.set(Int32Value(i32));
+
+ return true;
+}
+
+static int32_t
+CoerceInPlace_ToNumber(MutableHandleValue val)
+{
+ JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx();
+
+ double dbl;
+ if (!ToNumber(cx, val, &dbl))
+ return false;
+ val.set(DoubleValue(dbl));
+
+ return true;
+}
+
+// Use an int32_t return type instead of bool since bool does not have a
+// specified width and the caller is assuming a word-sized return.
+static int32_t
+InvokeImport_Void(int32_t importIndex, int32_t argc, Value* argv)
+{
+ AsmJSActivation* activation = JSRuntime::innermostAsmJSActivation();
+ JSContext* cx = activation->cx();
+ Module& module = activation->module().wasm();
+
+ RootedValue rval(cx);
+ return module.callImport(cx, importIndex, argc, argv, &rval);
+}
+
+// Use an int32_t return type instead of bool since bool does not have a
+// specified width and the caller is assuming a word-sized return.
+static int32_t
+InvokeImport_I32(int32_t importIndex, int32_t argc, Value* argv)
+{
+ AsmJSActivation* activation = JSRuntime::innermostAsmJSActivation();
+ JSContext* cx = activation->cx();
+ Module& module = activation->module().wasm();
+
+ RootedValue rval(cx);
+ if (!module.callImport(cx, importIndex, argc, argv, &rval))
+ return false;
+
+ int32_t i32;
+ if (!ToInt32(cx, rval, &i32))
+ return false;
+
+ argv[0] = Int32Value(i32);
+ return true;
+}
+
+// Use an int32_t return type instead of bool since bool does not have a
+// specified width and the caller is assuming a word-sized return.
+static int32_t
+InvokeImport_F64(int32_t importIndex, int32_t argc, Value* argv)
+{
+ AsmJSActivation* activation = JSRuntime::innermostAsmJSActivation();
+ JSContext* cx = activation->cx();
+ Module& module = activation->module().wasm();
+
+ RootedValue rval(cx);
+ if (!module.callImport(cx, importIndex, argc, argv, &rval))
+ return false;
+
+ double dbl;
+ if (!ToNumber(cx, rval, &dbl))
+ return false;
+
+ argv[0] = DoubleValue(dbl);
+ return true;
+}
+
+template <class F>
+static inline void*
+FuncCast(F* pf, ABIFunctionType type)
+{
+ void *pv = JS_FUNC_TO_DATA_PTR(void*, pf);
+#ifdef JS_SIMULATOR
+ pv = Simulator::RedirectNativeFunction(pv, type);
+#endif
+ return pv;
+}
+
+void*
+wasm::AddressOf(SymbolicAddress imm, ExclusiveContext* cx)
+{
+ switch (imm) {
+ case SymbolicAddress::Runtime:
+ return cx->runtimeAddressForJit();
+ case SymbolicAddress::RuntimeInterruptUint32:
+ return cx->runtimeAddressOfInterruptUint32();
+ case SymbolicAddress::StackLimit:
+ return cx->stackLimitAddressForJitCode(StackForUntrustedScript);
+ case SymbolicAddress::ReportOverRecursed:
+ return FuncCast(wasm::ReportOverRecursed, Args_General0);
+ case SymbolicAddress::OnDetached:
+ return FuncCast(OnDetached, Args_General0);
+ case SymbolicAddress::OnOutOfBounds:
+ return FuncCast(OnOutOfBounds, Args_General0);
+ case SymbolicAddress::OnImpreciseConversion:
+ return FuncCast(OnImpreciseConversion, Args_General0);
+ case SymbolicAddress::HandleExecutionInterrupt:
+ return FuncCast(wasm::HandleExecutionInterrupt, Args_General0);
+ case SymbolicAddress::InvokeImport_Void:
+ return FuncCast(InvokeImport_Void, Args_General3);
+ case SymbolicAddress::InvokeImport_I32:
+ return FuncCast(InvokeImport_I32, Args_General3);
+ case SymbolicAddress::InvokeImport_F64:
+ return FuncCast(InvokeImport_F64, Args_General3);
+ case SymbolicAddress::CoerceInPlace_ToInt32:
+ return FuncCast(CoerceInPlace_ToInt32, Args_General1);
+ case SymbolicAddress::CoerceInPlace_ToNumber:
+ return FuncCast(CoerceInPlace_ToNumber, Args_General1);
+ case SymbolicAddress::ToInt32:
+ return FuncCast<int32_t (double)>(JS::ToInt32, Args_Int_Double);
+#if defined(JS_CODEGEN_ARM)
+ case SymbolicAddress::aeabi_idivmod:
+ return FuncCast(__aeabi_idivmod, Args_General2);
+ case SymbolicAddress::aeabi_uidivmod:
+ return FuncCast(__aeabi_uidivmod, Args_General2);
+ case SymbolicAddress::AtomicCmpXchg:
+ return FuncCast<int32_t (int32_t, int32_t, int32_t, int32_t)>(js::atomics_cmpxchg_asm_callout, Args_General4);
+ case SymbolicAddress::AtomicXchg:
+ return FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_xchg_asm_callout, Args_General3);
+ case SymbolicAddress::AtomicFetchAdd:
+ return FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_add_asm_callout, Args_General3);
+ case SymbolicAddress::AtomicFetchSub:
+ return FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_sub_asm_callout, Args_General3);
+ case SymbolicAddress::AtomicFetchAnd:
+ return FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_and_asm_callout, Args_General3);
+ case SymbolicAddress::AtomicFetchOr:
+ return FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_or_asm_callout, Args_General3);
+ case SymbolicAddress::AtomicFetchXor:
+ return FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_xor_asm_callout, Args_General3);
+#endif
+ case SymbolicAddress::ModD:
+ return FuncCast(NumberMod, Args_Double_DoubleDouble);
+ case SymbolicAddress::SinD:
+#ifdef _WIN64
+ // Workaround a VS 2013 sin issue, see math_sin_uncached.
+ return FuncCast<double (double)>(js::math_sin_uncached, Args_Double_Double);
+#else
+ return FuncCast<double (double)>(sin, Args_Double_Double);
+#endif
+ case SymbolicAddress::CosD:
+ return FuncCast<double (double)>(cos, Args_Double_Double);
+ case SymbolicAddress::TanD:
+ return FuncCast<double (double)>(tan, Args_Double_Double);
+ case SymbolicAddress::ASinD:
+ return FuncCast<double (double)>(asin, Args_Double_Double);
+ case SymbolicAddress::ACosD:
+ return FuncCast<double (double)>(acos, Args_Double_Double);
+ case SymbolicAddress::ATanD:
+ return FuncCast<double (double)>(atan, Args_Double_Double);
+ case SymbolicAddress::CeilD:
+ return FuncCast<double (double)>(ceil, Args_Double_Double);
+ case SymbolicAddress::CeilF:
+ return FuncCast<float (float)>(ceilf, Args_Float32_Float32);
+ case SymbolicAddress::FloorD:
+ return FuncCast<double (double)>(floor, Args_Double_Double);
+ case SymbolicAddress::FloorF:
+ return FuncCast<float (float)>(floorf, Args_Float32_Float32);
+ case SymbolicAddress::ExpD:
+ return FuncCast<double (double)>(exp, Args_Double_Double);
+ case SymbolicAddress::LogD:
+ return FuncCast<double (double)>(log, Args_Double_Double);
+ case SymbolicAddress::PowD:
+ return FuncCast(ecmaPow, Args_Double_DoubleDouble);
+ case SymbolicAddress::ATan2D:
+ return FuncCast(ecmaAtan2, Args_Double_DoubleDouble);
+ case SymbolicAddress::Limit:
+ break;
+ }
+
+ MOZ_CRASH("Bad SymbolicAddress");
+}
+
+CompileArgs::CompileArgs(ExclusiveContext* cx)
+ :
+#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
+ useSignalHandlersForOOB(cx->canUseSignalHandlers()),
+#else
+ useSignalHandlersForOOB(false),
+#endif
+ useSignalHandlersForInterrupt(cx->canUseSignalHandlers())
+{}
+
+bool
+CompileArgs::operator==(CompileArgs rhs) const
+{
+ return useSignalHandlersForOOB == rhs.useSignalHandlersForOOB &&
+ useSignalHandlersForInterrupt == rhs.useSignalHandlersForInterrupt;
+}
rename from js/src/asmjs/Wasm.h
rename to js/src/asmjs/WasmTypes.h
--- a/js/src/asmjs/Wasm.h
+++ b/js/src/asmjs/WasmTypes.h
@@ -11,30 +11,39 @@
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#ifndef asmjs_wasm_h
-#define asmjs_wasm_h
+#ifndef wasm_types_h
+#define wasm_types_h
+#include "mozilla/DebugOnly.h"
#include "mozilla/HashFunctions.h"
+#include "mozilla/Move.h"
+#include "mozilla/UniquePtr.h"
#include "ds/LifoAlloc.h"
#include "jit/IonTypes.h"
#include "js/Utility.h"
#include "js/Vector.h"
namespace js {
+
+class PropertyName;
+
namespace wasm {
using mozilla::Move;
+using mozilla::DebugOnly;
+using mozilla::UniquePtr;
+using mozilla::MallocSizeOf;
// The ValType enum represents the WebAssembly "value type", which are used to
// specify the type of locals and parameters.
// FIXME: uint8_t would make more sense for the underlying storage class, but
// causes miscompilations in GCC (fixed in 4.8.5 and 4.9.3).
enum class ValType
{
@@ -243,16 +252,86 @@ class LifoSig : public Sig<LifoAllocPoli
return nullptr;
ArgVector args(lifo);
if (!args.appendAll(src.args()))
return nullptr;
return new (mem) LifoSig(Move(args), src.ret());
}
};
+// The (,Profiling,Func)Offsets classes are used to record the offsets of
+// different key points in a CodeRange during compilation.
+
+struct Offsets
+{
+ MOZ_IMPLICIT Offsets(uint32_t begin = 0, uint32_t end = 0)
+ : begin(begin), end(end)
+ {}
+
+ // These define a [begin, end) contiguous range of instructions compiled
+ // into a CodeRange.
+ uint32_t begin;
+ uint32_t end;
+
+ void offsetBy(uint32_t offset) {
+ begin += offset;
+ end += offset;
+ }
+};
+
+struct ProfilingOffsets : Offsets
+{
+ MOZ_IMPLICIT ProfilingOffsets(uint32_t profilingReturn = 0)
+ : Offsets(), profilingReturn(profilingReturn)
+ {}
+
+ // For CodeRanges with ProfilingOffsets, 'begin' is the offset of the
+ // profiling entry.
+ uint32_t profilingEntry() const { return begin; }
+
+ // The profiling return is the offset of the return instruction, which
+ // precedes the 'end' by a variable number of instructions due to
+ // out-of-line codegen.
+ uint32_t profilingReturn;
+
+ void offsetBy(uint32_t offset) {
+ Offsets::offsetBy(offset);
+ profilingReturn += offset;
+ }
+};
+
+struct FuncOffsets : ProfilingOffsets
+{
+ MOZ_IMPLICIT FuncOffsets(uint32_t nonProfilingEntry = 0,
+ uint32_t profilingJump = 0,
+ uint32_t profilingEpilogue = 0)
+ : ProfilingOffsets(),
+ nonProfilingEntry(nonProfilingEntry),
+ profilingJump(profilingJump),
+ profilingEpilogue(profilingEpilogue)
+ {}
+
+ // Function CodeRanges have an additional non-profiling entry that comes
+ // after the profiling entry and a non-profiling epilogue that comes before
+ // the profiling epilogue.
+ uint32_t nonProfilingEntry;
+
+ // When profiling is enabled, the 'nop' at offset 'profilingJump' is
+ // overwritten to be a jump to 'profilingEpilogue'.
+ uint32_t profilingJump;
+ uint32_t profilingEpilogue;
+
+ void offsetBy(uint32_t offset) {
+ ProfilingOffsets::offsetBy(offset);
+ nonProfilingEntry += offset;
+ profilingJump += offset;
+ profilingEpilogue += offset;
+ }
+};
+
// While the frame-pointer chain allows the stack to be unwound without
// metadata, Error.stack still needs to know the line/column of every call in
// the chain. A CallSiteDesc describes a single callsite to which CallSite adds
// the metadata necessary to walk up to the next frame. Lastly CallSiteAndTarget
// adds the function index of the callee.
class CallSiteDesc
{
@@ -417,20 +496,24 @@ class HeapAccess {
public:
void offsetInsnOffsetBy(uint32_t) { MOZ_CRASH(); }
uint32_t insnOffset() const { MOZ_CRASH(); }
};
#endif
typedef Vector<HeapAccess, 0, SystemAllocPolicy> HeapAccessVector;
-// A wasm::Builtin represents a function implemented by the engine that is
-// called directly from wasm code and should show up in the callstack.
+// A wasm::SymbolicAddress represents a pointer to a well-known function or
+// object that is embedded in wasm code. Since wasm code is serialized and
+// later deserialized into a different address space, symbolic addresses must be
+// used for *all* pointers into the address space. The MacroAssembler records a
+// list of all SymbolicAddresses and the offsets of their use in the code for
+// later patching during static linking.
-enum class Builtin : uint16_t
+enum class SymbolicAddress
{
ToInt32,
#if defined(JS_CODEGEN_ARM)
aeabi_idivmod,
aeabi_uidivmod,
AtomicCmpXchg,
AtomicXchg,
AtomicFetchAdd,
@@ -449,132 +532,54 @@ enum class Builtin : uint16_t
CeilD,
CeilF,
FloorD,
FloorF,
ExpD,
LogD,
PowD,
ATan2D,
- Limit
-};
-
-// A wasm::SymbolicAddress represents a pointer to a well-known function or
-// object that is embedded in wasm code. Since wasm code is serialized and
-// later deserialized into a different address space, symbolic addresses must be
-// used for *all* pointers into the address space. The MacroAssembler records a
-// list of all SymbolicAddresses and the offsets of their use in the code for
-// later patching during static linking.
-
-enum class SymbolicAddress
-{
- ToInt32 = unsigned(Builtin::ToInt32),
-#if defined(JS_CODEGEN_ARM)
- aeabi_idivmod = unsigned(Builtin::aeabi_idivmod),
- aeabi_uidivmod = unsigned(Builtin::aeabi_uidivmod),
- AtomicCmpXchg = unsigned(Builtin::AtomicCmpXchg),
- AtomicXchg = unsigned(Builtin::AtomicXchg),
- AtomicFetchAdd = unsigned(Builtin::AtomicFetchAdd),
- AtomicFetchSub = unsigned(Builtin::AtomicFetchSub),
- AtomicFetchAnd = unsigned(Builtin::AtomicFetchAnd),
- AtomicFetchOr = unsigned(Builtin::AtomicFetchOr),
- AtomicFetchXor = unsigned(Builtin::AtomicFetchXor),
-#endif
- ModD = unsigned(Builtin::ModD),
- SinD = unsigned(Builtin::SinD),
- CosD = unsigned(Builtin::CosD),
- TanD = unsigned(Builtin::TanD),
- ASinD = unsigned(Builtin::ASinD),
- ACosD = unsigned(Builtin::ACosD),
- ATanD = unsigned(Builtin::ATanD),
- CeilD = unsigned(Builtin::CeilD),
- CeilF = unsigned(Builtin::CeilF),
- FloorD = unsigned(Builtin::FloorD),
- FloorF = unsigned(Builtin::FloorF),
- ExpD = unsigned(Builtin::ExpD),
- LogD = unsigned(Builtin::LogD),
- PowD = unsigned(Builtin::PowD),
- ATan2D = unsigned(Builtin::ATan2D),
Runtime,
RuntimeInterruptUint32,
StackLimit,
ReportOverRecursed,
OnDetached,
OnOutOfBounds,
OnImpreciseConversion,
HandleExecutionInterrupt,
- InvokeFromAsmJS_Ignore,
- InvokeFromAsmJS_ToInt32,
- InvokeFromAsmJS_ToNumber,
+ InvokeImport_Void,
+ InvokeImport_I32,
+ InvokeImport_F64,
CoerceInPlace_ToInt32,
CoerceInPlace_ToNumber,
Limit
};
-static inline SymbolicAddress
-BuiltinToImmediate(Builtin b)
-{
- return SymbolicAddress(b);
-}
+void*
+AddressOf(SymbolicAddress imm, ExclusiveContext* cx);
-static inline bool
-ImmediateIsBuiltin(SymbolicAddress imm, Builtin* builtin)
-{
- if (uint32_t(imm) < uint32_t(Builtin::Limit)) {
- *builtin = Builtin(imm);
- return true;
- }
- return false;
-}
+// The CompileArgs struct captures global parameters that affect all wasm code
+// generation. It also currently is the single source of truth for whether or
+// not to use signal handlers for different purposes.
-// An ExitReason describes the possible reasons for leaving compiled wasm code
-// or the state of not having left compiled wasm code (ExitReason::None).
-
-class ExitReason
+struct CompileArgs
{
- public:
- // List of reasons for execution leaving compiled wasm code (or None, if
- // control hasn't exited).
- enum Kind
- {
- None, // default state, the pc is in wasm code
- Jit, // fast-path exit to JIT code
- Slow, // general case exit to C++ Invoke
- Interrupt, // executing an interrupt callback
- Builtin // calling into a builtin (native) function
- };
-
- private:
- Kind kind_;
- wasm::Builtin builtin_;
+ bool useSignalHandlersForOOB;
+ bool useSignalHandlersForInterrupt;
- public:
- ExitReason() = default;
- MOZ_IMPLICIT ExitReason(Kind kind) : kind_(kind) { MOZ_ASSERT(kind != Builtin); }
- MOZ_IMPLICIT ExitReason(wasm::Builtin builtin) : kind_(Builtin), builtin_(builtin) {}
- Kind kind() const { return kind_; }
- wasm::Builtin builtin() const { MOZ_ASSERT(kind_ == Builtin); return builtin_; }
-
- uint32_t pack() const {
- static_assert(sizeof(wasm::Builtin) == 2, "fits");
- return uint16_t(kind_) | (uint16_t(builtin_) << 16);
- }
- static ExitReason unpack(uint32_t u32) {
- static_assert(sizeof(wasm::Builtin) == 2, "fits");
- ExitReason r;
- r.kind_ = Kind(uint16_t(u32));
- r.builtin_ = wasm::Builtin(uint16_t(u32 >> 16));
- return r;
- }
+ CompileArgs() = default;
+ explicit CompileArgs(ExclusiveContext* cx);
+ bool operator==(CompileArgs rhs) const;
+ bool operator!=(CompileArgs rhs) const { return !(*this == rhs); }
};
-// A hoisting of constants that would otherwise require #including WasmModule.h
-// everywhere. Values are asserted in WasmModule.h.
+// Constants:
static const unsigned ActivationGlobalDataOffset = 0;
-static const unsigned HeapGlobalDataOffset = sizeof(void*);
-static const unsigned NaN64GlobalDataOffset = 2 * sizeof(void*);
-static const unsigned NaN32GlobalDataOffset = 2 * sizeof(void*) + sizeof(double);
+static const unsigned HeapGlobalDataOffset = ActivationGlobalDataOffset + sizeof(void*);
+static const unsigned NaN64GlobalDataOffset = HeapGlobalDataOffset + sizeof(void*);
+static const unsigned NaN32GlobalDataOffset = NaN64GlobalDataOffset + sizeof(double);
+static const uint32_t InitialGlobalDataBytes = NaN32GlobalDataOffset + sizeof(float);
} // namespace wasm
} // namespace js
-#endif // asmjs_wasm_h
+#endif // wasm_types_h
--- a/js/src/builtin/AtomicsObject.cpp
+++ b/js/src/builtin/AtomicsObject.cpp
@@ -518,19 +518,19 @@ js::atomics_isLockFree(JSContext* cx, un
// To test this, either run on eg Raspberry Pi Model 1, or invoke the ARM
// simulator build with ARMHWCAP=vfp set. Do not set any other flags; other
// vfp/neon flags force ARMv7 to be set.
static void
GetCurrentAsmJSHeap(SharedMem<void*>* heap, size_t* length)
{
JSRuntime* rt = js::TlsPerThreadData.get()->runtimeFromMainThread();
- AsmJSModule& mod = rt->asmJSActivationStack()->module();
- *heap = mod.maybeHeap().cast<void*>();
- *length = mod.heapLength();
+ wasm::Module& module = rt->asmJSActivationStack()->module().wasm();
+ *heap = module.maybeHeap().cast<void*>();
+ *length = module.heapLength();
}
int32_t
js::atomics_add_asm_callout(int32_t vt, int32_t offset, int32_t value)
{
SharedMem<void*> heap;
size_t heapLength;
GetCurrentAsmJSHeap(&heap, &heapLength);
--- a/js/src/builtin/WeakSetObject.cpp
+++ b/js/src/builtin/WeakSetObject.cpp
@@ -6,16 +6,17 @@
#include "builtin/WeakSetObject.h"
#include "jsapi.h"
#include "jscntxt.h"
#include "jsiter.h"
#include "builtin/SelfHostingDefines.h"
+#include "builtin/WeakMapObject.h"
#include "vm/GlobalObject.h"
#include "vm/SelfHosting.h"
#include "jsobjinlines.h"
#include "vm/Interpreter-inl.h"
#include "vm/NativeObject-inl.h"
--- a/js/src/frontend/ParseNode.h
+++ b/js/src/frontend/ParseNode.h
@@ -4,16 +4,17 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef frontend_ParseNode_h
#define frontend_ParseNode_h
#include "mozilla/Attributes.h"
+#include "builtin/ModuleObject.h"
#include "frontend/TokenStream.h"
namespace js {
namespace frontend {
template <typename ParseHandler>
struct ParseContext;
--- a/js/src/jit-test/tests/asm.js/testProfiling.js
+++ b/js/src/jit-test/tests/asm.js/testProfiling.js
@@ -103,41 +103,43 @@ for (var i = 0; i < 3; i++) {
function testBuiltinD2D(name) {
var m = asmCompile('g', USE_ASM + "var fun=g.Math." + name + "; function f(d) { d=+d; return +fun(d) } return f");
for (var i = 0; i < 3; i++) {
var f = asmLink(m, this);
enableSingleStepProfiling();
assertEq(f(.1), eval("Math." + name + "(.1)"));
var stacks = disableSingleStepProfiling();
- assertStackContainsSeq(stacks, ">,f,>,Math." + name + ",f,>,f,>,>");
+ assertStackContainsSeq(stacks, ">,f,>,native call,>,f,>,>");
}
}
for (name of ['sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'ceil', 'floor', 'exp', 'log'])
testBuiltinD2D(name);
+
function testBuiltinF2F(name) {
var m = asmCompile('g', USE_ASM + "var tof=g.Math.fround; var fun=g.Math." + name + "; function f(d) { d=tof(d); return tof(fun(d)) } return f");
for (var i = 0; i < 3; i++) {
var f = asmLink(m, this);
enableSingleStepProfiling();
assertEq(f(.1), eval("Math.fround(Math." + name + "(Math.fround(.1)))"));
var stacks = disableSingleStepProfiling();
- assertStackContainsSeq(stacks, ">,f,>,Math." + name + ",f,>,f,>,>");
+ assertStackContainsSeq(stacks, ">,f,>,native call,>,f,>,>");
}
}
for (name of ['ceil', 'floor'])
testBuiltinF2F(name);
+
function testBuiltinDD2D(name) {
var m = asmCompile('g', USE_ASM + "var fun=g.Math." + name + "; function f(d, e) { d=+d; e=+e; return +fun(d,e) } return f");
for (var i = 0; i < 3; i++) {
var f = asmLink(m, this);
enableSingleStepProfiling();
assertEq(f(.1, .2), eval("Math." + name + "(.1, .2)"));
var stacks = disableSingleStepProfiling();
- assertStackContainsSeq(stacks, ">,f,>,Math." + name + ",f,>,f,>,>");
+ assertStackContainsSeq(stacks, ">,f,>,native call,>,f,>,>");
}
}
for (name of ['atan2', 'pow'])
testBuiltinDD2D(name);
// FFI tests:
setJitCompilerOption("ion.warmup.trigger", 10);
setJitCompilerOption("baseline.warmup.trigger", 0);
--- a/js/src/jit/BaselineJIT.cpp
+++ b/js/src/jit/BaselineJIT.cpp
@@ -3,17 +3,17 @@
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/BaselineJIT.h"
#include "mozilla/MemoryReporting.h"
-#include "asmjs/AsmJSModule.h"
+#include "asmjs/WasmModule.h"
#include "jit/BaselineCompiler.h"
#include "jit/BaselineIC.h"
#include "jit/CompileInfo.h"
#include "jit/JitCommon.h"
#include "jit/JitSpewer.h"
#include "vm/Debugger.h"
#include "vm/Interpreter.h"
#include "vm/TraceLogging.h"
@@ -46,17 +46,17 @@ BaselineScript::BaselineScript(uint32_t
uint32_t profilerEnterToggleOffset,
uint32_t profilerExitToggleOffset,
uint32_t traceLoggerEnterToggleOffset,
uint32_t traceLoggerExitToggleOffset,
uint32_t postDebugPrologueOffset)
: method_(nullptr),
templateScope_(nullptr),
fallbackStubSpace_(),
- dependentAsmJSModules_(nullptr),
+ dependentWasmModules_(nullptr),
prologueOffset_(prologueOffset),
epilogueOffset_(epilogueOffset),
profilerEnterToggleOffset_(profilerEnterToggleOffset),
profilerExitToggleOffset_(profilerExitToggleOffset),
#ifdef JS_TRACE_LOGGING
# ifdef DEBUG
traceLoggerScriptsEnabled_(false),
traceLoggerEngineEnabled_(false),
@@ -480,70 +480,67 @@ BaselineScript::Destroy(FreeOp* fop, Bas
* destroy scripts outside the context of a GC, this situation can result
* in invalid store buffer entries. Assert that if we do destroy scripts
* outside of a GC that we at least emptied the nursery first.
*/
MOZ_ASSERT(fop->runtime()->gc.nursery.isEmpty());
MOZ_ASSERT(!script->hasPendingIonBuilder());
- script->unlinkDependentAsmJSModules(fop);
+ script->unlinkDependentWasmModules(fop);
fop->delete_(script);
}
void
-BaselineScript::clearDependentAsmJSModules()
+BaselineScript::clearDependentWasmModules()
{
- // Remove any links from AsmJSModules that contain optimized FFI calls into
+ // Remove any links from wasm::Modules that contain optimized import calls into
// this BaselineScript.
- if (dependentAsmJSModules_) {
- for (size_t i = 0; i < dependentAsmJSModules_->length(); i++) {
- DependentAsmJSModuleExit exit = (*dependentAsmJSModules_)[i];
- exit.module->exit(exit.exitIndex).deoptimize(*exit.module);
- }
-
- dependentAsmJSModules_->clear();
+ if (dependentWasmModules_) {
+ for (DependentWasmModuleImport dep : *dependentWasmModules_)
+ dep.module->deoptimizeImportExit(dep.importIndex);
+ dependentWasmModules_->clear();
}
}
void
-BaselineScript::unlinkDependentAsmJSModules(FreeOp* fop)
+BaselineScript::unlinkDependentWasmModules(FreeOp* fop)
{
- // Remove any links from AsmJSModules that contain optimized FFI calls into
+ // Remove any links from wasm::Modules that contain optimized FFI calls into
// this BaselineScript.
- clearDependentAsmJSModules();
- if (dependentAsmJSModules_) {
- fop->delete_(dependentAsmJSModules_);
- dependentAsmJSModules_ = nullptr;
+ clearDependentWasmModules();
+ if (dependentWasmModules_) {
+ fop->delete_(dependentWasmModules_);
+ dependentWasmModules_ = nullptr;
}
}
bool
-BaselineScript::addDependentAsmJSModule(JSContext* cx, DependentAsmJSModuleExit exit)
+BaselineScript::addDependentWasmModule(JSContext* cx, wasm::Module& module, uint32_t importIndex)
{
- if (!dependentAsmJSModules_) {
- dependentAsmJSModules_ = cx->new_<Vector<DependentAsmJSModuleExit> >(cx);
- if (!dependentAsmJSModules_)
+ if (!dependentWasmModules_) {
+ dependentWasmModules_ = cx->new_<Vector<DependentWasmModuleImport> >(cx);
+ if (!dependentWasmModules_)
return false;
}
- return dependentAsmJSModules_->append(exit);
+ return dependentWasmModules_->emplaceBack(&module, importIndex);
}
void
-BaselineScript::removeDependentAsmJSModule(DependentAsmJSModuleExit exit)
+BaselineScript::removeDependentWasmModule(wasm::Module& module, uint32_t importIndex)
{
- if (!dependentAsmJSModules_)
+ if (!dependentWasmModules_)
return;
- for (size_t i = 0; i < dependentAsmJSModules_->length(); i++) {
- if ((*dependentAsmJSModules_)[i].module == exit.module &&
- (*dependentAsmJSModules_)[i].exitIndex == exit.exitIndex)
+ for (size_t i = 0; i < dependentWasmModules_->length(); i++) {
+ if ((*dependentWasmModules_)[i].module == &module &&
+ (*dependentWasmModules_)[i].importIndex == importIndex)
{
- dependentAsmJSModules_->erase(dependentAsmJSModules_->begin() + i);
+ dependentWasmModules_->erase(dependentWasmModules_->begin() + i);
break;
}
}
}
ICEntry&
BaselineScript::icEntry(size_t index)
{
--- a/js/src/jit/BaselineJIT.h
+++ b/js/src/jit/BaselineJIT.h
@@ -89,26 +89,26 @@ struct PCMappingIndexEntry
// Native code offset.
uint32_t nativeOffset;
// Offset in the CompactBuffer where data for pcOffset starts.
uint32_t bufferOffset;
};
-// Describes a single AsmJSModule which jumps (via an FFI exit with the given
-// index) directly to a BaselineScript or IonScript.
-struct DependentAsmJSModuleExit
+// Describes a single wasm::Module::ImportExit which jumps (via an import with
+// the given index) directly to a BaselineScript or IonScript.
+struct DependentWasmModuleImport
{
- const AsmJSModule* module;
- size_t exitIndex;
+ wasm::Module* module;
+ size_t importIndex;
- DependentAsmJSModuleExit(const AsmJSModule* module, size_t exitIndex)
+ DependentWasmModuleImport(wasm::Module* module, size_t importIndex)
: module(module),
- exitIndex(exitIndex)
+ importIndex(importIndex)
{ }
};
struct BaselineScript
{
public:
static const uint32_t MAX_JSSCRIPT_LENGTH = 0x0fffffffu;
@@ -124,19 +124,19 @@ struct BaselineScript
// For functions with a call object, template objects to use for the call
// object and decl env object (linked via the call object's enclosing
// scope).
RelocatablePtrObject templateScope_;
// Allocated space for fallback stubs.
FallbackICStubSpace fallbackStubSpace_;
- // If non-null, the list of AsmJSModules that contain an optimized call
+ // If non-null, the list of wasm::Modules that contain an optimized call
// directly to this script.
- Vector<DependentAsmJSModuleExit>* dependentAsmJSModules_;
+ Vector<DependentWasmModuleImport>* dependentWasmModules_;
// Native code offset right before the scope chain is initialized.
uint32_t prologueOffset_;
// Native code offset right before the frame is popped and the method
// returned from.
uint32_t epilogueOffset_;
@@ -395,20 +395,20 @@ struct BaselineScript
uint8_t* nativeCodeForPC(JSScript* script, jsbytecode* pc,
PCMappingSlotInfo* slotInfo = nullptr);
// Return the bytecode offset for a given native code address. Be careful
// when using this method: we don't emit code for some bytecode ops, so
// the result may not be accurate.
jsbytecode* approximatePcForNativeAddress(JSScript* script, uint8_t* nativeAddress);
- bool addDependentAsmJSModule(JSContext* cx, DependentAsmJSModuleExit exit);
- void unlinkDependentAsmJSModules(FreeOp* fop);
- void clearDependentAsmJSModules();
- void removeDependentAsmJSModule(DependentAsmJSModuleExit exit);
+ bool addDependentWasmModule(JSContext* cx, wasm::Module& module, uint32_t importIndex);
+ void unlinkDependentWasmModules(FreeOp* fop);
+ void clearDependentWasmModules();
+ void removeDependentWasmModule(wasm::Module& module, uint32_t importIndex);
// Toggle debug traps (used for breakpoints and step mode) in the script.
// If |pc| is nullptr, toggle traps for all ops in the script. Else, only
// toggle traps at |pc|.
void toggleDebugTraps(JSScript* script, jsbytecode* pc);
void toggleProfilerInstrumentation(bool enable);
bool isProfilerInstrumentationOn() const {
@@ -475,17 +475,17 @@ struct BaselineScript
MOZ_ASSERT(!builder || !hasPendingIonBuilder());
if (script->isIonCompilingOffThread())
script->setIonScript(maybecx, ION_PENDING_SCRIPT);
pendingBuilder_ = builder;
// lazy linking cannot happen during asmjs to ion.
- clearDependentAsmJSModules();
+ clearDependentWasmModules();
script->updateBaselineOrIonRaw(maybecx);
}
void removePendingIonBuilder(JSScript* script) {
setPendingIonBuilder(nullptr, script, nullptr);
if (script->maybeIonScript() == ION_PENDING_SCRIPT)
script->setIonScript(nullptr, nullptr);
}
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -156,17 +156,17 @@ CodeGenerator::CodeGenerator(MIRGenerato
, ionScriptLabels_(gen->alloc())
, scriptCounts_(nullptr)
, simdRefreshTemplatesDuringLink_(0)
{
}
CodeGenerator::~CodeGenerator()
{
- MOZ_ASSERT_IF(!gen->compilingAsmJS(), masm.numAsmJSAbsoluteLinks() == 0);
+ MOZ_ASSERT_IF(!gen->compilingAsmJS(), masm.numAsmJSAbsoluteAddresses() == 0);
js_delete(scriptCounts_);
}
typedef bool (*StringToNumberFn)(ExclusiveContext*, JSString*, double*);
static const VMFunction StringToNumberInfo = FunctionInfo<StringToNumberFn>(StringToNumber);
void
CodeGenerator::visitValueToInt32(LValueToInt32* lir)
@@ -7873,21 +7873,21 @@ CodeGenerator::visitRest(LRest* lir)
masm.movePtr(ImmPtr(nullptr), temp2);
}
masm.bind(&joinAlloc);
emitRest(lir, temp2, numActuals, temp0, temp1, numFormals, templateObject, false, ToRegister(lir->output()));
}
bool
-CodeGenerator::generateAsmJS(AsmJSFunctionOffsets* offsets)
+CodeGenerator::generateAsmJS(wasm::FuncOffsets* offsets)
{
JitSpew(JitSpew_Codegen, "# Emitting asm.js code");
- GenerateAsmJSFunctionPrologue(masm, frameSize(), offsets);
+ wasm::GenerateFunctionPrologue(masm, frameSize(), offsets);
// Overflow checks are omitted by CodeGenerator in some cases (leaf
// functions with small framePushed). Perform overflow-checking after
// pushing framePushed to catch cases with really large frames.
Label onOverflow;
if (!omitOverRecursedCheck()) {
// See comment below.
Label* target = frameSize() > 0 ? &onOverflow : masm.asmStackOverflowLabel();
@@ -7897,17 +7897,17 @@ CodeGenerator::generateAsmJS(AsmJSFuncti
target);
}
if (!generateBody())
return false;
masm.bind(&returnLabel_);
- GenerateAsmJSFunctionEpilogue(masm, frameSize(), offsets);
+ wasm::GenerateFunctionEpilogue(masm, frameSize(), offsets);
if (onOverflow.used()) {
// The stack overflow stub assumes that only sizeof(AsmJSFrame) bytes have
// been pushed. The overflow check occurs after incrementing by
// framePushed, so pop that before jumping to the overflow exit.
masm.bind(&onOverflow);
masm.addToStackPtr(Imm32(frameSize()));
masm.jump(masm.asmStackOverflowLabel());
--- a/js/src/jit/CodeGenerator.h
+++ b/js/src/jit/CodeGenerator.h
@@ -56,17 +56,17 @@ class CodeGenerator : public CodeGenerat
ConstantOrRegister toConstantOrRegister(LInstruction* lir, size_t n, MIRType type);
public:
CodeGenerator(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm = nullptr);
~CodeGenerator();
public:
bool generate();
- bool generateAsmJS(AsmJSFunctionOffsets *offsets);
+ bool generateAsmJS(wasm::FuncOffsets *offsets);
bool link(JSContext* cx, CompilerConstraintList* constraints);
bool linkSharedStubs(JSContext* cx);
void visitOsiPoint(LOsiPoint* lir);
void visitGoto(LGoto* lir);
void visitTableSwitch(LTableSwitch* ins);
void visitTableSwitchV(LTableSwitchV* ins);
void visitCloneLiteral(LCloneLiteral* lir);
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -3265,29 +3265,31 @@ AutoFlushICache::setRange(uintptr_t star
// handler on MacOS running the ARM simulator.
void
AutoFlushICache::flush(uintptr_t start, size_t len)
{
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
PerThreadData* pt = TlsPerThreadData.get();
AutoFlushICache* afc = pt ? pt->PerThreadData::autoFlushICache() : nullptr;
if (!afc) {
+ MOZ_ASSERT(!IsCompilingAsmJS(), "asm.js should always create an AutoFlushICache");
JitSpewCont(JitSpew_CacheFlush, "#");
ExecutableAllocator::cacheFlush((void*)start, len);
MOZ_ASSERT(len <= 32);
return;
}
uintptr_t stop = start + len;
if (start >= afc->start_ && stop <= afc->stop_) {
// Update is within the pending flush range, so defer to the end of the context.
JitSpewCont(JitSpew_CacheFlush, afc->inhibit_ ? "-" : "=");
return;
}
+ MOZ_ASSERT(!IsCompilingAsmJS(), "asm.js should always flush within the range");
JitSpewCont(JitSpew_CacheFlush, afc->inhibit_ ? "x" : "*");
ExecutableAllocator::cacheFlush((void*)start, len);
#endif
}
// Flag the current dynamic AutoFlushICache as inhibiting flushing. Useful in error paths
// where the changes are being abandoned.
void
--- a/js/src/jit/Linker.h
+++ b/js/src/jit/Linker.h
@@ -32,17 +32,17 @@ class Linker
explicit Linker(MacroAssembler& masm)
: masm(masm)
{
masm.finish();
}
template <AllowGC allowGC>
JitCode* newCode(JSContext* cx, CodeKind kind) {
- MOZ_ASSERT(masm.numAsmJSAbsoluteLinks() == 0);
+ MOZ_ASSERT(masm.numAsmJSAbsoluteAddresses() == 0);
gc::AutoSuppressGC suppressGC(cx);
if (masm.oom())
return fail(cx);
ExecutablePool* pool;
size_t bytesNeeded = masm.bytesNeeded() + sizeof(JitCode*) + CodeAlignment;
if (bytesNeeded >= MAX_BUFFER_SIZE)
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -13838,27 +13838,27 @@ class MAsmJSCall final
class Callee {
public:
enum Which { Internal, Dynamic, Builtin };
private:
Which which_;
union {
AsmJSInternalCallee internal_;
MDefinition* dynamic_;
- wasm::Builtin builtin_;
+ wasm::SymbolicAddress builtin_;
} u;
public:
Callee() {}
explicit Callee(AsmJSInternalCallee callee) : which_(Internal) { u.internal_ = callee; }
explicit Callee(MDefinition* callee) : which_(Dynamic) { u.dynamic_ = callee; }
- explicit Callee(wasm::Builtin callee) : which_(Builtin) { u.builtin_ = callee; }
+ explicit Callee(wasm::SymbolicAddress callee) : which_(Builtin) { u.builtin_ = callee; }
Which which() const { return which_; }
AsmJSInternalCallee internal() const { MOZ_ASSERT(which_ == Internal); return u.internal_; }
MDefinition* dynamic() const { MOZ_ASSERT(which_ == Dynamic); return u.dynamic_; }
- wasm::Builtin builtin() const { MOZ_ASSERT(which_ == Builtin); return u.builtin_; }
+ wasm::SymbolicAddress builtin() const { MOZ_ASSERT(which_ == Builtin); return u.builtin_; }
};
private:
wasm::CallSiteDesc desc_;
Callee callee_;
FixedList<AnyRegister> argRegs_;
size_t spIncrement_;
--- a/js/src/jit/MIRGraph.cpp
+++ b/js/src/jit/MIRGraph.cpp
@@ -1,17 +1,16 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/MIRGraph.h"
-#include "asmjs/AsmJSValidate.h"
#include "jit/BytecodeAnalysis.h"
#include "jit/Ion.h"
#include "jit/JitSpewer.h"
#include "jit/MIR.h"
#include "jit/MIRGenerator.h"
using namespace js;
using namespace js::jit;
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -376,25 +376,24 @@ class MacroAssembler : public MacroAssem
// This constructor should only be used when there is no JitContext active
// (for example, Trampoline-$(ARCH).cpp and IonCaches.cpp).
explicit MacroAssembler(JSContext* cx, IonScript* ion = nullptr,
JSScript* script = nullptr, jsbytecode* pc = nullptr);
// asm.js compilation handles its own JitContext-pushing
struct AsmJSToken {};
- explicit MacroAssembler(AsmJSToken, TempAllocator *alloc)
+ explicit MacroAssembler(AsmJSToken, TempAllocator& alloc)
: framePushed_(0),
#ifdef DEBUG
inCall_(false),
#endif
emitProfilingInstrumentation_(false)
{
- if (alloc)
- moveResolver_.setAllocator(*alloc);
+ moveResolver_.setAllocator(alloc);
#if defined(JS_CODEGEN_ARM)
initWithAllocator();
m_buffer.id = 0;
#elif defined(JS_CODEGEN_ARM64)
initWithAllocator();
armbuffer_.id = 0;
#endif
--- a/js/src/jit/arm/Assembler-arm.cpp
+++ b/js/src/jit/arm/Assembler-arm.cpp
@@ -3351,17 +3351,17 @@ void Assembler::UpdateBoundsCheck(uint32
MOZ_ASSERT(op.isImm8());
#endif
Imm8 imm8 = Imm8(heapSize);
MOZ_ASSERT(!imm8.invalid);
*inst = InstALU(InvalidReg, index, imm8, OpCmp, SetCC, Always);
// NOTE: we don't update the Auto Flush Cache! this function is currently
- // only called from within AsmJSModule::patchHeapAccesses, which does that
+ // only called from within ModuleGenerator::finish, which does that
// for us. Don't call this!
}
InstructionIterator::InstructionIterator(Instruction* i_)
: i(i_)
{
// Work around pools with an artificial pool guard and around nop-fill.
i = i->skipPool();
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -1940,17 +1940,17 @@ void
MacroAssemblerARMCompat::movePtr(wasm::SymbolicAddress imm, Register dest)
{
RelocStyle rs;
if (HasMOVWT())
rs = L_MOVWT;
else
rs = L_LDR;
- append(AsmJSAbsoluteLink(CodeOffset(currentOffset()), imm));
+ append(AsmJSAbsoluteAddress(CodeOffset(currentOffset()), imm));
ma_movPatchable(Imm32(-1), dest, Always, rs);
}
void
MacroAssemblerARMCompat::load8ZeroExtend(const Address& address, Register dest)
{
ma_dataTransferN(IsLoad, 8, false, address.base, Imm32(address.offset), dest);
}
--- a/js/src/jit/arm64/MacroAssembler-arm64.h
+++ b/js/src/jit/arm64/MacroAssembler-arm64.h
@@ -773,17 +773,17 @@ class MacroAssemblerCompat : public vixl
void movePtr(ImmWord imm, Register dest) {
Mov(ARMRegister(dest, 64), int64_t(imm.value));
}
void movePtr(ImmPtr imm, Register dest) {
Mov(ARMRegister(dest, 64), int64_t(imm.value));
}
void movePtr(wasm::SymbolicAddress imm, Register dest) {
BufferOffset off = movePatchablePtr(ImmWord(0xffffffffffffffffULL), dest);
- append(AsmJSAbsoluteLink(CodeOffset(off.getOffset()), imm));
+ append(AsmJSAbsoluteAddress(CodeOffset(off.getOffset()), imm));
}
void movePtr(ImmGCPtr imm, Register dest) {
BufferOffset load = movePatchablePtr(ImmPtr(imm.value), dest);
writeDataRelocation(imm, load);
}
void move64(Register64 src, Register64 dest) {
movePtr(src.reg, dest.reg);
}
--- a/js/src/jit/mips32/MacroAssembler-mips32.cpp
+++ b/js/src/jit/mips32/MacroAssembler-mips32.cpp
@@ -751,17 +751,17 @@ MacroAssemblerMIPSCompat::movePtr(ImmGCP
void
MacroAssemblerMIPSCompat::movePtr(ImmPtr imm, Register dest)
{
movePtr(ImmWord(uintptr_t(imm.value)), dest);
}
void
MacroAssemblerMIPSCompat::movePtr(wasm::SymbolicAddress imm, Register dest)
{
- append(AsmJSAbsoluteLink(CodeOffset(nextOffset().getOffset()), imm));
+ append(AsmJSAbsoluteAddress(CodeOffset(nextOffset().getOffset()), imm));
ma_liPatchable(dest, ImmWord(-1));
}
void
MacroAssemblerMIPSCompat::load8ZeroExtend(const Address& address, Register dest)
{
ma_load(dest, address, SizeByte, ZeroExtend);
}
--- a/js/src/jit/mips32/Simulator-mips32.cpp
+++ b/js/src/jit/mips32/Simulator-mips32.cpp
@@ -30,17 +30,16 @@
#include "mozilla/Casting.h"
#include "mozilla/FloatingPoint.h"
#include "mozilla/Likely.h"
#include "mozilla/MathAlgorithms.h"
#include <float.h>
-#include "asmjs/AsmJSValidate.h"
#include "jit/mips32/Assembler-mips32.h"
#include "vm/Runtime.h"
namespace js {
namespace jit {
static const Instr kCallRedirInstr = op_special | MAX_BREAK_CODE << FunctionBits | ff_break;
--- a/js/src/jit/mips64/MacroAssembler-mips64.cpp
+++ b/js/src/jit/mips64/MacroAssembler-mips64.cpp
@@ -890,17 +890,17 @@ MacroAssemblerMIPS64Compat::movePtr(ImmG
void
MacroAssemblerMIPS64Compat::movePtr(ImmPtr imm, Register dest)
{
movePtr(ImmWord(uintptr_t(imm.value)), dest);
}
void
MacroAssemblerMIPS64Compat::movePtr(wasm::SymbolicAddress imm, Register dest)
{
- append(AsmJSAbsoluteLink(CodeOffset(nextOffset().getOffset()), imm));
+ append(AsmJSAbsoluteAddress(CodeOffset(nextOffset().getOffset()), imm));
ma_liPatchable(dest, ImmWord(-1));
}
void
MacroAssemblerMIPS64Compat::load8ZeroExtend(const Address& address, Register dest)
{
ma_load(dest, address, SizeByte, ZeroExtend);
}
--- a/js/src/jit/mips64/Simulator-mips64.cpp
+++ b/js/src/jit/mips64/Simulator-mips64.cpp
@@ -31,17 +31,16 @@
#include "mozilla/Casting.h"
#include "mozilla/FloatingPoint.h"
#include "mozilla/Likely.h"
#include "mozilla/MathAlgorithms.h"
#include <float.h>
-#include "asmjs/AsmJSValidate.h"
#include "jit/mips64/Assembler-mips64.h"
#include "vm/Runtime.h"
#define I8(v) static_cast<int8_t>(v)
#define I16(v) static_cast<int16_t>(v)
#define U16(v) static_cast<uint16_t>(v)
#define I32(v) static_cast<int32_t>(v)
#define U32(v) static_cast<uint32_t>(v)
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -6,17 +6,17 @@
#ifndef jit_shared_Assembler_shared_h
#define jit_shared_Assembler_shared_h
#include "mozilla/PodOperations.h"
#include <limits.h>
-#include "asmjs/AsmJSFrameIterator.h"
+#include "asmjs/WasmTypes.h"
#include "jit/JitAllocPolicy.h"
#include "jit/Label.h"
#include "jit/Registers.h"
#include "jit/RegisterSets.h"
#include "vm/HelperThreads.h"
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
// Push return addresses callee-side.
@@ -676,20 +676,20 @@ struct AsmJSGlobalAccess
AsmJSGlobalAccess(CodeOffset patchAt, unsigned globalDataOffset)
: patchAt(patchAt), globalDataOffset(globalDataOffset)
{}
};
// Represents an instruction to be patched and the intended pointee. These
// links are accumulated in the MacroAssembler, but patching is done outside
-// the MacroAssembler (in AsmJSModule::staticallyLink).
-struct AsmJSAbsoluteLink
+// the MacroAssembler (in Module::staticallyLink).
+struct AsmJSAbsoluteAddress
{
- AsmJSAbsoluteLink(CodeOffset patchAt, wasm::SymbolicAddress target)
+ AsmJSAbsoluteAddress(CodeOffset patchAt, wasm::SymbolicAddress target)
: patchAt(patchAt), target(target) {}
CodeOffset patchAt;
wasm::SymbolicAddress target;
};
// Represents a call from an asm.js function to another asm.js function,
// represented by the index of the callee in the Module Validator
@@ -706,17 +706,17 @@ struct AsmJSInternalCallee
};
// The base class of all Assemblers for all archs.
class AssemblerShared
{
wasm::CallSiteAndTargetVector callsites_;
wasm::HeapAccessVector heapAccesses_;
Vector<AsmJSGlobalAccess, 0, SystemAllocPolicy> asmJSGlobalAccesses_;
- Vector<AsmJSAbsoluteLink, 0, SystemAllocPolicy> asmJSAbsoluteLinks_;
+ Vector<AsmJSAbsoluteAddress, 0, SystemAllocPolicy> asmJSAbsoluteAddresses_;
protected:
Vector<CodeLabel, 0, SystemAllocPolicy> codeLabels_;
bool enoughMemory_;
bool embedsNurseryPointers_;
public:
@@ -753,19 +753,19 @@ class AssemblerShared
void append(wasm::HeapAccess access) { enoughMemory_ &= heapAccesses_.append(access); }
wasm::HeapAccessVector&& extractHeapAccesses() { return Move(heapAccesses_); }
void append(AsmJSGlobalAccess access) { enoughMemory_ &= asmJSGlobalAccesses_.append(access); }
size_t numAsmJSGlobalAccesses() const { return asmJSGlobalAccesses_.length(); }
AsmJSGlobalAccess asmJSGlobalAccess(size_t i) const { return asmJSGlobalAccesses_[i]; }
- void append(AsmJSAbsoluteLink link) { enoughMemory_ &= asmJSAbsoluteLinks_.append(link); }
- size_t numAsmJSAbsoluteLinks() const { return asmJSAbsoluteLinks_.length(); }
- AsmJSAbsoluteLink asmJSAbsoluteLink(size_t i) const { return asmJSAbsoluteLinks_[i]; }
+ void append(AsmJSAbsoluteAddress link) { enoughMemory_ &= asmJSAbsoluteAddresses_.append(link); }
+ size_t numAsmJSAbsoluteAddresses() const { return asmJSAbsoluteAddresses_.length(); }
+ AsmJSAbsoluteAddress asmJSAbsoluteAddress(size_t i) const { return asmJSAbsoluteAddresses_[i]; }
static bool canUseInSingleByteInstruction(Register reg) { return true; }
void addCodeLabel(CodeLabel label) {
propagateOOM(codeLabels_.append(label));
}
size_t numCodeLabels() const {
return codeLabels_.length();
@@ -787,20 +787,20 @@ class AssemblerShared
for (; i < heapAccesses_.length(); i++)
heapAccesses_[i].offsetInsnOffsetBy(delta);
i = asmJSGlobalAccesses_.length();
enoughMemory_ &= asmJSGlobalAccesses_.appendAll(other.asmJSGlobalAccesses_);
for (; i < asmJSGlobalAccesses_.length(); i++)
asmJSGlobalAccesses_[i].patchAt.offsetBy(delta);
- i = asmJSAbsoluteLinks_.length();
- enoughMemory_ &= asmJSAbsoluteLinks_.appendAll(other.asmJSAbsoluteLinks_);
- for (; i < asmJSAbsoluteLinks_.length(); i++)
- asmJSAbsoluteLinks_[i].patchAt.offsetBy(delta);
+ i = asmJSAbsoluteAddresses_.length();
+ enoughMemory_ &= asmJSAbsoluteAddresses_.appendAll(other.asmJSAbsoluteAddresses_);
+ for (; i < asmJSAbsoluteAddresses_.length(); i++)
+ asmJSAbsoluteAddresses_[i].patchAt.offsetBy(delta);
i = codeLabels_.length();
enoughMemory_ &= codeLabels_.appendAll(other.codeLabels_);
for (; i < codeLabels_.length(); i++)
codeLabels_[i].offsetBy(delta);
return !oom();
}
--- a/js/src/jit/shared/CodeGenerator-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -1515,17 +1515,17 @@ CodeGeneratorShared::emitAsmJSCall(LAsmJ
switch (callee.which()) {
case MAsmJSCall::Callee::Internal:
masm.call(mir->desc(), callee.internal());
break;
case MAsmJSCall::Callee::Dynamic:
masm.call(mir->desc(), ToRegister(ins->getOperand(mir->dynamicCalleeOperandIndex())));
break;
case MAsmJSCall::Callee::Builtin:
- masm.call(BuiltinToImmediate(callee.builtin()));
+ masm.call(callee.builtin());
break;
}
if (mir->spIncrement())
masm.reserveStack(mir->spIncrement());
}
void
--- a/js/src/jit/x64/Assembler-x64.h
+++ b/js/src/jit/x64/Assembler-x64.h
@@ -596,17 +596,17 @@ class Assembler : public AssemblerX86Sha
else
movq(word, dest);
}
void mov(ImmPtr imm, Register dest) {
movq(imm, dest);
}
void mov(wasm::SymbolicAddress imm, Register dest) {
masm.movq_i64r(-1, dest.encoding());
- append(AsmJSAbsoluteLink(CodeOffset(masm.currentOffset()), imm));
+ append(AsmJSAbsoluteAddress(CodeOffset(masm.currentOffset()), imm));
}
void mov(const Operand& src, Register dest) {
movq(src, dest);
}
void mov(Register src, const Operand& dest) {
movq(src, dest);
}
void mov(Imm32 imm32, const Operand& dest) {
--- a/js/src/jit/x86/Assembler-x86.h
+++ b/js/src/jit/x86/Assembler-x86.h
@@ -283,17 +283,17 @@ class Assembler : public AssemblerX86Sha
else
movl(imm, dest);
}
void mov(ImmPtr imm, Register dest) {
mov(ImmWord(uintptr_t(imm.value)), dest);
}
void mov(wasm::SymbolicAddress imm, Register dest) {
masm.movl_i32r(-1, dest.encoding());
- append(AsmJSAbsoluteLink(CodeOffset(masm.currentOffset()), imm));
+ append(AsmJSAbsoluteAddress(CodeOffset(masm.currentOffset()), imm));
}
void mov(const Operand& src, Register dest) {
movl(src, dest);
}
void mov(Register src, const Operand& dest) {
movl(src, dest);
}
void mov(Imm32 imm, const Operand& dest) {
@@ -362,21 +362,21 @@ class Assembler : public AssemblerX86Sha
writeDataRelocation(rhs);
break;
default:
MOZ_CRASH("unexpected operand kind");
}
}
void cmpl(Register rhs, wasm::SymbolicAddress lhs) {
masm.cmpl_rm_disp32(rhs.encoding(), (void*)-1);
- append(AsmJSAbsoluteLink(CodeOffset(masm.currentOffset()), lhs));
+ append(AsmJSAbsoluteAddress(CodeOffset(masm.currentOffset()), lhs));
}
void cmpl(Imm32 rhs, wasm::SymbolicAddress lhs) {
JmpSrc src = masm.cmpl_im_disp32(rhs.value, (void*)-1);
- append(AsmJSAbsoluteLink(CodeOffset(src.offset()), lhs));
+ append(AsmJSAbsoluteAddress(CodeOffset(src.offset()), lhs));
}
void adcl(Imm32 imm, Register dest) {
masm.adcl_ir(imm.value, dest.encoding());
}
void adcl(Register src, Register dest) {
masm.adcl_rr(src.encoding(), dest.encoding());
}
--- a/js/src/jsopcode.cpp
+++ b/js/src/jsopcode.cpp
@@ -28,17 +28,16 @@
#include "jsnum.h"
#include "jsobj.h"
#include "jsprf.h"
#include "jsscript.h"
#include "jsstr.h"
#include "jstypes.h"
#include "jsutil.h"
-#include "asmjs/AsmJSModule.h"
#include "frontend/BytecodeCompiler.h"
#include "frontend/SourceNotes.h"
#include "gc/GCInternals.h"
#include "js/CharacterEncoding.h"
#include "vm/CodeCoverage.h"
#include "vm/Opcodes.h"
#include "vm/ScopeObject.h"
#include "vm/Shape.h"
--- a/js/src/jsscript.h
+++ b/js/src/jsscript.h
@@ -820,16 +820,19 @@ class ScriptSourceHolder
: ss(ss)
{
ss->incref();
}
~ScriptSourceHolder()
{
ss->decref();
}
+ ScriptSource* get() const {
+ return ss;
+ }
};
struct CompressedSourceHasher
{
typedef ScriptSource* Lookup;
static HashNumber computeHash(const void* data, size_t nbytes) {
return mozilla::HashBytes(data, nbytes);
--- a/js/src/moz.build
+++ b/js/src/moz.build
@@ -137,24 +137,26 @@ EXPORTS.js += [
'../public/UbiNodePostOrder.h',
'../public/Utility.h',
'../public/Value.h',
'../public/Vector.h',
'../public/WeakMapPtr.h',
]
UNIFIED_SOURCES += [
- 'asmjs/AsmJSFrameIterator.cpp',
'asmjs/AsmJSLink.cpp',
'asmjs/AsmJSModule.cpp',
- 'asmjs/AsmJSSignalHandlers.cpp',
'asmjs/AsmJSValidate.cpp',
+ 'asmjs/WasmFrameIterator.cpp',
'asmjs/WasmGenerator.cpp',
'asmjs/WasmIonCompile.cpp',
+ 'asmjs/WasmModule.cpp',
+ 'asmjs/WasmSignalHandlers.cpp',
'asmjs/WasmStubs.cpp',
+ 'asmjs/WasmTypes.cpp',
'builtin/AtomicsObject.cpp',
'builtin/Eval.cpp',
'builtin/Intl.cpp',
'builtin/MapObject.cpp',
'builtin/ModuleObject.cpp',
'builtin/Object.cpp',
'builtin/Profilers.cpp',
'builtin/Reflect.cpp',
--- a/js/src/vm/HelperThreads.cpp
+++ b/js/src/vm/HelperThreads.cpp
@@ -74,17 +74,17 @@ js::SetFakeCPUCount(size_t count)
// This must be called before the threads have been initialized.
MOZ_ASSERT(!HelperThreadState().threads);
HelperThreadState().cpuCount = count;
HelperThreadState().threadCount = ThreadCountForCPUCount(count);
}
bool
-js::StartOffThreadWasmCompile(ExclusiveContext* cx, wasm::CompileTask* task)
+js::StartOffThreadWasmCompile(ExclusiveContext* cx, wasm::IonCompileTask* task)
{
AutoLockHelperThreadState lock;
// Don't append this task if another failed.
if (HelperThreadState().wasmFailed())
return false;
if (!HelperThreadState().wasmWorklist().append(task))
@@ -732,17 +732,17 @@ GlobalHelperThreadState::canStartWasmCom
{
// Don't execute an wasm job if an earlier one failed.
MOZ_ASSERT(isLocked());
if (wasmWorklist().empty() || numWasmFailedJobs)
return false;
// Honor the maximum allowed threads to compile wasm jobs at once,
// to avoid oversaturating the machine.
- if (!checkTaskThreadLimit<wasm::CompileTask*>(maxWasmCompilationThreads()))
+ if (!checkTaskThreadLimit<wasm::IonCompileTask*>(maxWasmCompilationThreads()))
return false;
return true;
}
static bool
IonBuilderHasHigherPriority(jit::IonBuilder* first, jit::IonBuilder* second)
{
@@ -1196,21 +1196,21 @@ HelperThread::handleWasmWorkload()
{
MOZ_ASSERT(HelperThreadState().isLocked());
MOZ_ASSERT(HelperThreadState().canStartWasmCompile());
MOZ_ASSERT(idle());
currentTask.emplace(HelperThreadState().wasmWorklist().popCopy());
bool success = false;
- wasm::CompileTask* task = wasmTask();
+ wasm::IonCompileTask* task = wasmTask();
{
AutoUnlockHelperThreadState unlock;
- PerThreadData::AutoEnterRuntime enter(threadData.ptr(), task->args().runtime);
- success = wasm::CompileFunction(task);
+ PerThreadData::AutoEnterRuntime enter(threadData.ptr(), task->runtime());
+ success = wasm::IonCompileFunction(task);
}
// On success, try to move work to the finished list.
if (success)
success = HelperThreadState().wasmFinishedList().append(task);
// On failure, note the failure for harvesting by the parent.
if (!success)
--- a/js/src/vm/HelperThreads.h
+++ b/js/src/vm/HelperThreads.h
@@ -15,33 +15,31 @@
#include "mozilla/GuardObjects.h"
#include "mozilla/PodOperations.h"
#include "mozilla/Variant.h"
#include "jscntxt.h"
#include "jslock.h"
-#include "asmjs/WasmCompileArgs.h"
#include "frontend/TokenStream.h"
#include "jit/Ion.h"
namespace js {
struct HelperThread;
struct ParseTask;
namespace jit {
class IonBuilder;
} // namespace jit
namespace wasm {
- struct CompileArgs;
- class CompileTask;
class FuncIR;
class FunctionCompileResults;
- typedef Vector<CompileTask*, 0, SystemAllocPolicy> CompileTaskVector;
+ class IonCompileTask;
+ typedef Vector<IonCompileTask*, 0, SystemAllocPolicy> IonCompileTaskVector;
} // namespace wasm
// Per-process state for off thread work items.
class GlobalHelperThreadState
{
public:
// Number of CPUs to treat this machine as having when creating threads.
// May be accessed without locking.
@@ -65,17 +63,17 @@ class GlobalHelperThreadState
// Ion compilation worklist and finished jobs.
IonBuilderVector ionWorklist_, ionFinishedList_;
// List of IonBuilders using lazy linking pending to get linked.
IonBuilderList ionLazyLinkList_;
// wasm worklist and finished jobs.
- wasm::CompileTaskVector wasmWorklist_, wasmFinishedList_;
+ wasm::IonCompileTaskVector wasmWorklist_, wasmFinishedList_;
public:
// For now, only allow a single parallel asm.js compilation to happen at a
// time. This avoids race conditions on wasmWorklist/wasmFinishedList/etc.
mozilla::Atomic<bool> wasmCompilationInProgress;
private:
// Script parsing/emitting worklist and finished jobs.
@@ -148,21 +146,21 @@ class GlobalHelperThreadState
return ionFinishedList_;
}
IonBuilderList& ionLazyLinkList() {
MOZ_ASSERT(TlsPerThreadData.get()->runtimeFromMainThread(),
"Should only be mutated by the main thread.");
return ionLazyLinkList_;
}
- wasm::CompileTaskVector& wasmWorklist() {
+ wasm::IonCompileTaskVector& wasmWorklist() {
MOZ_ASSERT(isLocked());
return wasmWorklist_;
}
- wasm::CompileTaskVector& wasmFinishedList() {
+ wasm::IonCompileTaskVector& wasmFinishedList() {
MOZ_ASSERT(isLocked());
return wasmFinishedList_;
}
ParseTaskVector& parseWorklist() {
MOZ_ASSERT(isLocked());
return parseWorklist_;
}
@@ -291,34 +289,34 @@ struct HelperThread
* Indicate to a thread that it should pause execution. This is only
* written with the helper thread state lock held, but may be read from
* without the lock held.
*/
mozilla::Atomic<bool, mozilla::Relaxed> pause;
/* The current task being executed by this thread, if any. */
mozilla::Maybe<mozilla::Variant<jit::IonBuilder*,
- wasm::CompileTask*,
+ wasm::IonCompileTask*,
ParseTask*,
SourceCompressionTask*,
GCHelperState*,
GCParallelTask*>> currentTask;
bool idle() const {
return currentTask.isNothing();
}
/* Any builder currently being compiled by Ion on this thread. */
jit::IonBuilder* ionBuilder() {
return maybeCurrentTaskAs<jit::IonBuilder*>();
}
/* Any wasm data currently being optimized by Ion on this thread. */
- wasm::CompileTask* wasmTask() {
- return maybeCurrentTaskAs<wasm::CompileTask*>();
+ wasm::IonCompileTask* wasmTask() {
+ return maybeCurrentTaskAs<wasm::IonCompileTask*>();
}
/* Any source being parsed/emitted on this thread. */
ParseTask* parseTask() {
return maybeCurrentTaskAs<ParseTask*>();
}
/* Any source being compressed on this thread. */
@@ -378,17 +376,17 @@ void
SetFakeCPUCount(size_t count);
// Pause the current thread until it's pause flag is unset.
void
PauseCurrentHelperThread();
/* Perform MIR optimization and LIR generation on a single function. */
bool
-StartOffThreadWasmCompile(ExclusiveContext* cx, wasm::CompileTask* task);
+StartOffThreadWasmCompile(ExclusiveContext* cx, wasm::IonCompileTask* task);
/*
* Schedule an Ion compilation for a script, given a builder which has been
* generated and read everything needed from the VM state.
*/
bool
StartOffThreadIonCompile(JSContext* cx, jit::IonBuilder* builder);
--- a/js/src/vm/Runtime.cpp
+++ b/js/src/vm/Runtime.cpp
@@ -34,17 +34,16 @@
#include "jsmath.h"
#include "jsnativestack.h"
#include "jsobj.h"
#include "jsscript.h"
#include "jswatchpoint.h"
#include "jswin.h"
#include "jswrapper.h"
-#include "asmjs/AsmJSSignalHandlers.h"
#include "jit/arm/Simulator-arm.h"
#include "jit/arm64/vixl/Simulator-vixl.h"
#include "jit/JitCompartment.h"
#include "jit/mips32/Simulator-mips32.h"
#include "jit/mips64/Simulator-mips64.h"
#include "jit/PcScriptCache.h"
#include "js/Date.h"
#include "js/MemoryMetrics.h"
@@ -196,17 +195,17 @@ JSRuntime::JSRuntime(JSRuntime* parentRu
canUseSignalHandlers_(false),
defaultFreeOp_(thisFromCtor()),
debuggerMutations(0),
securityCallbacks(const_cast<JSSecurityCallbacks*>(&NullSecurityCallbacks)),
DOMcallbacks(nullptr),
destroyPrincipals(nullptr),
readPrincipals(nullptr),
errorReporter(nullptr),
- linkedAsmJSModules(nullptr),
+ linkedWasmModules(nullptr),
propertyRemovals(0),
#if !EXPOSE_INTL_API
thousandsSeparator(0),
decimalSeparator(0),
numGrouping(0),
#endif
mathCache_(nullptr),
activeCompilations_(0),
@@ -340,17 +339,17 @@ JSRuntime::init(uint32_t maxbytes, uint3
simulator_ = js::jit::Simulator::Create();
if (!simulator_)
return false;
#endif
jitSupportsFloatingPoint = js::jit::JitSupportsFloatingPoint();
jitSupportsSimd = js::jit::JitSupportsSimd();
- signalHandlersInstalled_ = EnsureSignalHandlersInstalled(this);
+ signalHandlersInstalled_ = wasm::EnsureSignalHandlersInstalled(this);
canUseSignalHandlers_ = signalHandlersInstalled_ && !SignalBasedTriggersDisabled();
if (!spsProfiler.init())
return false;
if (!fx.initInstance())
return false;
--- a/js/src/vm/Runtime.h
+++ b/js/src/vm/Runtime.h
@@ -18,19 +18,17 @@
#include "mozilla/Vector.h"
#include <setjmp.h>
#include "jsatom.h"
#include "jsclist.h"
#include "jsscript.h"
-#ifdef XP_DARWIN
-# include "asmjs/AsmJSSignalHandlers.h"
-#endif
+#include "asmjs/WasmSignalHandlers.h"
#include "builtin/AtomicsObject.h"
#include "ds/FixedSizeHash.h"
#include "frontend/ParseMaps.h"
#include "gc/GCRuntime.h"
#include "gc/Tracer.h"
#include "irregexp/RegExpStack.h"
#include "js/Debug.h"
#include "js/HashTable.h"
@@ -84,33 +82,36 @@ extern MOZ_COLD void
ReportAllocationOverflow(ExclusiveContext* maybecx);
extern MOZ_COLD void
ReportOverRecursed(ExclusiveContext* cx);
class Activation;
class ActivationIterator;
class AsmJSActivation;
-class AsmJSModule;
class MathCache;
namespace jit {
class JitRuntime;
class JitActivation;
struct PcScriptCache;
struct AutoFlushICache;
class CompileRuntime;
#ifdef JS_SIMULATOR_ARM64
typedef vixl::Simulator Simulator;
#elif defined(JS_SIMULATOR)
class Simulator;
#endif
} // namespace jit
+namespace wasm {
+class Module;
+} // namespace wasm
+
/*
* GetSrcNote cache to avoid O(n^2) growth in finding a source note for a
* given pc in a script. We use the script->code pointer to tag the cache,
* instead of the script address itself, so that source notes are always found
* by offset from the bytecode with which they were generated.
*/
struct GSNCache {
typedef HashMap<jsbytecode*,
@@ -1122,17 +1123,17 @@ struct JSRuntime : public JS::shadow::Ru
* onNewGlobalObject handler methods established.
*/
JSCList onNewGlobalObjectWatchers;
/* Client opaque pointers */
void* data;
#if defined(XP_DARWIN) && defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
- js::AsmJSMachExceptionHandler asmJSMachExceptionHandler;
+ js::wasm::MachExceptionHandler wasmMachExceptionHandler;
#endif
private:
// Whether EnsureSignalHandlersInstalled succeeded in installing all the
// relevant handlers for this platform.
bool signalHandlersInstalled_;
// Whether we should use them or they have been disabled for making
@@ -1163,18 +1164,18 @@ struct JSRuntime : public JS::shadow::Ru
JSReadPrincipalsOp readPrincipals;
/* Optional error reporter. */
JSErrorReporter errorReporter;
/* AsmJSCache callbacks are runtime-wide. */
JS::AsmJSCacheOps asmJSCacheOps;
- /* Head of the linked list of linked asm.js modules. */
- js::AsmJSModule* linkedAsmJSModules;
+ /* Head of the linked list of linked wasm modules. */
+ js::wasm::Module* linkedWasmModules;
/*
* The propertyRemovals counter is incremented for every JSObject::clear,
* and for each JSObject::remove method call that frees a slot in the given
* object. See js_NativeGet and js_NativeSet in jsobj.cpp.
*/
uint32_t propertyRemovals;
--- a/js/src/vm/SharedArrayObject.cpp
+++ b/js/src/vm/SharedArrayObject.cpp
@@ -23,16 +23,18 @@
#endif
#include "asmjs/AsmJSValidate.h"
#include "vm/SharedMem.h"
#include "vm/TypedArrayCommon.h"
#include "jsobjinlines.h"
+#include "vm/NativeObject-inl.h"
+
using namespace js;
static inline void*
MapMemory(size_t length, bool commit)
{
#ifdef XP_WIN
int prot = (commit ? MEM_COMMIT : MEM_RESERVE);
int flags = (commit ? PAGE_READWRITE : PAGE_NOACCESS);
--- a/js/src/vm/Stack-inl.h
+++ b/js/src/vm/Stack-inl.h
@@ -1018,22 +1018,16 @@ InterpreterActivation::resumeGeneratorFr
InterpreterStack& stack = cx_->asJSContext()->runtime()->interpreterStack();
if (!stack.resumeGeneratorCallFrame(cx_->asJSContext(), regs_, callee, newTarget, scopeChain))
return false;
MOZ_ASSERT(regs_.fp()->script()->compartment() == compartment_);
return true;
}
-inline JSContext*
-AsmJSActivation::cx()
-{
- return cx_->asJSContext();
-}
-
inline bool
FrameIter::hasCachedSavedFrame() const
{
if (isAsmJS())
return false;
if (hasUsableAbstractFramePtr())
return abstractFramePtr().hasCachedSavedFrame();
--- a/js/src/vm/Stack.cpp
+++ b/js/src/vm/Stack.cpp
@@ -5,18 +5,18 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "vm/Stack-inl.h"
#include "mozilla/PodOperations.h"
#include "jscntxt.h"
-#include "asmjs/AsmJSFrameIterator.h"
#include "asmjs/AsmJSModule.h"
+#include "asmjs/WasmFrameIterator.h"
#include "gc/Marking.h"
#include "jit/BaselineFrame.h"
#include "jit/JitcodeMap.h"
#include "jit/JitCompartment.h"
#include "js/GCAPI.h"
#include "vm/Debugger.h"
#include "vm/Opcodes.h"
@@ -603,17 +603,17 @@ FrameIter::settleOnActivation()
}
nextJitFrame();
data_.state_ = JIT;
return;
}
if (activation->isAsmJS()) {
- data_.asmJSFrames_ = AsmJSFrameIterator(*data_.activations_->asAsmJS());
+ data_.asmJSFrames_ = wasm::FrameIterator(*data_.activations_->asAsmJS());
if (data_.asmJSFrames_.done()) {
++data_.activations_;
continue;
}
data_.state_ = ASMJS;
return;
@@ -981,17 +981,17 @@ FrameIter::scriptFilename() const
{
switch (data_.state_) {
case DONE:
break;
case INTERP:
case JIT:
return script()->filename();
case ASMJS:
- return data_.activations_->asAsmJS()->module().scriptSource()->filename();
+ return data_.activations_->asAsmJS()->module().wasm().filename();
}
MOZ_CRASH("Unexpected state");
}
const char16_t*
FrameIter::scriptDisplayURL() const
{
@@ -1739,40 +1739,40 @@ jit::JitActivation::markIonRecovery(JSTr
}
AsmJSActivation::AsmJSActivation(JSContext* cx, AsmJSModule& module)
: Activation(cx, AsmJS),
module_(module),
entrySP_(nullptr),
resumePC_(nullptr),
fp_(nullptr),
- packedExitReason_(wasm::ExitReason(wasm::ExitReason::None).pack())
+ exitReason_(wasm::ExitReason::None)
{
(void) entrySP_; // squelch GCC warning
- prevAsmJSForModule_ = module.activation();
- module.activation() = this;
+ prevAsmJSForModule_ = module.wasm().activation();
+ module.wasm().activation() = this;
prevAsmJS_ = cx->runtime()->asmJSActivationStack_;
cx->runtime()->asmJSActivationStack_ = this;
// Now that the AsmJSActivation is fully initialized, make it visible to
// asynchronous profiling.
registerProfiling();
}
AsmJSActivation::~AsmJSActivation()
{
    // Hide this activation from the profiler before it is destroyed.
unregisterProfiling();
MOZ_ASSERT(fp_ == nullptr);
- MOZ_ASSERT(module_.activation() == this);
- module_.activation() = prevAsmJSForModule_;
+ MOZ_ASSERT(module_.wasm().activation() == this);
+ module_.wasm().activation() = prevAsmJSForModule_;
JSContext* cx = cx_->asJSContext();
MOZ_ASSERT(cx->runtime()->asmJSActivationStack_ == this);
cx->runtime()->asmJSActivationStack_ = prevAsmJS_;
}
InterpreterFrameIterator&
@@ -1855,17 +1855,17 @@ JS::ProfilingFrameIterator::ProfilingFra
// If profiler sampling is not enabled, skip.
if (!rt_->isProfilerSamplingEnabled())
return;
activation_ = rt->profilingActivation();
MOZ_ASSERT(activation_->isProfiling());
- static_assert(sizeof(AsmJSProfilingFrameIterator) <= StorageSpace &&
+ static_assert(sizeof(wasm::ProfilingFrameIterator) <= StorageSpace &&
sizeof(jit::JitProfilingFrameIterator) <= StorageSpace,
"Need to increase storage");
iteratorConstruct(state);
settle();
}
JS::ProfilingFrameIterator::~ProfilingFrameIterator()
@@ -1911,50 +1911,50 @@ JS::ProfilingFrameIterator::settle()
void
JS::ProfilingFrameIterator::iteratorConstruct(const RegisterState& state)
{
MOZ_ASSERT(!done());
MOZ_ASSERT(activation_->isAsmJS() || activation_->isJit());
if (activation_->isAsmJS()) {
- new (storage_.addr()) AsmJSProfilingFrameIterator(*activation_->asAsmJS(), state);
+ new (storage_.addr()) wasm::ProfilingFrameIterator(*activation_->asAsmJS(), state);
// Set savedPrevJitTop_ to the actual jitTop_ from the runtime.
savedPrevJitTop_ = activation_->cx()->runtime()->jitTop;
return;
}
MOZ_ASSERT(activation_->asJit()->isActive());
new (storage_.addr()) jit::JitProfilingFrameIterator(rt_, state);
}
void
JS::ProfilingFrameIterator::iteratorConstruct()
{
MOZ_ASSERT(!done());
MOZ_ASSERT(activation_->isAsmJS() || activation_->isJit());
if (activation_->isAsmJS()) {
- new (storage_.addr()) AsmJSProfilingFrameIterator(*activation_->asAsmJS());
+ new (storage_.addr()) wasm::ProfilingFrameIterator(*activation_->asAsmJS());
return;
}
MOZ_ASSERT(activation_->asJit()->isActive());
MOZ_ASSERT(savedPrevJitTop_ != nullptr);
new (storage_.addr()) jit::JitProfilingFrameIterator(savedPrevJitTop_);
}
void
JS::ProfilingFrameIterator::iteratorDestroy()
{
MOZ_ASSERT(!done());
MOZ_ASSERT(activation_->isAsmJS() || activation_->isJit());
if (activation_->isAsmJS()) {
- asmJSIter().~AsmJSProfilingFrameIterator();
+ asmJSIter().~ProfilingFrameIterator();
return;
}
    // Save prevJitTop for later use
savedPrevJitTop_ = activation_->asJit()->prevJitTop();
jitIter().~JitProfilingFrameIterator();
}
--- a/js/src/vm/Stack.h
+++ b/js/src/vm/Stack.h
@@ -11,17 +11,17 @@
#include "mozilla/Maybe.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/Variant.h"
#include "jsfun.h"
#include "jsscript.h"
#include "jsutil.h"
-#include "asmjs/AsmJSFrameIterator.h"
+#include "asmjs/WasmFrameIterator.h"
#include "gc/Rooting.h"
#include "jit/JitFrameIterator.h"
#ifdef CHECK_OSIPOINT_REGISTERS
#include "jit/Registers.h" // for RegisterDump
#endif
#include "js/RootingAPI.h"
#include "vm/SavedFrame.h"
@@ -1799,45 +1799,44 @@ class InterpreterFrameIterator
class AsmJSActivation : public Activation
{
AsmJSModule& module_;
AsmJSActivation* prevAsmJS_;
AsmJSActivation* prevAsmJSForModule_;
void* entrySP_;
void* resumePC_;
uint8_t* fp_;
- uint32_t packedExitReason_;
+ wasm::ExitReason exitReason_;
public:
AsmJSActivation(JSContext* cx, AsmJSModule& module);
~AsmJSActivation();
- inline JSContext* cx();
AsmJSModule& module() const { return module_; }
AsmJSActivation* prevAsmJS() const { return prevAsmJS_; }
bool isProfiling() const {
return true;
}
// Returns a pointer to the base of the innermost stack frame of asm.js code
// in this activation.
uint8_t* fp() const { return fp_; }
// Returns the reason why asm.js code called out of asm.js code.
- wasm::ExitReason exitReason() const { return wasm::ExitReason::unpack(packedExitReason_); }
+ wasm::ExitReason exitReason() const { return exitReason_; }
// Read by JIT code:
static unsigned offsetOfContext() { return offsetof(AsmJSActivation, cx_); }
static unsigned offsetOfResumePC() { return offsetof(AsmJSActivation, resumePC_); }
// Written by JIT code:
static unsigned offsetOfEntrySP() { return offsetof(AsmJSActivation, entrySP_); }
static unsigned offsetOfFP() { return offsetof(AsmJSActivation, fp_); }
- static unsigned offsetOfPackedExitReason() { return offsetof(AsmJSActivation, packedExitReason_); }
+ static unsigned offsetOfExitReason() { return offsetof(AsmJSActivation, exitReason_); }
// Read/written from SIGSEGV handler:
void setResumePC(void* pc) { resumePC_ = pc; }
void* resumePC() const { return resumePC_; }
};
// A FrameIter walks over the runtime's stack of JS script activations,
// abstracting over whether the JS scripts were running in the interpreter or
@@ -1884,17 +1883,17 @@ class FrameIter
jsbytecode * pc_;
InterpreterFrameIterator interpFrames_;
ActivationIterator activations_;
jit::JitFrameIterator jitFrames_;
unsigned ionInlineFrameNo_;
- AsmJSFrameIterator asmJSFrames_;
+ wasm::FrameIterator asmJSFrames_;
Data(JSContext* cx, SavedOption savedOption, ContextOption contextOption,
DebuggerEvalOption debuggerEvalOption, JSPrincipals* principals);
Data(const Data& other);
};
MOZ_IMPLICIT FrameIter(JSContext* cx, SavedOption = STOP_AT_SAVED);
FrameIter(JSContext* cx, ContextOption, SavedOption,