This article collects typical usage examples of the C++ AsmJSActivation class. If you are wondering what AsmJSActivation is for, how to use it, or what real code that uses it looks like, the selected examples below should help.
The following shows 13 code examples of the AsmJSActivation class, sorted by popularity by default.
Example 1: settle
AsmJSFrameIterator::AsmJSFrameIterator(const AsmJSActivation &activation)
: module_(&activation.module()),
fp_(activation.fp())
{
if (!fp_)
return;
settle();
}
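For orientation, a constructor like this is normally driven by the usual done()/operator++ iterator loop. Below is a minimal caller sketch; the surrounding helper is hypothetical, and the functionDisplayAtom()/computeLine() accessors are assumptions based on the iterator's interface in the same codebase, not part of the example above.
static void DumpAsmJSFrames(const AsmJSActivation &activation)
{
    // Hypothetical helper: walk every asm.js frame of one activation.
    for (AsmJSFrameIterator iter(activation); !iter.done(); ++iter) {
        uint32_t column = 0;
        unsigned line = iter.computeLine(&column);   // assumed accessor
        JSAtom *name = iter.functionDisplayAtom();   // assumed accessor
        fprintf(stderr, "asm.js frame at line %u, column %u\n", line, column);
        (void)name;
    }
}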
Example 2: ReturnAddressFromFP
void
AsmJSProfilingFrameIterator::initFromFP(const AsmJSActivation &activation)
{
uint8_t *fp = activation.fp();
// If a signal was handled while entering an activation, the frame will
// still be null.
if (!fp) {
JS_ASSERT(done());
return;
}
// Since we don't have the pc for fp, start unwinding at the caller of fp,
// whose pc we do have via fp->returnAddress. This means that the innermost
// frame is skipped but this is fine because:
// - for FFI calls, the innermost frame is a thunk, so the first frame that
// shows up is the function calling the FFI;
// - for Math and other builtin calls, when profiling is activated, we
// patch all call sites to instead call through a thunk; and
// - for interrupts, we just accept that we'll lose the innermost frame.
void *pc = ReturnAddressFromFP(fp);
const AsmJSModule::CodeRange *codeRange = module_->lookupCodeRange(pc);
JS_ASSERT(codeRange);
codeRange_ = codeRange;
stackAddress_ = fp;
switch (codeRange->kind()) {
case AsmJSModule::CodeRange::Entry:
callerPC_ = nullptr;
callerFP_ = nullptr;
break;
case AsmJSModule::CodeRange::Function:
fp = CallerFPFromFP(fp);
callerPC_ = ReturnAddressFromFP(fp);
callerFP_ = CallerFPFromFP(fp);
AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, fp);
break;
case AsmJSModule::CodeRange::IonFFI:
case AsmJSModule::CodeRange::SlowFFI:
case AsmJSModule::CodeRange::Interrupt:
case AsmJSModule::CodeRange::Inline:
case AsmJSModule::CodeRange::Thunk:
MOZ_CRASH("Unexpected CodeRange kind");
}
// Since, despite the above reasoning for skipping a frame, we do want FFI
// trampolines and interrupts to show up in the profile (so they can
// accumulate self time and explain performance faults), an "exit reason" is
// stored on all the paths leaving asm.js and the iterator logic treats this
// reason as its own frame. If we have exited asm.js code without setting an
// exit reason, the reason will be None and this means the code was
// asynchronously interrupted.
exitReason_ = activation.exitReason();
if (exitReason_ == AsmJSExit::None)
exitReason_ = AsmJSExit::Interrupt;
JS_ASSERT(!done());
}
Example 3: HandleSignal
// Be very cautious and default to not handling; we don't want to accidentally
// silence real crashes from real bugs.
static bool
HandleSignal(int signum, siginfo_t *info, void *ctx)
{
AsmJSActivation *activation = InnermostAsmJSActivation();
if (!activation)
return false;
CONTEXT *context = (CONTEXT *)ctx;
uint8_t **ppc = ContextToPC(context);
uint8_t *pc = *ppc;
const AsmJSModule &module = activation->module();
if (!module.containsPC(pc))
return false;
void *faultingAddress = info->si_addr;
// If we faulted trying to execute code in 'module', this must be an
// operation callback (see TriggerOperationCallbackForAsmJSCode). Redirect
// execution to a trampoline which will call js_HandleExecutionInterrupt.
// The trampoline will jump to activation->resumePC if execution isn't
// interrupted.
if (module.containsPC(faultingAddress)) {
activation->setResumePC(pc);
*ppc = module.operationCallbackExit();
mprotect(module.functionCode(), module.functionBytes(), PROT_EXEC);
return true;
}
# if defined(JS_CPU_X64)
// These checks aren't necessary, but, since we can, check anyway to make
// sure we aren't covering up a real bug.
if (!module.maybeHeap() ||
faultingAddress < module.maybeHeap() ||
faultingAddress >= module.maybeHeap() + AsmJSBufferProtectedSize)
{
return false;
}
const AsmJSHeapAccess *heapAccess = LookupHeapAccess(module, pc);
if (!heapAccess)
return false;
// We now know that this is an out-of-bounds access made by an asm.js
// load/store that we should handle. If this is a load, assign the
// JS-defined result value to the destination register (ToInt32(undefined)
// or ToNumber(undefined), determined by the type of the destination
// register) and set the PC to the next op. Upon return from the handler,
// execution will resume at this next PC.
if (heapAccess->isLoad())
SetRegisterToCoercedUndefined(context, heapAccess->isFloat32Load(), heapAccess->loadedReg());
*ppc += heapAccess->opLength();
return true;
# else
return false;
# endif
}
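The "JS-defined result value" mentioned in the comment is concrete: ToInt32(undefined) is 0 and ToNumber(undefined) is NaN, so an out-of-bounds asm.js load observes 0 or NaN depending on the destination register's type. The following is a minimal sketch of what the register patch amounts to; the register-file struct is hypothetical and merely stands in for the saved CONTEXT.
#include <cstdint>
#include <limits>

// Hypothetical register file standing in for the machine CONTEXT.
struct FakeRegisters {
    uint64_t gpr[16];
    double   fpr[16];
};

// Sketch of the coercion: give the load's destination register the value
// JavaScript defines for reading undefined.
static void SetRegisterToCoercedUndefinedSketch(FakeRegisters &regs,
                                                bool isFloatLoad, unsigned destReg)
{
    if (isFloatLoad)
        regs.fpr[destReg] = std::numeric_limits<double>::quiet_NaN();  // ToNumber(undefined)
    else
        regs.gpr[destReg] = 0;                                         // ToInt32(undefined)
}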
Example 4: JS_ASSERT
// To interrupt execution of a JSRuntime, any thread may call
// JS_RequestInterruptCallback (JSRuntime::requestInterruptCallback from inside
// the engine). In the simplest case, this sets some state that is polled at
// regular intervals (function prologues, loop headers). For tight loops, this
// poses non-trivial overhead. For asm.js, we can do better: when another
// thread requests an interrupt, we simply mprotect all of the innermost asm.js
// module activation's code. This will trigger a SIGSEGV, taking us into
// AsmJSFaultHandler. From there, we can manually redirect execution to call
// js::HandleExecutionInterrupt. The memory is un-protected from the signal
// handler after control flow is redirected.
void
js::RequestInterruptForAsmJSCode(JSRuntime *rt)
{
JS_ASSERT(rt->currentThreadOwnsInterruptLock());
AsmJSActivation *activation = rt->mainThread.asmJSActivationStackFromAnyThread();
if (!activation)
return;
activation->module().protectCode(rt);
}
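The comment above is the heart of the mechanism: instead of making a hot loop poll a flag, another thread revokes page permissions so that the loop's very next access faults, and the SIGSEGV handler performs the redirection and restores the permissions. The following is a self-contained sketch of that idea on plain POSIX; it is not SpiderMonkey code, it protects a data page rather than generated code, and the handler just sets a flag instead of rewriting the pc.
#include <csignal>
#include <cstdint>
#include <cstdio>
#include <pthread.h>
#include <sys/mman.h>
#include <unistd.h>

static uint8_t *gPage;
static volatile sig_atomic_t gInterrupted = 0;

static void SegvHandler(int, siginfo_t *info, void *)
{
    uint8_t *addr = (uint8_t *)info->si_addr;
    if (addr < gPage || addr >= gPage + 4096)
        _exit(1);  // not our fault: don't silence a real crash
    // Analogous to unprotectCode(). mprotect isn't formally async-signal-safe,
    // but this is exactly the trick the engine relies on.
    mprotect(gPage, 4096, PROT_READ | PROT_WRITE);
    gInterrupted = 1;  // stand-in for redirecting *ppc to the interrupt exit
}

static void *Interrupter(void *)
{
    usleep(100 * 1000);
    mprotect(gPage, 4096, PROT_NONE);  // analogous to protectCode(): next access faults
    return nullptr;
}

int main()
{
    gPage = (uint8_t *)mmap(nullptr, 4096, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    struct sigaction sa = {};
    sa.sa_flags = SA_SIGINFO;
    sa.sa_sigaction = SegvHandler;
    sigaction(SIGSEGV, &sa, nullptr);

    pthread_t t;
    pthread_create(&t, nullptr, Interrupter, nullptr);

    // The "tight loop". The fault, not per-iteration polling, delivers the
    // interrupt; the flag check only exists so this demo can terminate.
    volatile uint8_t *page = gPage;
    volatile uint64_t sink = 0;
    while (!gInterrupted)
        sink += page[0];

    pthread_join(t, nullptr);
    printf("loop interrupted without per-iteration interrupt polling\n");
    return 0;
}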
Example 5: JS_ASSERT
// To interrupt execution of a JSRuntime, any thread may call
// JS_TriggerOperationCallback (JSRuntime::triggerOperationCallback from inside
// the engine). Normally, this sets some state that is polled at regular
// intervals (function prologues, loop headers), even from jit-code. For tight
// loops, this poses non-trivial overhead. For asm.js, we can do better: when
// another thread triggers the operation callback, we simply mprotect all of
// the innermost asm.js module activation's code. This will trigger a SIGSEGV,
// taking us into AsmJSFaultHandler. From there, we can manually redirect
// execution to call js_HandleExecutionInterrupt. The memory is un-protected
// from the signal handler after control flow is redirected.
void
js::TriggerOperationCallbackForAsmJSCode(JSRuntime *rt)
{
JS_ASSERT(rt->currentThreadOwnsOperationCallbackLock());
AsmJSActivation *activation = rt->mainThread.asmJSActivationStackFromAnyThread();
if (!activation)
return;
activation->module().protectCode(rt);
}
Example 6: JS_ASSERT
bool
ArrayBufferObject::canNeuterAsmJSArrayBuffer(JSContext *cx, ArrayBufferObject &buffer)
{
JS_ASSERT(!buffer.isSharedArrayBuffer());
AsmJSActivation *act = cx->mainThread().asmJSActivationStack();
for (; act; act = act->prevAsmJS()) {
if (act->module().maybeHeapBufferObject() == &buffer)
break;
}
if (!act)
return true;
return false;
}
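The loop above is the standard walk over the per-thread activation stack: each AsmJSActivation points at the next-outer one via prevAsmJS(), and the buffer may only be neutered if no activation on that chain still uses it as its heap. A generic, self-contained sketch of the same pattern with hypothetical types (not the SpiderMonkey API):
// Hypothetical intrusive activation stack; prev plays the role of prevAsmJS().
struct Activation {
    const void *heapBuffer;   // resource this activation pins, may be null
    Activation *prev;         // next-outer activation on the same thread
};

static bool CanReleaseBuffer(const Activation *innermost, const void *buffer)
{
    for (const Activation *act = innermost; act; act = act->prev) {
        if (act->heapBuffer == buffer)
            return false;     // some live activation still uses the buffer
    }
    return true;              // reached the bottom of the stack: nothing pins it
}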
Example 7: initFromFP
AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation &activation)
: module_(&activation.module()),
callerFP_(nullptr),
callerPC_(nullptr),
stackAddress_(nullptr),
exitReason_(AsmJSExit::None),
codeRange_(nullptr)
{
initFromFP(activation);
}
Example 8: HandleException
static bool
HandleException(PEXCEPTIONREPORTRECORD pReport,
PCONTEXTRECORD pContext)
{
if (pReport->ExceptionNum != XCPT_ACCESS_VIOLATION)
return false;
AsmJSActivation *activation = InnermostAsmJSActivation();
if (!activation)
return false;
uint8_t **ppc = ContextToPC(pContext);
uint8_t *pc = *ppc;
JS_ASSERT(pc == pReport->ExceptionAddress);
const AsmJSModule &module = activation->module();
if (!module.containsPC(pc))
return false;
if (pReport->cParameters < 2)
return false;
void *faultingAddress = (void*)pReport->ExceptionInfo[1];
// If we faulted trying to execute code in 'module', this must be an
// operation callback (see TriggerOperationCallbackForAsmJSCode). Redirect
// execution to a trampoline which will call js_HandleExecutionInterrupt.
// The trampoline will jump to activation->resumePC if execution isn't
// interrupted.
if (module.containsPC(faultingAddress)) {
activation->setResumePC(pc);
*ppc = module.operationCallbackExit();
if (!DosSetMem(module.functionCode(), module.functionBytes(), PAG_COMMIT | PAG_DEFAULT))
MOZ_CRASH();
return true;
}
return false;
}
Example 9: initFromFP
AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation& activation)
: module_(&activation.module()),
callerFP_(nullptr),
callerPC_(nullptr),
stackAddress_(nullptr),
exitReason_(AsmJSExit::None),
codeRange_(nullptr)
{
// If profiling hasn't been enabled for this module, then CallerFPFromFP
// will be trash, so ignore the entire activation. In practice, this only
// happens if profiling is enabled while module->active() (in this case,
// profiling will be enabled when the module becomes inactive and gets
// called again).
if (!module_->profilingEnabled()) {
MOZ_ASSERT(done());
return;
}
initFromFP(activation);
}
Example 10: module_
AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation &activation,
const RegisterState &state)
: module_(&activation.module()),
callerFP_(nullptr),
callerPC_(nullptr),
exitReason_(AsmJSExit::None),
codeRange_(nullptr)
{
// If profiling hasn't been enabled for this module, then CallerFPFromFP
// will be trash, so ignore the entire activation. In practice, this only
// happens if profiling is enabled while module->active() (in this case,
// profiling will be enabled when the module becomes inactive and gets
// called again).
if (!module_->profilingEnabled()) {
JS_ASSERT(done());
return;
}
// If pc isn't in the module, we must have exited the asm.js module via an
// exit trampoline or signal handler.
if (!module_->containsCodePC(state.pc)) {
initFromFP(activation);
return;
}
// Note: fp may be null while entering and leaving the activation.
uint8_t *fp = activation.fp();
const AsmJSModule::CodeRange *codeRange = module_->lookupCodeRange(state.pc);
switch (codeRange->kind()) {
case AsmJSModule::CodeRange::Function:
case AsmJSModule::CodeRange::IonFFI:
case AsmJSModule::CodeRange::SlowFFI:
case AsmJSModule::CodeRange::Interrupt:
case AsmJSModule::CodeRange::Thunk: {
// While codeRange describes the *current* frame, the fp/pc state stored in
// the iterator is the *caller's* frame. The reason for this is that the
// activation.fp isn't always the AsmJSFrame for state.pc; during the
// prologue/epilogue, activation.fp will point to the caller's frame.
// Naively unwinding starting at activation.fp could thus lead to the
// second-to-innermost function being skipped in the callstack which will
// bork profiling stacks. Instead, we depend on the exact layout of the
// prologue/epilogue, as generated by GenerateProfiling(Prologue|Epilogue)
// below.
uint32_t offsetInModule = ((uint8_t*)state.pc) - module_->codeBase();
JS_ASSERT(offsetInModule < module_->codeBytes());
JS_ASSERT(offsetInModule >= codeRange->begin());
JS_ASSERT(offsetInModule < codeRange->end());
uint32_t offsetInCodeRange = offsetInModule - codeRange->begin();
void **sp = (void**)state.sp;
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
if (offsetInCodeRange < PushedRetAddr) {
callerPC_ = state.lr;
callerFP_ = fp;
AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, sp - 2);
} else
#endif
if (offsetInCodeRange < PushedFP || offsetInModule == codeRange->profilingReturn()) {
callerPC_ = *sp;
callerFP_ = fp;
AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, sp - 1);
} else if (offsetInCodeRange < StoredFP) {
JS_ASSERT(fp == CallerFPFromFP(sp));
callerPC_ = ReturnAddressFromFP(sp);
callerFP_ = CallerFPFromFP(sp);
AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, sp);
} else {
callerPC_ = ReturnAddressFromFP(fp);
callerFP_ = CallerFPFromFP(fp);
AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, fp);
}
break;
}
case AsmJSModule::CodeRange::Entry: {
// The entry trampoline is the final frame in an AsmJSActivation. The entry
// trampoline also doesn't GenerateAsmJSPrologue/Epilogue so we can't use
// the general unwinding logic above.
JS_ASSERT(!fp);
callerPC_ = nullptr;
callerFP_ = nullptr;
break;
}
case AsmJSModule::CodeRange::Inline: {
// The throw stub clears AsmJSActivation::fp on its way out.
if (!fp) {
JS_ASSERT(done());
return;
}
// Inline code ranges execute in the frame of the caller, have no
// prologue/epilogue, and thus don't require the general unwinding logic
// above.
callerPC_ = ReturnAddressFromFP(fp);
callerFP_ = CallerFPFromFP(fp);
AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, fp);
break;
}
}
codeRange_ = codeRange;
//......... part of the code omitted here .........
Example 11: module_
AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation& activation,
const RegisterState& state)
: module_(&activation.module()),
callerFP_(nullptr),
callerPC_(nullptr),
exitReason_(AsmJSExit::None),
codeRange_(nullptr)
{
// If profiling hasn't been enabled for this module, then CallerFPFromFP
// will be trash, so ignore the entire activation. In practice, this only
// happens if profiling is enabled while module->active() (in this case,
// profiling will be enabled when the module becomes inactive and gets
// called again).
if (!module_->profilingEnabled()) {
MOZ_ASSERT(done());
return;
}
// If pc isn't in the module, we must have exited the asm.js module via an
// exit trampoline or signal handler.
if (!module_->containsCodePC(state.pc)) {
initFromFP(activation);
return;
}
// Note: fp may be null while entering and leaving the activation.
uint8_t* fp = activation.fp();
const AsmJSModule::CodeRange* codeRange = module_->lookupCodeRange(state.pc);
switch (codeRange->kind()) {
case AsmJSModule::CodeRange::Function:
case AsmJSModule::CodeRange::JitFFI:
case AsmJSModule::CodeRange::SlowFFI:
case AsmJSModule::CodeRange::Interrupt:
case AsmJSModule::CodeRange::Thunk: {
// When the pc is inside the prologue/epilogue, the innermost
// call's AsmJSFrame is not complete and thus fp points to the
// second-to-innermost call's AsmJSFrame. Since fp can only tell you
// about its caller (via ReturnAddressFromFP(fp)), naively unwinding
// while pc is in the prologue/epilogue would skip the second-to-
// innermost call. To avoid this problem, we use the static structure of
// the code in the prologue and epilogue to do the Right Thing.
uint32_t offsetInModule = (uint8_t*)state.pc - module_->codeBase();
MOZ_ASSERT(offsetInModule < module_->codeBytes());
MOZ_ASSERT(offsetInModule >= codeRange->begin());
MOZ_ASSERT(offsetInModule < codeRange->end());
uint32_t offsetInCodeRange = offsetInModule - codeRange->begin();
void** sp = (void**)state.sp;
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
if (offsetInCodeRange < PushedRetAddr) {
// First instruction of the ARM/MIPS function; the return address is
// still in lr and fp still holds the caller's fp.
callerPC_ = state.lr;
callerFP_ = fp;
AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, sp - 2);
} else if (offsetInModule == codeRange->profilingReturn() - PostStorePrePopFP) {
// Second-to-last instruction of the ARM/MIPS function; fp points to
// the caller's fp; have not yet popped AsmJSFrame.
callerPC_ = ReturnAddressFromFP(sp);
callerFP_ = CallerFPFromFP(sp);
AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, sp);
} else
#endif
if (offsetInCodeRange < PushedFP || offsetInModule == codeRange->profilingReturn()) {
// The return address has been pushed on the stack but not fp; fp
// still points to the caller's fp.
callerPC_ = *sp;
callerFP_ = fp;
AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, sp - 1);
} else if (offsetInCodeRange < StoredFP) {
// The full AsmJSFrame has been pushed; fp still points to the
// caller's frame.
MOZ_ASSERT(fp == CallerFPFromFP(sp));
callerPC_ = ReturnAddressFromFP(sp);
callerFP_ = CallerFPFromFP(sp);
AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, sp);
} else {
// Not in the prologue/epilogue.
callerPC_ = ReturnAddressFromFP(fp);
callerFP_ = CallerFPFromFP(fp);
AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, fp);
}
break;
}
case AsmJSModule::CodeRange::Entry: {
// The entry trampoline is the final frame in an AsmJSActivation. The entry
// trampoline also doesn't GenerateAsmJSPrologue/Epilogue so we can't use
// the general unwinding logic above.
MOZ_ASSERT(!fp);
callerPC_ = nullptr;
callerFP_ = nullptr;
break;
}
case AsmJSModule::CodeRange::Inline: {
// The throw stub clears AsmJSActivation::fp on its way out.
if (!fp) {
MOZ_ASSERT(done());
return;
}
//......... part of the code omitted here .........
Example 12: HandleMachException
static bool
HandleMachException(JSRuntime *rt, const ExceptionRequest &request)
{
// Get the port of the JSRuntime's thread from the message.
mach_port_t rtThread = request.body.thread.name;
// Read out the JSRuntime thread's register state.
x86_thread_state_t state;
unsigned int count = x86_THREAD_STATE_COUNT;
kern_return_t kret;
kret = thread_get_state(rtThread, x86_THREAD_STATE, (thread_state_t)&state, &count);
if (kret != KERN_SUCCESS)
return false;
AsmJSActivation *activation = rt->mainThread.asmJSActivationStackFromAnyThread();
if (!activation)
return false;
uint8_t **ppc = ContextToPC(state);
uint8_t *pc = *ppc;
const AsmJSModule &module = activation->module();
if (!module.containsPC(pc))
return false;
if (request.body.exception != EXC_BAD_ACCESS || request.body.codeCnt != 2)
return false;
void *faultingAddress = (void*)request.body.code[1];
// If we faulted trying to execute code in 'module', this must be an
// operation callback (see TriggerOperationCallbackForAsmJSCode). Redirect
// execution to a trampoline which will call js_HandleExecutionInterrupt.
// The trampoline will jump to activation->resumePC if execution isn't
// interrupted.
if (module.containsPC(faultingAddress)) {
activation->setResumePC(pc);
*ppc = module.operationCallbackExit();
mprotect(module.functionCode(), module.functionBytes(), PROT_EXEC);
// Update the thread state with the new pc.
kret = thread_set_state(rtThread, x86_THREAD_STATE, (thread_state_t)&state, x86_THREAD_STATE_COUNT);
return kret == KERN_SUCCESS;
}
# if defined(JS_CPU_X64)
// These checks aren't necessary, but, since we can, check anyway to make
// sure we aren't covering up a real bug.
if (!module.maybeHeap() ||
faultingAddress < module.maybeHeap() ||
faultingAddress >= module.maybeHeap() + AsmJSBufferProtectedSize)
{
return false;
}
const AsmJSHeapAccess *heapAccess = LookupHeapAccess(module, pc);
if (!heapAccess)
return false;
// We now know that this is an out-of-bounds access made by an asm.js
// load/store that we should handle. If this is a load, assign the
// JS-defined result value to the destination register (ToInt32(undefined)
// or ToNumber(undefined), determined by the type of the destination
// register) and set the PC to the next op. Upon return from the handler,
// execution will resume at this next PC.
if (heapAccess->isLoad()) {
if (!SetRegisterToCoercedUndefined(rtThread, state.uts.ts64, *heapAccess))
return false;
}
*ppc += heapAccess->opLength();
// Update the thread state with the new pc.
kret = thread_set_state(rtThread, x86_THREAD_STATE, (thread_state_t)&state, x86_THREAD_STATE_COUNT);
if (kret != KERN_SUCCESS)
return false;
return true;
# else
return false;
# endif
}
Example 13: HandleSignal
// Be very cautious and default to not handling; we don't want to accidentally
// silence real crashes from real bugs.
static bool
HandleSignal(int signum, siginfo_t *info, void *ctx)
{
CONTEXT *context = (CONTEXT *)ctx;
uint8_t **ppc = ContextToPC(context);
uint8_t *pc = *ppc;
void *faultingAddress = info->si_addr;
JSRuntime *rt = RuntimeForCurrentThread();
// Don't allow recursive handling of signals, see AutoSetHandlingSignal.
if (!rt || rt->handlingSignal)
return false;
AutoSetHandlingSignal handling(rt);
if (rt->jitRuntime() && rt->jitRuntime()->handleAccessViolation(rt, faultingAddress))
return true;
AsmJSActivation *activation = InnermostAsmJSActivation();
if (!activation)
return false;
const AsmJSModule &module = activation->module();
if (HandleSimulatorInterrupt(rt, activation, faultingAddress)) {
JSRuntime::AutoLockForInterrupt lock(rt);
module.unprotectCode(rt);
return true;
}
if (!module.containsPC(pc))
return false;
// If we faulted trying to execute code in 'module', this must be an
// interrupt callback (see RequestInterruptForAsmJSCode). Redirect
// execution to a trampoline which will call js::HandleExecutionInterrupt.
// The trampoline will jump to activation->resumePC if execution isn't
// interrupted.
if (module.containsPC(faultingAddress)) {
activation->setInterrupted(pc);
*ppc = module.interruptExit();
JSRuntime::AutoLockForInterrupt lock(rt);
module.unprotectCode(rt);
return true;
}
# if defined(JS_CODEGEN_X64)
// These checks aren't necessary, but, since we can, check anyway to make
// sure we aren't covering up a real bug.
if (!module.maybeHeap() ||
faultingAddress < module.maybeHeap() ||
faultingAddress >= module.maybeHeap() + AsmJSBufferProtectedSize)
{
return false;
}
const AsmJSHeapAccess *heapAccess = module.lookupHeapAccess(pc);
if (!heapAccess)
return false;
// We now know that this is an out-of-bounds access made by an asm.js
// load/store that we should handle. If this is a load, assign the
// JS-defined result value to the destination register (ToInt32(undefined)
// or ToNumber(undefined), determined by the type of the destination
// register) and set the PC to the next op. Upon return from the handler,
// execution will resume at this next PC.
if (heapAccess->isLoad())
SetRegisterToCoercedUndefined(context, heapAccess->isFloat32Load(), heapAccess->loadedReg());
*ppc += heapAccess->opLength();
return true;
# else
return false;
# endif
}