本文整理汇总了C++中TrustedImmPtr函数的典型用法代码示例。如果您正苦于以下问题：C++ TrustedImmPtr函数的具体用法？C++ TrustedImmPtr怎么用？C++ TrustedImmPtr使用的例子？那么，这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了TrustedImmPtr函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: stubCall
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
{
    // Emit the fast path for op_call / op_construct / op_call_eval:
    // for op_call_eval, first let the runtime attempt the eval; otherwise
    // (or if the eval attempt yields no value) fall through to a virtual
    // call on a JSFunction callee.
    int calleeOperand = instruction[1].u.operand;
    int argumentCount = instruction[2].u.operand;
    int frameOffset = instruction[3].u.operand;

    // Handle eval: a non-empty result from cti_op_call_eval means the eval
    // ran and the ordinary call sequence below can be skipped entirely.
    Jump evalHandled;
    if (opcodeID == op_call_eval) {
        JITStubCall evalStub(this, cti_op_call_eval);
        evalStub.addArgument(calleeOperand, regT0);
        evalStub.addArgument(JIT::Imm32(frameOffset));
        evalStub.addArgument(JIT::Imm32(argumentCount));
        evalStub.call();
        evalHandled = branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(JSValue())));
    }

    emitGetVirtualRegister(calleeOperand, regT0);

    // Bail to the slow case unless the callee is a cell whose vptr marks
    // it as a JSFunction.
    emitJumpSlowCaseIfNotJSCell(regT0);
    addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsFunctionVPtr)));

    // Speculatively roll the callframe, assuming argCount will match the arity.
    storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + frameOffset) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(frameOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
    move(Imm32(argumentCount), regT1);

    emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstruct() : m_globalData->jitStubs->ctiVirtualCall());

    if (opcodeID == op_call_eval)
        evalHandled.link(this);

    sampleCodeBlock(m_codeBlock);
}
示例2: push
// Emits a probe call so native C++ code can inspect machine state from
// inside JIT code. Pushes the current esp and eax, then arg2, arg1 and the
// probe function (using eax as scratch to materialize each pointer), and
// finally calls ctiMasmProbeTrampoline, which reads the pushed values off
// the stack. NOTE(review): presumably the trampoline restores esp/eax from
// the copies pushed first — confirm against ctiMasmProbeTrampoline.
void MacroAssemblerX86Common::probe(MacroAssemblerX86Common::ProbeFunction function, void* arg1, void* arg2)
{
push(RegisterID::esp);
push(RegisterID::eax);
// Arguments are pushed in reverse (arg2, arg1, function) so the trampoline
// sees them in call order.
move(TrustedImmPtr(arg2), RegisterID::eax);
push(RegisterID::eax);
move(TrustedImmPtr(arg1), RegisterID::eax);
push(RegisterID::eax);
move(TrustedImmPtr(reinterpret_cast<void*>(function)), RegisterID::eax);
push(RegisterID::eax);
move(TrustedImmPtr(reinterpret_cast<void*>(ctiMasmProbeTrampoline)), RegisterID::eax);
call(RegisterID::eax);
}
示例3: emitLoad
void JIT::compileOpCallVarargs(Instruction* instruction)
{
    // Fast path for op_call_varargs (split tag/payload value representation):
    // the argument count lives in a virtual register, so the new frame's
    // position must be computed at run time.
    int calleeOperand = instruction[1].u.operand;
    int argCountOperand = instruction[2].u.operand;
    int frameOffset = instruction[3].u.operand;

    emitLoad(calleeOperand, regT1, regT0);    // callee tag -> regT1, payload -> regT0
    emitLoadPayload(argCountOperand, regT2);  // dynamic argument count
    addPtr(Imm32(frameOffset), regT2, regT3); // regT3 = register offset of the new frame

    // Bail to the slow case unless the callee is a JSFunction cell.
    emitJumpSlowCaseIfNotJSCell(calleeOperand, regT1);
    addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsFunctionVPtr)));

    // Speculatively roll the callframe, assuming argCount will match the arity.
    mul32(TrustedImm32(sizeof(Register)), regT3, regT3);
    addPtr(callFrameRegister, regT3);
    store32(TrustedImm32(JSValue::CellTag), tagFor(RegisterFile::CallerFrame, regT3));
    storePtr(callFrameRegister, payloadFor(RegisterFile::CallerFrame, regT3));
    move(regT3, callFrameRegister);
    move(regT2, regT1); // argument count travels in regT1

    emitNakedCall(m_globalData->jitStubs->ctiVirtualCall());

    sampleCodeBlock(m_codeBlock);
}
示例4: CodeOrigin
// Slow path for op_call_eval after the fast eval attempt failed: performs
// a fully virtual call through a freshly allocated CallLinkInfo.
void JIT::compileCallEvalSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
info->setUpCall(CallLinkInfo::Call, CodeOrigin(m_bytecodeOffset), regT0);
linkSlowCase(iter);
// instruction[4] stores the frame offset negated; undo that here.
int registerOffset = -instruction[4].u.operand;
// Point sp at the top of the callee frame being materialized for this site.
addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
// NOTE(review): the next two loads read the same Callee slot into regT0
// and regT1, and emitLoad(JSStack::Callee, ...) below reloads it again —
// this looks like 64-bit and 32-bit variants merged by the scraper; verify
// against the upstream WebKit source before relying on it.
loadPtr(Address(stackPointerRegister, sizeof(Register) * JSStack::Callee - sizeof(CallerFrameAndPC)), regT0);
loadPtr(Address(stackPointerRegister, sizeof(Register) * JSStack::Callee - sizeof(CallerFrameAndPC)), regT1);
// regT2 carries the CallLinkInfo* across the thunk call.
move(TrustedImmPtr(info), regT2);
emitLoad(JSStack::Callee, regT1, regT0);
MacroAssemblerCodeRef virtualThunk = virtualThunkFor(m_vm, *info);
info->setSlowStub(createJITStubRoutine(virtualThunk, *m_vm, nullptr, true));
emitNakedCall(virtualThunk.code());
// Restore this frame's sp, then store the call result.
addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
checkStackPointerAlignment();
sampleCodeBlock(m_codeBlock);
emitPutCallResult(instruction);
}
示例5: compileCallEvalSlowCase
// Slow path shared by all non-eval call opcodes: routes the call through
// the link-call thunk so the call site can be (re)linked to its callee.
void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex)
{
if (opcodeID == op_call_eval) {
// op_call_eval has its own dedicated slow path.
compileCallEvalSlowCase(instruction, iter);
return;
}
linkSlowCase(iter);
// Tail calls leave this frame for good, so callee-saves must be restored
// before control transfers.
if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments)
emitRestoreCalleeSaves();
// regT2 carries this site's CallLinkInfo* into the link thunk.
move(TrustedImmPtr(m_callCompilationInfo[callLinkInfoIndex].callLinkInfo), regT2);
m_callCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_vm->getCTIStub(linkCallThunkGenerator).code());
// A tail call must never return here; trap if it somehow does.
if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs) {
abortWithReason(JITDidReturnFromTailCall);
return;
}
// Restore this frame's sp, then store the call result.
addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
checkStackPointerAlignment();
sampleCodeBlock(m_codeBlock);
emitPutCallResult(instruction);
}
示例6: emitGetCallerFrameFromCallFrameHeaderPtr
// Emits the out-of-line exception-handler landing pads: one entry for
// checks that must first unwind to the caller's frame, and one for
// ordinary checks; both funnel into lookupExceptionHandler(vm, exec) and
// then jump to the located handler.
void JITCompiler::compileExceptionHandlers()
{
if (m_exceptionChecks.empty() && m_exceptionChecksWithCallFrameRollback.empty())
return;
Jump doLookup;
if (!m_exceptionChecksWithCallFrameRollback.empty()) {
m_exceptionChecksWithCallFrameRollback.link(this);
// For rollback checks, the *caller's* frame is passed as exec.
emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::argumentGPR1);
// Skip the "current frame" argument setup below.
doLookup = jump();
}
if (!m_exceptionChecks.empty())
m_exceptionChecks.link(this);
// lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
// Both entry paths converge here with argumentGPR1 already set.
if (doLookup.isSet())
doLookup.link(this);
move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
#if CPU(X86)
// FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
poke(GPRInfo::argumentGPR0);
poke(GPRInfo::argumentGPR1, 1);
#endif
m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));
jumpToExceptionHandler();
}
示例7: linkSlowCase
void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
{
    // Slow path for a call whose callee failed the fast JSFunction check:
    // re-check here, send JS functions through the virtual-call-link thunk,
    // and hand anything else to the host-call stub.
    int argumentCount = instruction[2].u.operand;
    int frameOffset = instruction[3].u.operand;

    linkSlowCase(iter);

    // Fast re-check for a JS function callee.
    Jump notCell = emitJumpIfNotJSCell(regT0);
    Jump notFunction = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsFunctionVPtr));

    // Speculatively roll the callframe, assuming argCount will match the arity.
    storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + frameOffset) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(frameOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
    move(Imm32(argumentCount), regT1);

    m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstructLink() : m_globalData->jitStubs->ctiVirtualCallLink());

    // Done! - return back to the hot path.
    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_call));

    // Callee was not a JS function: route to the host-function stub.
    notCell.link(this);
    notFunction.link(this);
    JITStubCall hostCallStub(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
    hostCallStub.addArgument(regT0);
    hostCallStub.addArgument(JIT::Imm32(frameOffset));
    hostCallStub.addArgument(JIT::Imm32(argumentCount));
    hostCallStub.call();

    sampleCodeBlock(m_codeBlock);
}
示例8: emitGetVirtualRegister
void JIT::compileOpCallVarargs(Instruction* instruction)
{
    // Fast path for op_call_varargs (64-bit value representation): the
    // argument count lives in a virtual register, so the callee frame's
    // position is computed at run time.
    int calleeOperand = instruction[1].u.operand;
    int argCountOperand = instruction[2].u.operand;
    int frameOffset = instruction[3].u.operand;

    emitGetVirtualRegister(argCountOperand, regT1);
    emitFastArithImmToInt(regT1); // unbox the immediate integer count
    emitGetVirtualRegister(calleeOperand, regT0);
    addPtr(Imm32(frameOffset), regT1, regT2); // regT2 = new frame offset in registers

    // Bail to the slow case unless the callee is a JSFunction cell.
    emitJumpSlowCaseIfNotJSCell(regT0);
    addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsFunctionVPtr)));

    // Speculatively roll the callframe, assuming argCount will match the arity.
    mul32(TrustedImm32(sizeof(Register)), regT2, regT2); // frame offset in bytes
    const intptr_t callerFrameOffset = static_cast<intptr_t>(sizeof(Register)) * static_cast<intptr_t>(RegisterFile::CallerFrame);
    addPtr(Imm32(static_cast<int32_t>(callerFrameOffset)), regT2, regT3);
    addPtr(callFrameRegister, regT3); // regT3 -> new frame's CallerFrame slot
    storePtr(callFrameRegister, regT3);
    addPtr(regT2, callFrameRegister); // roll into the new frame

    emitNakedCall(m_globalData->jitStubs->ctiVirtualCall());

    sampleCodeBlock(m_codeBlock);
}
示例9: branchDouble
void AssemblyHelpers::purifyNaN(FPRReg fpr)
{
    // Canonicalize any NaN bit pattern in fpr to the pure NaN (PNaN).
    // A self-comparison with DoubleEqual succeeds only for non-NaN values,
    // so non-NaNs skip the overwrite.
    MacroAssembler::Jump valueIsNotNaN = branchDouble(DoubleEqual, fpr, fpr);
    static const double pureNaN = PNaN;
    loadDouble(TrustedImmPtr(&pureNaN), fpr);
    valueIsNotNaN.link(this);
}
示例10: CPU
void JITCompiler::jitAssertIsInt32(GPRReg gpr)
{
#if CPU(X86_64)
    // Debug assertion: an int32 stored in a 64-bit register must have its
    // upper 32 bits clear. Trap via breakpoint() if any are set.
    Jump fitsInInt32 = branchPtr(BelowOrEqual, gpr, TrustedImmPtr(reinterpret_cast<void*>(static_cast<uintptr_t>(0xFFFFFFFFu))));
    breakpoint();
    fitsInInt32.link(this);
#else
    // No check on other targets.
    UNUSED_PARAM(gpr);
#endif
}
示例11: copyCalleeSavesToVMCalleeSavesBuffer
// Emits the out-of-line exception-handler stubs for this compilation:
// one for checks that must roll back to the caller's frame and one for
// ordinary checks. Each copies callee-saves to the VM buffer, calls the
// appropriate C++ lookup function with (VM*, CallFrame*), and jumps to
// the located handler.
void JITCompiler::compileExceptionHandlers()
{
if (!m_exceptionChecksWithCallFrameRollback.empty()) {
m_exceptionChecksWithCallFrameRollback.link(this);
copyCalleeSavesToVMCalleeSavesBuffer();
// lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
// Reset sp to this frame's nominal top before the call.
addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
#if CPU(X86)
// FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
poke(GPRInfo::argumentGPR0);
poke(GPRInfo::argumentGPR1, 1);
#endif
m_calls.append(CallLinkRecord(call(), lookupExceptionHandlerFromCallerFrame));
jumpToExceptionHandler();
}
if (!m_exceptionChecks.empty()) {
m_exceptionChecks.link(this);
copyCalleeSavesToVMCalleeSavesBuffer();
// lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
#if CPU(X86)
// FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
poke(GPRInfo::argumentGPR0);
poke(GPRInfo::argumentGPR1, 1);
#endif
m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));
jumpToExceptionHandler();
}
}
示例12: vm
// Exception-fuzzing hook: spills every GPR and FPR into a VM-owned buffer,
// calls operationExceptionFuzz (which may inject a synthetic exception),
// then restores all registers so the surrounding JIT code sees no clobbers.
void AssemblyHelpers::callExceptionFuzz()
{
if (!Options::enableExceptionFuzz())
return;
// One EncodedJSValue-sized slot per GPR, followed by one per FPR.
EncodedJSValue* buffer = vm()->exceptionFuzzingBuffer(sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters));
// Spill all GPRs first.
for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
store64(GPRInfo::toRegister(i), buffer + i);
#else
store32(GPRInfo::toRegister(i), buffer + i);
#endif
}
// Spill all FPRs; regT0 is usable as an address scratch because its
// value was already saved in the GPR loop above.
for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
storeDouble(FPRInfo::toRegister(i), Address(GPRInfo::regT0));
}
// Set up one argument.
#if CPU(X86)
poke(GPRInfo::callFrameRegister, 0);
#else
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
#endif
move(TrustedImmPtr(bitwise_cast<void*>(operationExceptionFuzz)), GPRInfo::nonPreservedNonReturnGPR);
call(GPRInfo::nonPreservedNonReturnGPR);
// Restore FPRs first (regT0 is still free as scratch), then GPRs last so
// regT0 gets its original value back at the very end.
for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
loadDouble(Address(GPRInfo::regT0), FPRInfo::toRegister(i));
}
for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
load64(buffer + i, GPRInfo::toRegister(i));
#else
load32(buffer + i, GPRInfo::toRegister(i));
#endif
}
}
示例13: linkSlowCase
// Slow path for op_call_eval (64-bit): after the fast eval attempt failed,
// reload the callee from the frame and make a plain virtual call.
// NOTE(review): uses CallLinkInfo::dummy() — presumably because eval call
// sites are never linked; confirm against the virtual-call thunk.
void JIT::compileCallEvalSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
// Reload the callee from its frame slot (sp-relative addressing).
load64(Address(stackPointerRegister, sizeof(Register) * JSStack::Callee - sizeof(CallerFrameAndPC)), regT0);
// regT2 carries a CallLinkInfo* across the thunk call.
move(TrustedImmPtr(&CallLinkInfo::dummy()), regT2);
emitNakedCall(m_vm->getCTIStub(virtualCallThunkGenerator).code());
// Restore this frame's sp, then store the call result.
addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
checkStackPointerAlignment();
sampleCodeBlock(m_codeBlock);
emitPutCallResult(instruction);
}
示例14: ASSERT
// Exception-fuzzing hook (lightweight variant): preserves only the two
// return-value registers across a call to operationExceptionFuzz, using
// stack slots below an alignment-sized reservation.
void AssemblyHelpers::callExceptionFuzz()
{
if (!Options::enableExceptionFuzz())
return;
// The reservation must be big enough for the two pokes below.
ASSERT(stackAlignmentBytes() >= sizeof(void*) * 2);
subPtr(TrustedImm32(stackAlignmentBytes()), stackPointerRegister);
poke(GPRInfo::returnValueGPR, 0);
poke(GPRInfo::returnValueGPR2, 1);
move(TrustedImmPtr(bitwise_cast<void*>(operationExceptionFuzz)), GPRInfo::nonPreservedNonReturnGPR);
call(GPRInfo::nonPreservedNonReturnGPR);
// Restore the saved registers and release the reservation.
peek(GPRInfo::returnValueGPR, 0);
peek(GPRInfo::returnValueGPR2, 1);
addPtr(TrustedImm32(stackAlignmentBytes()), stackPointerRegister);
}
示例15: compileCallEvalSlowCase
// Generic slow path for non-eval calls: routes the call through the
// link-call thunk so the site can be bound to its real callee.
void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex)
{
if (opcodeID == op_call_eval) {
// op_call_eval has its own dedicated slow path.
compileCallEvalSlowCase(instruction, iter);
return;
}
// Two slow-case entries are consumed here — presumably matching two
// addSlowCase() emissions on this opcode's fast path; confirm there.
linkSlowCase(iter);
linkSlowCase(iter);
// regT2 carries this site's CallLinkInfo* into the link thunk.
move(TrustedImmPtr(m_callCompilationInfo[callLinkInfoIndex].callLinkInfo), regT2);
m_callCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_vm->getCTIStub(linkCallThunkGenerator).code());
// Restore this frame's sp, then store the call result.
addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
checkStackPointerAlignment();
sampleCodeBlock(m_codeBlock);
emitPutCallResult(instruction);
}