This page collects typical usage examples of the VG_TRACK function in C++. If you are wondering what VG_TRACK does, how to call it, or want to see it used in real code, the curated examples below may help.
The following shows 15 code examples of VG_TRACK, sorted by popularity by default. Rating the examples you find useful helps the system recommend better C++ examples.
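For orientation: despite the name, VG_TRACK is a macro in the Valgrind core rather than an ordinary function. It forwards a core event (stack growth, heap allocation, signal delivery, and so on) to whichever callback the active tool registered for that event, and does nothing if no callback was registered. The real definition lives in Valgrind's internal headers; the following is only a minimal, self-contained sketch of the dispatch pattern, with illustrative names (tool_events, TRACK, on_new_stack):

/* Minimal standalone sketch of the VG_TRACK dispatch pattern: a table of
   optional callbacks, invoked only when a tool has registered one.
   All identifiers here are illustrative, not Valgrind's real ones. */
#include <stdio.h>

typedef unsigned long Addr;
typedef unsigned long SizeT;

struct tool_events {
   void (*track_new_mem_stack)(Addr a, SizeT len);
   void (*track_die_mem_stack)(Addr a, SizeT len);
};

static struct tool_events tdict;   /* all NULL until a tool registers */

#define TRACK(fn, ...) \
   do { if (tdict.track_##fn) tdict.track_##fn(__VA_ARGS__); } while (0)

static void on_new_stack(Addr a, SizeT len)
{
   printf("new stack mem at %#lx, %lu bytes\n", a, len);
}

int main(void)
{
   TRACK(new_mem_stack, 0x1000, 64);   /* no callback yet: silently skipped */
   tdict.track_new_mem_stack = on_new_stack;
   TRACK(new_mem_stack, 0x1000, 64);   /* now dispatched to the "tool" */
   return 0;
}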
Example 1: VG_
/* A thread acquires a mutex. Fails if:
- thread is not blocked waiting for the mutex
- mutex is held by another thread
*/
void VG_(tm_mutex_acquire)(ThreadId tid, Addr mutexp)
{
   struct mutex *mx;

   mx = mutex_check_initialized(tid, mutexp, "acquiring");

   switch(mx->state) {
   case MX_Unlocking:   /* ownership transfer or relock */
      VG_TRACK( post_mutex_unlock, mx->owner, (void *)mutexp );
      if (mx->owner != tid)
         thread_unblock_mutex(tid, mx, "acquiring mutex");
      break;

   case MX_Free:
      thread_unblock_mutex(tid, mx, "acquiring mutex");
      break;

   case MX_Locked:
      if (debug_mutex)
         VG_(printf)("mutex=%p mx->state=%s\n", mutexp, pp_mutexstate(mx));
      VG_TRACK( post_mutex_unlock, mx->owner, (void *)mutexp );
      mutex_report(tid, mutexp, MXE_Locked, "acquiring");
      thread_unblock_mutex(tid, mx, "acquiring mutex");
      break;

   case MX_Init:
   case MX_Dead:
      vg_assert(0);
   }

   mx->owner = tid;
   mutex_setstate(tid, mx, MX_Locked);

   VG_TRACK( post_mutex_lock, tid, (void *)mutexp );
}
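Example 1 comes from an old Valgrind core thread model; the post_mutex_lock / post_mutex_unlock events it fires are only visible to a tool that registered for them. The sketch below assumes registration hooks named VG_(track_post_mutex_lock) and VG_(track_post_mutex_unlock), mirroring the event names above, existed in that Valgrind version; treat the hook names and prototypes as assumptions, and the callback bodies as illustrative.

#include "pub_tool_basics.h"      /* ThreadId, Addr, ... */
#include "pub_tool_tooliface.h"   /* VG_(track_*) registration hooks */

/* Hypothetical tool-side callbacks for the mutex events fired above. */
static void my_post_mutex_lock(ThreadId tid, void* mutexp)
{
   /* thread tid now owns the mutex at mutexp */
}

static void my_post_mutex_unlock(ThreadId tid, void* mutexp)
{
   /* thread tid has released the mutex at mutexp */
}

static void my_pre_clo_init(void)
{
   VG_(track_post_mutex_lock)  (my_post_mutex_lock);
   VG_(track_post_mutex_unlock)(my_post_mutex_unlock);
}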
Example 2: VG_
void VG_(unknown_esp_update)(Addr new_ESP)
{
   Addr old_ESP = VG_(get_archreg)(R_ESP);
   Int  delta   = (Int)new_ESP - (Int)old_ESP;

   if (delta < -(VG_HUGE_DELTA) || VG_HUGE_DELTA < delta) {
      /* %esp has changed by more than HUGE_DELTA.  We take this to mean
         that the application is switching to a new stack, for whatever
         reason.

         JRS 20021001: following discussions with John Regehr, if a stack
         switch happens, it seems best not to mess at all with memory
         permissions.  Seems to work well with Netscape 4.X.  Really the
         only remaining difficulty is knowing exactly when a stack switch
         is happening. */
      if (VG_(clo_verbosity) > 1)
         VG_(message)(Vg_UserMsg, "Warning: client switching stacks? "
                                  "%%esp: %p --> %p", old_ESP, new_ESP);
   } else if (delta < 0) {
      VG_TRACK( new_mem_stack, new_ESP, -delta );
   } else if (delta > 0) {
      VG_TRACK( die_mem_stack, old_ESP, delta );
   }
}
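Example 2 is the old x86-only form of the stack-pointer heuristic: small downward moves of %esp are announced as newly live stack memory, small upward moves as dead stack memory, and huge jumps are treated as a stack switch and left alone. A tool observes these events by registering callbacks during its pre_clo_init; VG_(track_new_mem_stack) and VG_(track_die_mem_stack) are the registration hooks in the modern tool interface (older releases filled in a VG_(track_events) structure instead). The callback bodies here are illustrative:

#include "pub_tool_basics.h"
#include "pub_tool_tooliface.h"

/* new_mem_stack: [a, a+len) has just become stack (SP moved down).
   die_mem_stack: [a, a+len) has just stopped being stack (SP moved up). */
static void my_new_mem_stack(Addr a, SizeT len)
{
   /* e.g. a Memcheck-like tool marks the range addressable but undefined */
}

static void my_die_mem_stack(Addr a, SizeT len)
{
   /* e.g. mark the range unaddressable again */
}

static void my_pre_clo_init(void)
{
   VG_(track_new_mem_stack)(my_new_mem_stack);
   VG_(track_die_mem_stack)(my_die_mem_stack);
}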
Example 3: alloc_and_new_mem
/* Allocate memory, noticing whether or not we are doing the full
instrumentation thing. */
static __inline__
void* alloc_and_new_mem ( ThreadState* tst, UInt size, UInt alignment,
Bool is_zeroed, VgAllocKind kind )
{
Addr p;
VGP_PUSHCC(VgpCliMalloc);
vg_cmalloc_n_mallocs ++;
vg_cmalloc_bs_mallocd += size;
vg_assert(alignment >= 4);
if (alignment == 4)
p = (Addr)VG_(arena_malloc)(VG_AR_CLIENT, size);
else
p = (Addr)VG_(arena_malloc_aligned)(VG_AR_CLIENT, alignment, size);
if (needs_shadow_chunks())
addShadowChunk ( tst, p, size, kind );
VG_TRACK( ban_mem_heap, p-VG_AR_CLIENT_REDZONE_SZB,
VG_AR_CLIENT_REDZONE_SZB );
VG_TRACK( new_mem_heap, p, size, is_zeroed );
VG_TRACK( ban_mem_heap, p+size, VG_AR_CLIENT_REDZONE_SZB );
VGP_POPCC(VgpCliMalloc);
return (void*)p;
}
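The three VG_TRACK calls in example 3 describe the block layout the client allocator hands out: an inaccessible red zone below the returned pointer, the client block itself (zeroed or not, depending on is_zeroed), and another red zone above it. A standalone sketch of that address arithmetic, with an illustrative red-zone size standing in for VG_AR_CLIENT_REDZONE_SZB:

/* Sketch of the heap-block layout announced above:
   [p - RZ, p)              banned   (lower red zone)
   [p, p + size)            usable   (client block)
   [p + size, p + size + RZ) banned  (upper red zone)
   Constants and addresses are illustrative. */
#include <stdio.h>

#define REDZONE_SZB 16UL   /* stand-in for VG_AR_CLIENT_REDZONE_SZB */

int main(void)
{
   unsigned long p    = 0x5000000UL;  /* address returned by the arena */
   unsigned long size = 100UL;        /* client-requested size */

   printf("ban_mem_heap : %#lx .. %#lx (lower red zone)\n",
          p - REDZONE_SZB, p);
   printf("new_mem_heap : %#lx .. %#lx (client block)\n",
          p, p + size);
   printf("ban_mem_heap : %#lx .. %#lx (upper red zone)\n",
          p + size, p + size + REDZONE_SZB);
   return 0;
}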
Example 4: VG_
void VG_(sigframe_destroy)( ThreadId tid, Bool isRT )
{
Addr esp;
ThreadState* tst;
SizeT size;
Int sigNo;
tst = VG_(get_ThreadState)(tid);
esp = tst->arch.vex.guest_ESP;
if (!isRT)
size = restore_sigframe(tst, (struct sigframe *)esp, &sigNo);
else
size = restore_rt_sigframe(tst, (struct rt_sigframe *)esp, &sigNo);
VG_TRACK( die_mem_stack_signal, esp - VG_STACK_REDZONE_SZB,
size + VG_STACK_REDZONE_SZB );
if (VG_(clo_trace_signals))
VG_(message)(
Vg_DebugMsg,
"VG_(signal_return) (thread %d): isRT=%d valid magic; EIP=%#x\n",
tid, isRT, tst->arch.vex.guest_EIP);
VG_TRACK( post_deliver_signal, tid, sigNo );
}
Example 5: VG_
/* EXPORTED */
void VG_(sigframe_destroy)( ThreadId tid, Bool isRT )
{
Addr sp;
ThreadState* tst;
SizeT size;
Int sigNo;
tst = VG_(get_ThreadState)(tid);
/* Correctly reestablish the frame base address. */
sp = tst->arch.vex.guest_SP;
if (!isRT)
size = restore_sigframe(tst, (struct sigframe *)sp, &sigNo);
else
size = restore_rt_sigframe(tst, (struct rt_sigframe *)sp, &sigNo);
/* same as for creation: we must announce the full memory (including
alignment), otherwise massif might fail on longjmp */
VG_TRACK( die_mem_stack_signal, sp - VG_STACK_REDZONE_SZB,
size + VG_STACK_REDZONE_SZB );
if (VG_(clo_trace_signals))
VG_(message)(
Vg_DebugMsg,
"VG_(sigframe_destroy) (thread %u): isRT=%d valid magic; IP=%#llx\n",
tid, isRT, tst->arch.vex.guest_IA);
/* tell the tools */
VG_TRACK( post_deliver_signal, tid, sigNo );
}
Example 6: VG_
void VG_(sigframe_destroy)( ThreadId tid )
{
Addr rsp;
ThreadState* tst;
SizeT size;
Int sigNo;
tst = VG_(get_ThreadState)(tid);
/* Correctly reestablish the frame base address. */
rsp = tst->arch.vex.guest_RSP;
size = restore_sigframe(tst, (struct sigframe *)rsp, &sigNo);
VG_TRACK( die_mem_stack_signal, rsp - VG_STACK_REDZONE_SZB,
size + VG_STACK_REDZONE_SZB );
if (VG_(clo_trace_signals))
VG_(message)(
Vg_DebugMsg,
"VG_(signal_return) (thread %d): valid magic; RIP=%#llx\n",
tid, tst->arch.vex.guest_RIP);
/* tell the tools */
VG_TRACK( post_deliver_signal, tid, sigNo );
}
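Examples 4, 5 and 6 are the same teardown on three architectures (x86, s390x and amd64 respectively): restore the guest state from the signal frame, announce the now-dead frame area, including the stack red zone, via die_mem_stack_signal, then report the end of signal delivery via post_deliver_signal. The tool-side registrations look roughly like this; the prototypes follow pub_tool_tooliface.h as I recall them, so verify them against your Valgrind version:

#include "pub_tool_basics.h"
#include "pub_tool_tooliface.h"

static void my_die_mem_stack_signal(Addr a, SizeT len)
{
   /* the signal frame at [a, a+len) is gone: mark it unaddressable */
}

static void my_post_deliver_signal(ThreadId tid, Int sigNo)
{
   /* signal sigNo has finished being handled on thread tid */
}

static void my_pre_clo_init(void)
{
   VG_(track_die_mem_stack_signal)(my_die_mem_stack_signal);
   VG_(track_post_deliver_signal) (my_post_deliver_signal);
}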
Example 7: startup_segment_callback
static
void startup_segment_callback ( Addr start, UInt size,
Char rr, Char ww, Char xx,
UInt foffset, UChar* filename )
{
UInt r_esp;
Bool is_stack_segment;
Bool verbose = False; /* set to True for debugging */
if (verbose)
VG_(message)(Vg_DebugMsg,
"initial map %8x-%8x %c%c%c? %8x (%d) (%s)",
start,start+size,rr,ww,xx,foffset,
size, filename?filename:(UChar*)"NULL");
if (rr != 'r' && xx != 'x' && ww != 'w') {
/* Implausible as it seems, R H 6.2 generates such segments:
40067000-400ac000 r-xp 00000000 08:05 320686 /usr/X11R6/lib/libXt.so.6.0
400ac000-400ad000 ---p 00045000 08:05 320686 /usr/X11R6/lib/libXt.so.6.0
400ad000-400b0000 rw-p 00045000 08:05 320686 /usr/X11R6/lib/libXt.so.6.0
when running xedit. So just ignore them. */
if (0)
VG_(printf)("No permissions on a segment mapped from %s\n",
filename?filename:(UChar*)"NULL");
return;
}
/* If this segment corresponds to something mmap'd /dev/zero by the
low-level memory manager (vg_malloc2.c), skip it. Clients
should never have access to the segments which hold valgrind
internal data. And access to client data in the VG_AR_CLIENT
arena is mediated by the skin, so we don't want to make it
accessible at this stage. */
if (VG_(is_inside_segment_mmapd_by_low_level_MM)( start )) {
if (verbose)
VG_(message)(Vg_DebugMsg,
" skipping %8x-%8x (owned by our MM)",
start, start+size );
/* Don't announce it to the skin. */
return;
}
/* This parallels what happens when we mmap some new memory */
if (filename != NULL && xx == 'x') {
VG_(new_exe_segment)( start, size );
}
VG_TRACK( new_mem_startup, start, size, rr=='r', ww=='w', xx=='x' );
/* If this is the stack segment mark all below %esp as noaccess. */
r_esp = VG_(baseBlock)[VGOFF_(m_esp)];
is_stack_segment = start <= r_esp && r_esp < start+size;
if (is_stack_segment) {
if (0)
VG_(message)(Vg_DebugMsg, "invalidating stack area: %x .. %x",
start,r_esp);
VG_TRACK( die_mem_stack, start, r_esp-start );
}
}
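Example 7 (from an old, pre-3.0 "skin"-era core) walks the initial memory map at startup, announces each client-visible segment with new_mem_startup, and then kills everything below the initial %esp in the stack segment. In the modern tool interface the corresponding registration hook is VG_(track_new_mem_startup); its callback nowadays also receives a debug-info handle, which the old interface used in this example did not pass. A sketch, with the prototype to be checked against your version's pub_tool_tooliface.h:

#include "pub_tool_basics.h"
#include "pub_tool_tooliface.h"

/* Called once per segment of the initial memory map. */
static void my_new_mem_startup(Addr a, SizeT len,
                               Bool rr, Bool ww, Bool xx,
                               ULong di_handle)
{
   /* mark [a, a+len) addressable; definedness typically follows rr/ww */
}

static void my_pre_clo_init(void)
{
   VG_(track_new_mem_startup)(my_new_mem_startup);
}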
Example 8: stack_mcontext
static
void stack_mcontext ( struct vki_mcontext *mc,
ThreadState* tst,
Bool use_rt_sigreturn,
UInt fault_addr )
{
VG_TRACK( pre_mem_write, Vg_CoreSignal, tst->tid, "signal frame mcontext",
(Addr)mc, sizeof(struct vki_pt_regs) );
# define DO(gpr) mc->mc_gregs[VKI_PT_R0+gpr] = tst->arch.vex.guest_GPR##gpr
DO(0); DO(1); DO(2); DO(3); DO(4); DO(5); DO(6); DO(7);
DO(8); DO(9); DO(10); DO(11); DO(12); DO(13); DO(14); DO(15);
DO(16); DO(17); DO(18); DO(19); DO(20); DO(21); DO(22); DO(23);
DO(24); DO(25); DO(26); DO(27); DO(28); DO(29); DO(30); DO(31);
# undef DO
mc->mc_gregs[VKI_PT_NIP] = tst->arch.vex.guest_CIA;
mc->mc_gregs[VKI_PT_MSR] = 0xf032; /* pretty arbitrary */
mc->mc_gregs[VKI_PT_ORIG_R3] = tst->arch.vex.guest_GPR3;
mc->mc_gregs[VKI_PT_CTR] = tst->arch.vex.guest_CTR;
mc->mc_gregs[VKI_PT_LNK] = tst->arch.vex.guest_LR;
mc->mc_gregs[VKI_PT_XER] = LibVEX_GuestPPC32_get_XER(&tst->arch.vex);
mc->mc_gregs[VKI_PT_CCR] = LibVEX_GuestPPC32_get_CR(&tst->arch.vex);
mc->mc_gregs[VKI_PT_MQ] = 0;
mc->mc_gregs[VKI_PT_TRAP] = 0;
mc->mc_gregs[VKI_PT_DAR] = fault_addr;
mc->mc_gregs[VKI_PT_DSISR] = 0;
mc->mc_gregs[VKI_PT_RESULT] = 0;
VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid,
(Addr)mc, sizeof(struct vki_pt_regs) );
/* XXX should do FP and vector regs */
/* set up signal return trampoline */
/* NB. 5 Sept 07. mc->mc_pad[0..1] used to contain the code to
which the signal handler returns, and it just did sys_sigreturn
or sys_rt_sigreturn. But this doesn't work if the stack is
non-executable, and it isn't consistent with the x86-linux and
amd64-linux scheme for removing the stack frame. So instead be
consistent and use a stub in m_trampoline. Then it doesn't
matter whether or not the (guest) stack is executable. This
fixes #149519 and #145837. */
VG_TRACK(pre_mem_write, Vg_CoreSignal, tst->tid, "signal frame mcontext",
(Addr)&mc->mc_pad, sizeof(mc->mc_pad));
mc->mc_pad[0] = 0; /* invalid */
mc->mc_pad[1] = 0; /* invalid */
VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid,
(Addr)&mc->mc_pad, sizeof(mc->mc_pad) );
/* invalidate any translation of this area */
VG_(discard_translations)( (Addr)&mc->mc_pad,
sizeof(mc->mc_pad), "stack_mcontext" );
/* set the signal handler to return to the trampoline */
SET_SIGNAL_LR(tst, (Addr)(use_rt_sigreturn
? (Addr)&VG_(ppc32_linux_SUBST_FOR_rt_sigreturn)
: (Addr)&VG_(ppc32_linux_SUBST_FOR_sigreturn)
));
}
Example 9: VG_
void VG_(unknown_SP_update)( Addr old_SP, Addr new_SP, UInt ecu )
{
   static Int moans = 3;
   Word delta = (Word)new_SP - (Word)old_SP;

   /* Check if the stack pointer is still in the same stack as before. */
   if (current_stack == NULL ||
       new_SP < current_stack->start || new_SP > current_stack->end) {
      Stack* new_stack = find_stack_by_addr(new_SP);
      if (new_stack
          && (current_stack == NULL || new_stack->id != current_stack->id)) {
         /* The stack pointer is now in another stack.  Update the current
            stack information and return without doing anything else. */
         current_stack = new_stack;
         return;
      }
   }

   if (delta < -VG_(clo_max_stackframe) || VG_(clo_max_stackframe) < delta) {
      /* SP has changed by more than some threshold amount (by
         default, 2MB).  We take this to mean that the application is
         switching to a new stack, for whatever reason.

         JRS 20021001: following discussions with John Regehr, if a stack
         switch happens, it seems best not to mess at all with memory
         permissions.  Seems to work well with Netscape 4.X.  Really the
         only remaining difficulty is knowing exactly when a stack switch
         is happening. */
      if (VG_(clo_verbosity) > 0 && moans > 0) {
         moans--;
         VG_(message)(Vg_UserMsg,
            "Warning: client switching stacks? "
            "SP change: %p --> %p", old_SP, new_SP);
         VG_(message)(Vg_UserMsg,
            " to suppress, use: --max-stackframe=%ld or greater",
            (delta < 0 ? -delta : delta));
         if (moans == 0)
            VG_(message)(Vg_UserMsg,
               " further instances of this message "
               "will not be shown.");
      }
   } else if (delta < 0) {
      VG_TRACK( new_mem_stack_w_ECU, new_SP, -delta, ecu );
      VG_TRACK( new_mem_stack,       new_SP, -delta );
   } else if (delta > 0) {
      VG_TRACK( die_mem_stack, old_SP, delta );
   }
}
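Example 9 is the current, architecture-neutral version of example 2. Note that stack growth is announced twice: once through new_mem_stack_w_ECU, which carries an "ECU" (execution-context uniqueness) value identifying where the growth happened, and once through the plain new_mem_stack event; a tool registers for whichever variant it needs. A registration sketch for the ECU-carrying form (the hook name mirrors the event name; treat the prototype as an assumption):

#include "pub_tool_basics.h"
#include "pub_tool_tooliface.h"

static void my_new_mem_stack_w_ECU(Addr a, SizeT len, UInt ecu)
{
   /* ecu identifies the allocation point; an origin-tracking tool can
      use it to report where uninitialised values in [a, a+len) came from */
}

static void my_pre_clo_init(void)
{
   VG_(track_new_mem_stack_w_ECU)(my_new_mem_stack_w_ECU);
}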
Example 10: extend
static Bool extend ( ThreadState *tst, Addr addr, SizeT size )
{
   ThreadId tid = tst->tid;
   NSegment const* stackseg = NULL;

   if (VG_(extend_stack)(addr, tst->client_stack_szB)) {
      stackseg = VG_(am_find_nsegment)(addr);
      if (0 && stackseg)
         VG_(printf)("frame=%#lx seg=%#lx-%#lx\n",
                     addr, stackseg->start, stackseg->end);
   }

   if (stackseg == NULL || !stackseg->hasR || !stackseg->hasW) {
      VG_(message)(
         Vg_UserMsg,
         "Can't extend stack to %#lx during signal delivery for thread %d:\n",
         addr, tid);
      if (stackseg == NULL)
         VG_(message)(Vg_UserMsg, " no stack segment\n");
      else
         VG_(message)(Vg_UserMsg, " too small or bad protection modes\n");

      VG_(set_default_handler)(VKI_SIGSEGV);
      VG_(synth_fault_mapping)(tid, addr);
      return False;
   }

   VG_TRACK( new_mem_stack_signal, addr - VG_STACK_REDZONE_SZB,
             size + VG_STACK_REDZONE_SZB, tid );
   return True;
}
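Examples 10 and 11 are the creation-side counterpart of examples 4-6: before the core writes a signal frame, the (possibly freshly extended) stack area is announced with new_mem_stack_signal. Example 11 is a platform where the stack never needs explicit extension, so only the announcement remains. A registration sketch (older versions did not pass the ThreadId argument, so treat the exact prototype as an assumption):

#include "pub_tool_basics.h"
#include "pub_tool_tooliface.h"

static void my_new_mem_stack_signal(Addr a, SizeT len, ThreadId tid)
{
   /* [a, a+len) is about to hold the signal frame for thread tid */
}

static void my_pre_clo_init(void)
{
   VG_(track_new_mem_stack_signal)(my_new_mem_stack_signal);
}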
Example 11: extend
static Bool extend ( ThreadState *tst, Addr addr, SizeT size )
{
ThreadId tid = tst->tid;
VG_TRACK( new_mem_stack_signal,
addr - VG_STACK_REDZONE_SZB, size, tid );
return True;
}
Example 12: VG_
/* EXPORTED */
void VG_(sigframe_create)( ThreadId tid,
Addr sp_top_of_frame,
const vki_siginfo_t *siginfo,
const struct vki_ucontext *siguc,
void *handler,
UInt flags,
const vki_sigset_t *mask,
void *restorer )
{
Addr sp;
ThreadState* tst = VG_(get_ThreadState)(tid);
if (flags & VKI_SA_SIGINFO)
sp = build_rt_sigframe(tst, sp_top_of_frame, siginfo, siguc,
flags, mask, restorer);
else
sp = build_sigframe(tst, sp_top_of_frame, siginfo, siguc,
flags, mask, restorer);
/* Set the thread so it will next run the handler. */
VG_(set_SP)(tid, sp);
VG_TRACK( post_reg_write, Vg_CoreSignal, tid, VG_O_STACK_PTR, sizeof(Addr));
tst->arch.vex.guest_IA = (Addr) handler;
/* We might have interrupted a repeating instruction that uses the guest
counter. Since our VEX requires that a new instruction will see a
guest counter == 0, we have to set it here. The old value will be
restored by restore_vg_sigframe. */
tst->arch.vex.guest_counter = 0;
/* This thread needs to be marked runnable, but we leave that for the
caller to do. */
}
Example 13: VG_
/* EXPORTED */
void VG_(sigframe_create)( ThreadId tid,
Addr esp_top_of_frame,
const vki_siginfo_t *siginfo,
const struct vki_ucontext *siguc,
void *handler,
UInt flags,
const vki_sigset_t *mask,
void *restorer )
{
Addr esp;
ThreadState* tst = VG_(get_ThreadState)(tid);
if (flags & VKI_SA_SIGINFO)
esp = build_rt_sigframe(tst, esp_top_of_frame, siginfo, siguc,
handler, flags, mask, restorer);
else
esp = build_sigframe(tst, esp_top_of_frame, siginfo, siguc,
handler, flags, mask, restorer);
/* Set the thread so it will next run the handler. */
/* tst->m_esp = esp; also notify the tool we've updated ESP */
VG_(set_SP)(tid, esp);
VG_TRACK( post_reg_write, Vg_CoreSignal, tid, VG_O_STACK_PTR, sizeof(Addr));
//VG_(printf)("handler = %p\n", handler);
tst->arch.vex.guest_EIP = (Addr) handler;
/* This thread needs to be marked runnable, but we leave that for the
caller to do. */
if (0)
VG_(printf)("pushed signal frame; %%ESP now = %#lx, "
"next %%EIP = %#x, status=%d\n",
esp, tst->arch.vex.guest_EIP, tst->status);
}
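In examples 12, 13 and 15, after VG_(set_SP) the core reports that it, rather than client code, has just written the guest stack-pointer register: that is what the post_reg_write event with VG_O_STACK_PTR conveys. A Memcheck-like tool might use it to mark the register contents as defined. Registration sketch (the type of the offset parameter is my assumption; older headers use a different integer type):

#include "pub_tool_basics.h"
#include "pub_tool_tooliface.h"

static void my_post_reg_write(CorePart part, ThreadId tid,
                              PtrdiffT guest_state_offset, SizeT size)
{
   /* `size` bytes at guest_state_offset in the guest state were just
      written by the core (part == Vg_CoreSignal in these examples) */
}

static void my_pre_clo_init(void)
{
   VG_(track_post_reg_write)(my_post_reg_write);
}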
Example 14: stack_mcontext
static
void stack_mcontext ( struct vki_mcontext *mc,
ThreadState* tst,
Int ret,
UInt fault_addr )
{
VG_TRACK( pre_mem_write, Vg_CoreSignal, tst->tid, "signal frame mcontext",
(Addr)mc, sizeof(struct vki_pt_regs) );
# define DO(gpr) mc->mc_gregs[VKI_PT_R0+gpr] = tst->arch.vex.guest_GPR##gpr
DO(0); DO(1); DO(2); DO(3); DO(4); DO(5); DO(6); DO(7);
DO(8); DO(9); DO(10); DO(11); DO(12); DO(13); DO(14); DO(15);
DO(16); DO(17); DO(18); DO(19); DO(20); DO(21); DO(22); DO(23);
DO(24); DO(25); DO(26); DO(27); DO(28); DO(29); DO(30); DO(31);
# undef DO
mc->mc_gregs[VKI_PT_NIP] = tst->arch.vex.guest_CIA;
mc->mc_gregs[VKI_PT_MSR] = 0xf032; /* pretty arbitrary */
mc->mc_gregs[VKI_PT_ORIG_R3] = tst->arch.vex.guest_GPR3;
mc->mc_gregs[VKI_PT_CTR] = tst->arch.vex.guest_CTR;
mc->mc_gregs[VKI_PT_LNK] = tst->arch.vex.guest_LR;
mc->mc_gregs[VKI_PT_XER] = LibVEX_GuestPPC32_get_XER(&tst->arch.vex);
mc->mc_gregs[VKI_PT_CCR] = LibVEX_GuestPPC32_get_CR(&tst->arch.vex);
mc->mc_gregs[VKI_PT_MQ] = 0;
mc->mc_gregs[VKI_PT_TRAP] = 0;
mc->mc_gregs[VKI_PT_DAR] = fault_addr;
mc->mc_gregs[VKI_PT_DSISR] = 0;
mc->mc_gregs[VKI_PT_RESULT] = 0;
VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid,
(Addr)mc, sizeof(struct vki_pt_regs) );
/* XXX should do FP and vector regs */
/* set up signal return trampoline */
VG_TRACK(pre_mem_write, Vg_CoreSignal, tst->tid, "signal frame mcontext",
(Addr)&mc->mc_pad, sizeof(mc->mc_pad));
mc->mc_pad[0] = 0x38000000U + ret; /* li 0,ret */
mc->mc_pad[1] = 0x44000002U; /* sc */
VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid,
(Addr)&mc->mc_pad, sizeof(mc->mc_pad) );
/* invalidate any translation of this area */
VG_(discard_translations)( (Addr64)(Addr)&mc->mc_pad,
sizeof(mc->mc_pad), "stack_mcontext" );
/* set the signal handler to return to the trampoline */
SET_SIGNAL_LR(tst, (Addr) &mc->mc_pad[0]);
}
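Example 14 is the older ppc32 variant that still plants the sigreturn trampoline directly in mc_pad (contrast example 8, which zeroes mc_pad and returns through a stub in m_trampoline instead): "li 0,ret" loads the syscall number and "sc" traps into the kernel. The two magic constants follow directly from the PowerPC encodings, primary opcode 14 for addi (of which li is an alias) and 17 for sc. A small self-contained check, with an illustrative syscall number:

/* Verify the ppc32 trampoline constants used in example 14. */
#include <stdio.h>
#include <assert.h>

int main(void)
{
   unsigned ret = 119;   /* illustrative sigreturn-style syscall number */

   /* li 0,ret == addi r0,0,ret: opcode 14, RT=0, RA=0, SIMM=ret */
   unsigned li_r0_ret = (14u << 26) | (0u << 21) | (0u << 16) | (ret & 0xffffu);
   /* sc: opcode 17 with bit 30 set */
   unsigned sc        = (17u << 26) | 2u;

   assert(li_r0_ret == 0x38000000u + ret);
   assert(sc        == 0x44000002u);
   printf("li 0,%u = %#010x, sc = %#010x\n", ret, li_r0_ret, sc);
   return 0;
}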
Example 15: VG_
/* EXPORTED */
void VG_(sigframe_create)( ThreadId tid,
Bool on_altstack,
Addr sp_top_of_frame,
const vki_siginfo_t *siginfo,
const struct vki_ucontext *siguc,
void *handler,
UInt flags,
const vki_sigset_t *mask,
void *restorer )
{
ThreadState *tst;
Addr sp = sp_top_of_frame;
Int sigNo = siginfo->si_signo;
UInt size;
tst = VG_(get_ThreadState)(tid);
size = sizeof(struct rt_sigframe);
sp -= size;
sp = VG_ROUNDDN(sp, 16);
if (! ML_(sf_maybe_extend_stack)(tst, sp, size, flags))
return; // Give up. No idea if this is correct
struct rt_sigframe *rsf = (struct rt_sigframe *)sp;
/* Track our writes to siginfo */
VG_TRACK( pre_mem_write, Vg_CoreSignal, tst->tid, /* VVVVV */
"signal handler siginfo", (Addr)rsf,
offsetof(struct rt_sigframe, sig));
VG_(memcpy)(&rsf->info, siginfo, sizeof(vki_siginfo_t));
if (sigNo == VKI_SIGILL && siginfo->si_code > 0) {
rsf->info._sifields._sigfault._addr
= (Addr*)(tst)->arch.vex.guest_PC;
}
VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid, /* ^^^^^ */
(Addr)rsf, offsetof(struct rt_sigframe, sig));
build_sigframe(tst, &rsf->sig, siginfo, siguc,
handler, flags, mask, restorer);
tst->arch.vex.guest_X1 = (Addr)&rsf->info;
tst->arch.vex.guest_X2 = (Addr)&rsf->sig.uc;
VG_(set_SP)(tid, sp);
VG_TRACK( post_reg_write, Vg_CoreSignal, tid, VG_O_STACK_PTR,
sizeof(Addr));
tst->arch.vex.guest_X0 = sigNo;
if (flags & VKI_SA_RESTORER)
tst->arch.vex.guest_X30 = (Addr)restorer;
else
tst->arch.vex.guest_X30
= (Addr)&VG_(arm64_linux_SUBST_FOR_rt_sigreturn);
tst->arch.vex.guest_PC = (Addr)handler;
}
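Example 15 (arm64) shows the general pattern the core uses whenever it writes into client memory, also visible around the mcontext writes in examples 8 and 14: pre_mem_write lets the tool check that the destination is addressable and attribute any complaint to the named core operation ("signal handler siginfo" here), and post_mem_write lets it update its notion of the region's contents afterwards. Registration sketch, prototypes to be confirmed against pub_tool_tooliface.h:

#include "pub_tool_basics.h"
#include "pub_tool_tooliface.h"

static void my_pre_mem_write(CorePart part, ThreadId tid, const HChar* s,
                             Addr a, SizeT len)
{
   /* `s` names the core operation; complain here if [a, a+len) is not
      addressable */
}

static void my_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
{
   /* the core has written [a, a+len): e.g. mark it as defined */
}

static void my_pre_clo_init(void)
{
   VG_(track_pre_mem_write) (my_pre_mem_write);
   VG_(track_post_mem_write)(my_post_mem_write);
}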