This article collects typical usage examples of VALGRIND_MAKE_MEM_DEFINED in C++. VALGRIND_MAKE_MEM_DEFINED is a Valgrind Memcheck client-request macro (declared in <valgrind/memcheck.h>) that marks a range of memory as both addressable and initialized. If you have been wondering what exactly VALGRIND_MAKE_MEM_DEFINED does, how to call it, or what it looks like in real code, the curated examples below may help.
Below are 15 code examples of VALGRIND_MAKE_MEM_DEFINED, ordered by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
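Before the project examples, here is a minimal, self-contained sketch of the request itself. The buffer and the pretend external writer are hypothetical; building it requires the Valgrind development headers, and when the program is not run under Valgrind the macro is a no-op:

#include <stdio.h>
#include <stdlib.h>
#include <valgrind/memcheck.h>

int main(void)
{
    size_t len = 64;
    unsigned char *buf = malloc(len);
    if (!buf)
        return 1;

    /* Pretend something Memcheck cannot see (a driver, DMA, a syscall it
     * does not model) has filled 'buf'. Without the client request, the
     * read below would be reported as a use of uninitialised data. */
    (void)VALGRIND_MAKE_MEM_DEFINED(buf, len);

    printf("first byte: %u\n", (unsigned)buf[0]); /* no Memcheck report */
    free(buf);
    return 0;
}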
Example 1: uct_cm_iface_handle_sidr_req
static void uct_cm_iface_handle_sidr_req(uct_cm_iface_t *iface,
                                         struct ib_cm_event *event)
{
    uct_cm_hdr_t *hdr = event->private_data;
    struct ib_cm_sidr_rep_param rep;
    ucs_status_t status;
    void *cm_desc, *desc;
    int ret;

    /* sizeof(*hdr): mark the header struct defined, not just the pointer */
    VALGRIND_MAKE_MEM_DEFINED(hdr, sizeof(*hdr));
    VALGRIND_MAKE_MEM_DEFINED(hdr + 1, hdr->length);

    uct_cm_iface_trace_data(iface, UCT_AM_TRACE_TYPE_RECV, hdr, "RX: SIDR_REQ");

    /* Allocate temporary buffer to serve as receive descriptor */
    cm_desc = ucs_malloc(iface->super.config.rx_payload_offset + hdr->length,
                         "cm_recv_desc");
    if (cm_desc == NULL) {
        ucs_error("failed to allocate cm receive descriptor");
        return;
    }

    /* Send reply */
    ucs_trace_data("TX: SIDR_REP");
    memset(&rep, 0, sizeof rep);
    rep.status = IB_SIDR_SUCCESS;
    ret = ib_cm_send_sidr_rep(event->cm_id, &rep);
    if (ret) {
        ucs_error("ib_cm_send_sidr_rep() failed: %m");
    }

    /* Call active message handler */
    desc = cm_desc + iface->super.config.rx_headroom_offset;
    uct_recv_desc_iface(desc) = &iface->super.super.super;
    status = uct_iface_invoke_am(&iface->super.super, hdr->am_id, hdr + 1,
                                 hdr->length, desc);
    if (status == UCS_OK) {
        ucs_free(cm_desc);
    }
}
Example 2: transport_read_layer
int transport_read_layer(rdpTransport* transport, BYTE* data, int bytes)
{
	int read = 0;
	int status = -1;

	if (!transport->frontBio)
	{
		transport->layer = TRANSPORT_LAYER_CLOSED;
		return -1;
	}

	while (read < bytes)
	{
		status = BIO_read(transport->frontBio, data + read, bytes - read);

		if (status <= 0)
		{
			if (!transport->frontBio || !BIO_should_retry(transport->frontBio))
			{
				/* something unexpected happened, let's close */
				if (!transport->frontBio)
				{
					WLog_ERR(TAG, "BIO_read: transport->frontBio null");
					return -1;
				}

				WLog_ERR_BIO(TAG, "BIO_read", transport->frontBio);
				transport->layer = TRANSPORT_LAYER_CLOSED;
				return -1;
			}

			/* non blocking will survive a partial read */
			if (!transport->blocking)
				return read;

			/* blocking means that we can't continue until we have read the number of requested bytes */
			if (BIO_wait_read(transport->frontBio, 100) < 0)
			{
				WLog_ERR_BIO(TAG, "BIO_wait_read", transport->frontBio);
				return -1;
			}

			continue;
		}

#ifdef HAVE_VALGRIND_MEMCHECK_H
		/* only 'status' bytes were actually filled in by BIO_read */
		VALGRIND_MAKE_MEM_DEFINED(data + read, status);
#endif
		read += status;
	}

	return read;
}
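Note the HAVE_VALGRIND_MEMCHECK_H guard in Example 2: FreeRDP only compiles the client request in when the build system found the header. A common way to keep such guards out of every call site is a thin wrapper macro. A sketch of that pattern follows; MARK_MEM_DEFINED is a made-up name, and HAVE_VALGRIND_MEMCHECK_H is assumed to be set by the build system:

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#define MARK_MEM_DEFINED(p, n) VALGRIND_MAKE_MEM_DEFINED((p), (n))
#else
/* builds cleanly even when the Valgrind headers are not installed */
#define MARK_MEM_DEFINED(p, n) do { (void)(p); (void)(n); } while (0)
#endif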
Example 3: BakerGC::reset
void BakerGC::reset() {
    check_growth_finish();

    next->reset();
    eden->reset();

#ifdef HAVE_VALGRIND_H
    (void)VALGRIND_MAKE_MEM_NOACCESS(next->start().as_int(), next->size());
    (void)VALGRIND_MAKE_MEM_DEFINED(current->start().as_int(), current->size());
#endif
    mprotect(next->start(), next->size(), PROT_NONE);
    mprotect(current->start(), current->size(), PROT_READ | PROT_WRITE);
}
Example 4: sec_block_create
static Block*
sec_block_create (size_t size,
                  const char *during_tag)
{
	Block *block;
	Cell *cell;

	ASSERT (during_tag);

	/* We can force all memory to be malloced */
	if (getenv ("SECMEM_FORCE_FALLBACK"))
		return NULL;

	block = pool_alloc ();
	if (!block)
		return NULL;

	cell = pool_alloc ();
	if (!cell) {
		pool_free (block);
		return NULL;
	}

	/* The size above is a minimum, we're free to go bigger */
	if (size < DEFAULT_BLOCK_SIZE)
		size = DEFAULT_BLOCK_SIZE;

	block->words = sec_acquire_pages (&size, during_tag);
	block->n_words = size / sizeof (word_t);
	if (!block->words) {
		pool_free (block);
		pool_free (cell);
		return NULL;
	}

#ifdef WITH_VALGRIND
	VALGRIND_MAKE_MEM_DEFINED (block->words, size);
#endif

	/* The first cell to allocate from */
	cell->words = block->words;
	cell->n_words = block->n_words;
	cell->requested = 0;
	sec_write_guards (cell);
	sec_insert_cell_ring (&block->unused_cells, cell);

	block->next = all_blocks;
	all_blocks = block;

	return block;
}
Example 5: rdma_leave_multicast
int rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct ucma_abi_destroy_id *cmd;
	struct ucma_abi_destroy_id_resp *resp;
	struct cma_id_private *id_priv;
	struct cma_multicast *mc, **pos;
	void *msg;
	int ret, size, addrlen;

	addrlen = ucma_addrlen(addr);
	if (!addrlen)
		return ERR(EINVAL);

	id_priv = container_of(id, struct cma_id_private, id);
	pthread_mutex_lock(&id_priv->mut);
	for (pos = &id_priv->mc_list; *pos; pos = &(*pos)->next)
		if (!memcmp(&(*pos)->addr, addr, addrlen))
			break;

	mc = *pos;
	if (*pos)
		*pos = mc->next;
	pthread_mutex_unlock(&id_priv->mut);

	if (!mc)
		return ERR(EADDRNOTAVAIL);

	if (id->qp)
		ibv_detach_mcast(id->qp, &mc->mgid, mc->mlid);

	CMA_CREATE_MSG_CMD_RESP(msg, cmd, resp, UCMA_CMD_LEAVE_MCAST, size);
	cmd->id = mc->handle;
	ret = write(id->channel->fd, msg, size);
	if (ret != size) {
		ret = (ret >= 0) ? ERR(ECONNREFUSED) : -1;
		goto free;
	}

	VALGRIND_MAKE_MEM_DEFINED(resp, sizeof *resp);

	pthread_mutex_lock(&id_priv->mut);
	while (mc->events_completed < resp->events_reported)
		pthread_cond_wait(&mc->cond, &id_priv->mut);
	pthread_mutex_unlock(&id_priv->mut);

	ret = 0;
free:
	free(mc);
	return ret;
}
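Examples 5 and 13 share one motif: the librdmacm command is pushed to the kernel with write() on the ucma device fd, and the kernel fills the response buffer as a side effect that Memcheck does not model (the command embeds a userspace pointer to the response buffer), so the code declares *resp defined right after the write succeeds. A reduced sketch of that pattern, where the fd and the cmd/resp types are placeholders rather than the real ucma ABI:

#include <unistd.h>
#include <valgrind/memcheck.h>

struct dev_resp {
    int events_reported;   /* filled in by the kernel, not by us */
};

static int dev_transact(int fd, const void *cmd, size_t cmd_size,
                        struct dev_resp *resp)
{
    /* Memcheck sees write() *reading* cmd_size bytes from cmd; it has no
     * idea the driver also wrote into *resp via a pointer inside cmd. */
    if (write(fd, cmd, cmd_size) != (ssize_t)cmd_size)
        return -1;

    /* So declare the response initialized before reading it. */
    VALGRIND_MAKE_MEM_DEFINED(resp, sizeof(*resp));
    return 0;
}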
Example 6: main
int main(void)
{
    int *array, *array3;
    int x;

    array = custom_alloc(sizeof(int) * 10);
    array[8] = 8;
    array[9] = 8;
    array[10] = 10; // invalid write (ok w/o MALLOCLIKE -- in superblock)

    VALGRIND_RESIZEINPLACE_BLOCK(array, sizeof(int) * 10, sizeof(int) * 5, RZ);
    array[4] = 7;
    array[5] = 9; // invalid write

    // Make the entire array defined again such that it can be verified whether
    // the red zone is marked properly when resizing in place.
    VALGRIND_MAKE_MEM_DEFINED(array, sizeof(int) * 10);

    VALGRIND_RESIZEINPLACE_BLOCK(array, sizeof(int) * 5, sizeof(int) * 7, RZ);
    if (array[5]) array[4]++; // uninitialized read of array[5]
    array[5] = 11;
    array[6] = 7;
    array[7] = 8; // invalid write

    // invalid realloc
    VALGRIND_RESIZEINPLACE_BLOCK(array+1, sizeof(int) * 7, sizeof(int) * 8, RZ);

    custom_free(array); // ok

    custom_free((void*)0x1); // invalid free

    array3 = malloc(sizeof(int) * 10);
    custom_free(array3); // mismatched free (ok without MALLOCLIKE)

    make_leak();
    x = array[0]; // use after free (ok without MALLOCLIKE/MAKE_MEM_NOACCESS)
                  // (nb: initialised because is_zeroed==1 above)
                  // unfortunately not identified as being in a free'd
                  // block because the freeing of the block and shadow
                  // chunk isn't postponed.

    // Bug 137073: passing 0 to MALLOCLIKE_BLOCK was causing an assertion
    // failure. Test for this (and likewise for FREELIKE_BLOCK).
    VALGRIND_MALLOCLIKE_BLOCK(0,0,0,0);
    VALGRIND_FREELIKE_BLOCK(0,0);

    return x;

    // leak from make_leak()
}
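Example 6 leans on helpers the page does not show: custom_alloc(), custom_free(), make_leak() and the red-zone constant RZ come from the surrounding Valgrind test file. The following is a plausible reconstruction, purely to make the example readable; it is an assumption, not the original test code:

#include <valgrind/valgrind.h>

#define RZ 4                          /* assumed red-zone size in bytes */
static char superblock[100000];       /* the "superblock" the comments mention */
static char *next_block = superblock;

static void *custom_alloc(size_t size)
{
    void *p = next_block + RZ;
    next_block += RZ + size + RZ;
    /* (addr, sizeB, rzB, is_zeroed): is_zeroed == 1 matches the
     * "initialised because is_zeroed==1" note in the example. */
    VALGRIND_MALLOCLIKE_BLOCK(p, size, RZ, 1);
    return p;
}

static void custom_free(void *p)
{
    VALGRIND_FREELIKE_BLOCK(p, RZ);   /* memory itself stays in the superblock */
}

static void make_leak(void)
{
    (void)custom_alloc(77);           /* pointer dropped: reported as a leak */
}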
Example 7: AddCatalogInvalidationMessage
/*
 * Add a whole-catalog inval entry
 */
static void
AddCatalogInvalidationMessage(InvalidationListHeader *hdr,
							  Oid dbId, Oid catId)
{
	SharedInvalidationMessage msg;

	msg.cat.id = SHAREDINVALCATALOG_ID;
	msg.cat.dbId = dbId;
	msg.cat.catId = catId;
	/* check AddCatcacheInvalidationMessage() for an explanation */
	VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));

	AddInvalidationMessage(&hdr->cclist, &msg);
}
Example 8: _cairo_freelist_fini
void
_cairo_freelist_fini (cairo_freelist_t *freelist)
{
    cairo_freelist_node_t *node = freelist->first_free_node;
    while (node) {
	cairo_freelist_node_t *next;

	VG (VALGRIND_MAKE_MEM_DEFINED (node, sizeof (node->next)));
	next = node->next;

	free (node);
	node = next;
    }
}
Example 9: CacheInvalidateSmgr
/*
 * CacheInvalidateSmgr
 *		Register invalidation of smgr references to a physical relation.
 *
 * Sending this type of invalidation msg forces other backends to close open
 * smgr entries for the rel.  This should be done to flush dangling open-file
 * references when the physical rel is being dropped or truncated.  Because
 * these are nontransactional (i.e., not-rollback-able) operations, we just
 * send the inval message immediately without any queuing.
 *
 * Note: in most cases there will have been a relcache flush issued against
 * the rel at the logical level.  We need a separate smgr-level flush because
 * it is possible for backends to have open smgr entries for rels they don't
 * have a relcache entry for, e.g. because the only thing they ever did with
 * the rel is write out dirty shared buffers.
 *
 * Note: because these messages are nontransactional, they won't be captured
 * in commit/abort WAL entries.  Instead, calls to CacheInvalidateSmgr()
 * should happen in low-level smgr.c routines, which are executed while
 * replaying WAL as well as when creating it.
 *
 * Note: In order to avoid bloating SharedInvalidationMessage, we store only
 * three bytes of the backend ID using what would otherwise be padding space.
 * Thus, the maximum possible backend ID is 2^23-1.
 */
void
CacheInvalidateSmgr(RelFileNodeBackend rnode)
{
	SharedInvalidationMessage msg;

	msg.sm.id = SHAREDINVALSMGR_ID;
	msg.sm.backend_hi = rnode.backend >> 16;
	msg.sm.backend_lo = rnode.backend & 0xffff;
	msg.sm.rnode = rnode.node;
	/* check AddCatcacheInvalidationMessage() for an explanation */
	VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));

	SendSharedInvalidMessages(&msg, 1);
}
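The "check AddCatcacheInvalidationMessage() for an explanation" comment in Examples 7 and 9 is about struct padding: every member of msg is assigned, but the compiler-inserted padding bytes between members stay uninitialized, and Memcheck would flag them when the whole struct is copied out. A stripped-down illustration with a hypothetical struct, not PostgreSQL's actual layout:

#include <string.h>
#include <valgrind/memcheck.h>

struct padded_msg {
    char id;       /* 1 byte of data ...                            */
    int  payload;  /* ... typically 3 padding bytes sit before this */
};

void send_msg(char *shared_area)
{
    struct padded_msg msg;

    msg.id = 7;
    msg.payload = 42;

    /* Every *member* is initialized, but the padding is not; copying
     * sizeof(msg) bytes would trip Memcheck. Declaring the whole object
     * defined first is exactly what the PostgreSQL code above does. */
    VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
    memcpy(shared_area, &msg, sizeof(msg));
}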
Example 10: poll_cm_events
/// The connection manager event handler.
void poll_cm_events()
{
    int err;
    struct rdma_cm_event* event;
    struct rdma_cm_event event_copy;
    void* private_data_copy = nullptr;

    while ((err = rdma_get_cm_event(ec_, &event)) == 0) {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
        VALGRIND_MAKE_MEM_DEFINED(event, sizeof(struct rdma_cm_event));
        memcpy(&event_copy, event, sizeof(struct rdma_cm_event));
        if (event_copy.param.conn.private_data) {
            VALGRIND_MAKE_MEM_DEFINED(
                event_copy.param.conn.private_data,
                event_copy.param.conn.private_data_len);
            private_data_copy =
                malloc(event_copy.param.conn.private_data_len);
            if (!private_data_copy)
                throw InfinibandException("malloc failed");
            memcpy(private_data_copy, event_copy.param.conn.private_data,
                   event_copy.param.conn.private_data_len);
            event_copy.param.conn.private_data = private_data_copy;
        }
#pragma GCC diagnostic pop

        rdma_ack_cm_event(event);
        on_cm_event(&event_copy);

        if (private_data_copy) {
            free(private_data_copy);
            private_data_copy = nullptr;
        }
    }

    if (err == -1 && errno == EAGAIN)
        return;
    if (err)
        throw InfinibandException("rdma_get_cm_event failed");
}
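A C++-specific wrinkle in Example 10: the Valgrind client-request macros expand to C-style casts, which is why the code pushes and pops the -Wold-style-cast diagnostic around each use. One way to confine that dance to a single place is a wrapper macro using _Pragma; MAKE_MEM_DEFINED_QUIET is a made-up name for this sketch:

/* Hypothetical wrapper: the pragmas live here instead of at every call site. */
#define MAKE_MEM_DEFINED_QUIET(addr, len)                        \
    do {                                                         \
        _Pragma("GCC diagnostic push")                           \
        _Pragma("GCC diagnostic ignored \"-Wold-style-cast\"")   \
        VALGRIND_MAKE_MEM_DEFINED((addr), (len));                \
        _Pragma("GCC diagnostic pop")                            \
    } while (0)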
Example 11: dm_free_aux
void dm_free_aux(void *p)
{
	char *ptr;
	size_t i;
	struct memblock *mb = ((struct memblock *) p) - 1;

	if (!p)
		return;

	dm_bounds_check();

	/* sanity check */
	assert(mb->magic == p);
#ifdef VALGRIND_POOL
	VALGRIND_MAKE_MEM_DEFINED(p, mb->length);
#endif
	/* check data at the far boundary */
	ptr = (char *) p + mb->length;
	for (i = 0; i < sizeof(unsigned long); i++)
		if (ptr[i] != (char) mb->id)
			assert(!"Damage at far end of block");

	/* have we freed this before ? */
	assert(mb->id != 0);

	/* unlink */
	if (mb->prev)
		mb->prev->next = mb->next;
	else
		_head = mb->next;

	if (mb->next)
		mb->next->prev = mb->prev;
	else
		_tail = mb->prev;

	mb->id = 0;

	/* stomp a different pattern across the memory */
	ptr = p;
	for (i = 0; i < mb->length; i++)
		ptr[i] = i & 1 ? (char) 0xde : (char) 0xad;

	assert(_mem_stats.blocks_allocated);
	_mem_stats.blocks_allocated--;
	_mem_stats.bytes -= mb->length;

	/* free the memory */
	free(mb);
}
Example 12: ruby_signal
static sighandler_t
ruby_signal(int signum, sighandler_t handler)
{
    struct sigaction sigact, old;

#if 0
    rb_trap_accept_nativethreads[signum] = 0;
#endif

    sigemptyset(&sigact.sa_mask);
#ifdef USE_SIGALTSTACK
    if (handler == SIG_IGN || handler == SIG_DFL) {
        sigact.sa_handler = handler;
        sigact.sa_flags = 0;
    }
    else {
        sigact.sa_sigaction = (ruby_sigaction_t*)handler;
        sigact.sa_flags = SA_SIGINFO;
    }
#else
    sigact.sa_handler = handler;
    sigact.sa_flags = 0;
#endif

    switch (signum) {
#ifdef SA_NOCLDWAIT
      case SIGCHLD:
        if (handler == SIG_IGN)
            sigact.sa_flags |= SA_NOCLDWAIT;
        break;
#endif
#if defined(SA_ONSTACK) && defined(USE_SIGALTSTACK)
      case SIGSEGV:
#ifdef SIGBUS
      case SIGBUS:
#endif
        sigact.sa_flags |= SA_ONSTACK;
        break;
#endif
    }
    (void)VALGRIND_MAKE_MEM_DEFINED(&old, sizeof(old));
    if (sigaction(signum, &sigact, &old) < 0) {
        return SIG_ERR;
    }

    if (old.sa_flags & SA_SIGINFO)
        return (sighandler_t)old.sa_sigaction;
    else
        return old.sa_handler;
}
Example 13: ucma_query_route
static int ucma_query_route(struct rdma_cm_id *id)
{
	struct ucma_abi_query_route_resp *resp;
	struct ucma_abi_query_route *cmd;
	struct cma_id_private *id_priv;
	void *msg;
	int ret, size, i;

	CMA_CREATE_MSG_CMD_RESP(msg, cmd, resp, UCMA_CMD_QUERY_ROUTE, size);
	id_priv = container_of(id, struct cma_id_private, id);
	cmd->id = id_priv->handle;

	ret = write(id->channel->fd, msg, size);
	if (ret != size)
		return (ret >= 0) ? ERR(ECONNREFUSED) : -1;

	VALGRIND_MAKE_MEM_DEFINED(resp, sizeof *resp);

	if (resp->num_paths) {
		id->route.path_rec = malloc(sizeof *id->route.path_rec *
					    resp->num_paths);
		if (!id->route.path_rec)
			return ERR(ENOMEM);

		id->route.num_paths = resp->num_paths;
		for (i = 0; i < resp->num_paths; i++)
			ibv_copy_path_rec_from_kern(&id->route.path_rec[i],
						    &resp->ib_route[i]);
	}

	memcpy(id->route.addr.addr.ibaddr.sgid.raw, resp->ib_route[0].sgid,
	       sizeof id->route.addr.addr.ibaddr.sgid);
	memcpy(id->route.addr.addr.ibaddr.dgid.raw, resp->ib_route[0].dgid,
	       sizeof id->route.addr.addr.ibaddr.dgid);
	id->route.addr.addr.ibaddr.pkey = resp->ib_route[0].pkey;
	memcpy(&id->route.addr.src_addr, &resp->src_addr,
	       sizeof resp->src_addr);
	memcpy(&id->route.addr.dst_addr, &resp->dst_addr,
	       sizeof resp->dst_addr);

	if (!id_priv->cma_dev && resp->node_guid) {
		ret = ucma_get_device(id_priv, resp->node_guid);
		if (ret)
			return ret;
		id_priv->id.port_num = resp->port_num;
	}

	return 0;
}
Example 14: psmi_mq_req_copy
static void __recvpath
psmi_mq_req_copy(psm_mq_req_t req, psm_epaddr_t epaddr, const void *buf,
		 uint32_t nbytes)
{
    // recv_msglen may have been changed by an unexpected receive buffer
    uint32_t msglen_left = req->recv_msglen - req->recv_msgoff;
    uint32_t msglen_this = min(msglen_left, nbytes);
    uint8_t *msgptr = (uint8_t *)req->buf + req->recv_msgoff;

    VALGRIND_MAKE_MEM_DEFINED(msgptr, msglen_this);
    psmi_mq_mtucpy(msgptr, buf, msglen_this);
    req->recv_msgoff += msglen_this;
    req->send_msgoff += nbytes;
    return;
}
Example 15: _cairo_freelist_alloc
void *
_cairo_freelist_alloc (cairo_freelist_t *freelist)
{
    if (freelist->first_free_node) {
	cairo_freelist_node_t *node;

	node = freelist->first_free_node;
	VG (VALGRIND_MAKE_MEM_DEFINED (node, sizeof (node->next)));
	freelist->first_free_node = node->next;
	VG (VALGRIND_MAKE_MEM_UNDEFINED (node, freelist->nodesize));

	return node;
    }

    return malloc (freelist->nodesize);
}