本文整理汇总了C++中xchg函数的典型用法代码示例。如果您正苦于以下问题:C++ xchg函数的具体用法?C++ xchg怎么用?C++ xchg使用的例子?那么, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了xchg函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: xen_resume_notifier
static void xen_resume_notifier(int _suspend_cancelled)
{
	/* Advance the shutdown state machine to RESUMING, atomically
	 * fetching the state we are leaving. */
	int prev_state = xchg(&shutting_down, SHUTDOWN_RESUMING);

	/* A resume notification is only legal out of the SUSPEND state. */
	BUG_ON(prev_state != SHUTDOWN_SUSPEND);

	/* Record whether the suspend was cancelled for later consumers. */
	suspend_cancelled = _suspend_cancelled;
}
示例2: return
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
}
示例3: rt_set_trap_handler
RT_TRAP_HANDLER rt_set_trap_handler (RT_TRAP_HANDLER handler)
{
	/* Atomically install 'handler' as the global RTAI trap handler and
	 * hand back the one it displaced so the caller can restore it. */
	RT_TRAP_HANDLER old_handler;

	old_handler = (RT_TRAP_HANDLER)xchg(&rtai_trap_handler, handler);
	return old_handler;
}
示例4: cv_signal
/* Signal a condition variable: wake a waiter, then atomically raise
 * the condition flag so late arrivals see it as signalled. */
void cv_signal(cond_t *cv){
/* Kick the thread wakeup path first. */
t_wakeup();
/* Atomic store of 1 into the flag; xchg is used for its atomicity,
 * the previous value is deliberately discarded. */
xchg(&cv->cond, 1);
}
示例5: expand_fdset
/*
 * Expand the fdset in the files_struct. Called with the files spinlock
 * held for write.
 *
 * Returns 0 on success, -EMFILE if the request exceeds NR_OPEN, or
 * -ENOMEM if either new bitmap allocation failed. The spinlock is
 * dropped and re-taken around the (possibly sleeping) allocations, so
 * on return the caller must re-check any state read before the call.
 */
int expand_fdset(struct files_struct *files, int nr)
{
fd_set *new_openset = 0, *new_execset = 0;
int error, nfds = 0;
error = -EMFILE;
/* Refuse to grow at or past the system-wide NR_OPEN ceiling. */
if (files->max_fdset >= NR_OPEN || nr >= NR_OPEN)
goto out;
nfds = files->max_fdset;
/* Drop the lock across the allocations below (they may sleep). */
write_unlock(&files->file_lock);
/* Expand to the max in easy steps */
do {
if (nfds < (PAGE_SIZE * 8))
nfds = PAGE_SIZE * 8;
else {
nfds = nfds * 2;
if (nfds > NR_OPEN)
nfds = NR_OPEN;
}
} while (nfds <= nr);
error = -ENOMEM;
new_openset = alloc_fdset(nfds);
new_execset = alloc_fdset(nfds);
write_lock(&files->file_lock);
if (!new_openset || !new_execset)
goto out;
error = 0;
/* Copy the existing tables and install the new pointers */
if (nfds > files->max_fdset) {
/* i: count of initialised longs in the old bitmap;
 * count: bytes of the new bitmaps' tails that must be zeroed. */
int i = files->max_fdset / (sizeof(unsigned long) * 8);
int count = (nfds - files->max_fdset) / 8;
/*
 * Don't copy the entire array if the current fdset is
 * not yet initialised.
 */
if (i) {
memcpy (new_openset, files->open_fds, files->max_fdset/8);
memcpy (new_execset, files->close_on_exec, files->max_fdset/8);
memset (&new_openset->fds_bits[i], 0, count);
memset (&new_execset->fds_bits[i], 0, count);
}
/* Swap in the new size and tables; xchg returns the OLD values,
 * recycling the locals so the frees below release the old tables. */
nfds = xchg(&files->max_fdset, nfds);
new_openset = xchg(&files->open_fds, new_openset);
new_execset = xchg(&files->close_on_exec, new_execset);
/* Free the old tables with the lock dropped — the unlock/free/lock
 * bracketing mirrors the allocation side (free_fdset presumably may
 * sleep; matches the pattern above — confirm against free_fdset). */
write_unlock(&files->file_lock);
free_fdset (new_openset, nfds);
free_fdset (new_execset, nfds);
write_lock(&files->file_lock);
return 0;
}
/* Somebody expanded the array while we slept ... */
out:
/* Error or lost-race path: release anything we allocated, again
 * dropping the spinlock around the frees. */
write_unlock(&files->file_lock);
if (new_openset)
free_fdset(new_openset, nfds);
if (new_execset)
free_fdset(new_execset, nfds);
write_lock(&files->file_lock);
return error;
}
示例6: expand_fd_array
/*
 * Grow files->fd so it can hold at least nr+1 file pointers.
 * Called with files->file_lock held for write; the lock is dropped
 * and re-taken around the sleeping allocation, so callers must
 * re-validate state afterwards. Returns 0 on success (including the
 * lost-a-race case), -EMFILE past NR_OPEN, -ENOMEM on allocation
 * failure.
 */
int expand_fd_array(struct files_struct *files, int nr)
{
struct file **new_fds;
int error, nfds;
error = -EMFILE;
/* Never grow at or past the system-wide NR_OPEN ceiling. */
if (files->max_fds >= NR_OPEN || nr >= NR_OPEN)
goto out;
nfds = files->max_fds;
/* Drop the spinlock: alloc_fd_array below may sleep. */
write_unlock(&files->file_lock);
/*
 * Expand to the max in easy steps, and keep expanding it until
 * we have enough for the requested fd array size.
 */
do {
#if NR_OPEN_DEFAULT < 256
if (nfds < 256)
nfds = 256;
else
#endif
if (nfds < (PAGE_SIZE / sizeof(struct file *)))
nfds = PAGE_SIZE / sizeof(struct file *);
else {
nfds = nfds * 2;
if (nfds > NR_OPEN)
nfds = NR_OPEN;
}
} while (nfds <= nr);
error = -ENOMEM;
new_fds = alloc_fd_array(nfds);
write_lock(&files->file_lock);
if (!new_fds)
goto out;
/* Copy the existing array and install the new pointer */
if (nfds > files->max_fds) {
struct file **old_fds;
int i;
/* xchg returns the old array and old size for the copy/free. */
old_fds = xchg(&files->fd, new_fds);
i = xchg(&files->max_fds, nfds);
/* Don't copy/clear the array if we are creating a new
fd array for fork() */
if (i) {
memcpy(new_fds, old_fds, i * sizeof(struct file *));
/* clear the remainder of the array */
memset(&new_fds[i], 0,
(nfds-i) * sizeof(struct file *));
/* Free the old array with the lock dropped, mirroring the
 * allocation side of this function. */
write_unlock(&files->file_lock);
free_fd_array(old_fds, i);
write_lock(&files->file_lock);
}
} else {
/* Somebody expanded the array while we slept ... */
write_unlock(&files->file_lock);
free_fd_array(new_fds, nfds);
write_lock(&files->file_lock);
}
error = 0;
out:
return error;
}
示例7: sys_umask
asmlinkage long sys_umask(int mask)
{
mask = xchg(¤t->fs->umask, mask & S_IRWXUGO);
return mask;
}
示例8:
/* Wake everyone sleeping on this Note (Go runtime, Plan 9 C dialect). */
void
runtime·notewakeup(Note *n)
{
// Atomically publish the "signalled" state before waking sleepers.
runtime·xchg(&n->state, 1);
// 1<<30 as the wakeup count effectively means "wake all waiters".
futexwakeup(&n->state, 1<<30);
}
示例9: sock_getsockopt
int sock_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
union {
int val;
struct linger ling;
struct timeval tm;
} v;
unsigned int lv = sizeof(int);
int len;
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
switch(optname) {
case SO_DEBUG:
v.val = sock_flag(sk, SOCK_DBG);
break;
case SO_DONTROUTE:
v.val = sock_flag(sk, SOCK_LOCALROUTE);
break;
case SO_BROADCAST:
v.val = !!sock_flag(sk, SOCK_BROADCAST);
break;
case SO_SNDBUF:
v.val = sk->sk_sndbuf;
break;
case SO_RCVBUF:
v.val = sk->sk_rcvbuf;
break;
case SO_REUSEADDR:
v.val = sk->sk_reuse;
break;
case SO_KEEPALIVE:
v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
break;
case SO_TYPE:
v.val = sk->sk_type;
break;
case SO_ERROR:
v.val = -sock_error(sk);
if (v.val==0)
v.val = xchg(&sk->sk_err_soft, 0);
break;
case SO_OOBINLINE:
v.val = !!sock_flag(sk, SOCK_URGINLINE);
break;
case SO_NO_CHECK:
v.val = sk->sk_no_check;
break;
case SO_PRIORITY:
v.val = sk->sk_priority;
break;
case SO_LINGER:
lv = sizeof(v.ling);
v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
v.ling.l_linger = sk->sk_lingertime / HZ;
break;
case SO_BSDCOMPAT:
sock_warn_obsolete_bsdism("getsockopt");
break;
case SO_TIMESTAMP:
v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
!sock_flag(sk, SOCK_RCVTSTAMPNS);
break;
case SO_TIMESTAMPNS:
v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
break;
case SO_RCVTIMEO:
lv=sizeof(struct timeval);
if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
v.tm.tv_sec = 0;
v.tm.tv_usec = 0;
} else {
v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
}
break;
//.........这里部分代码省略.........
示例10: islpci_mgt_transaction
/*
 * Perform one request-response transaction to the device.
 *
 * Serialised by priv->mgmt_lock (one transaction in flight at a time).
 * On success *recvframe holds the matching response (ownership passes
 * to the caller, who must kfree it); returns 0. Otherwise returns
 * -ERESTARTSYS (interrupted while taking the lock), the transmit
 * error, or -ETIMEDOUT.
 */
int
islpci_mgt_transaction(struct net_device *ndev,
int operation, unsigned long oid,
void *senddata, int sendlen,
struct islpci_mgmtframe **recvframe)
{
islpci_private *priv = netdev_priv(ndev);
const long wait_cycle_jiffies = msecs_to_jiffies(ISL38XX_WAIT_CYCLE * 10);
long timeout_left = ISL38XX_MAX_WAIT_CYCLES * wait_cycle_jiffies;
int err;
DEFINE_WAIT(wait);
*recvframe = NULL;
if (mutex_lock_interruptible(&priv->mgmt_lock))
return -ERESTARTSYS;
/* Register on the wait queue BEFORE transmitting, so a response that
 * arrives immediately cannot be missed. */
prepare_to_wait(&priv->mgmt_wqueue, &wait, TASK_UNINTERRUPTIBLE);
err = islpci_mgt_transmit(ndev, operation, oid, senddata, sendlen);
if (err)
goto out;
err = -ETIMEDOUT;
while (timeout_left > 0) {
int timeleft;
struct islpci_mgmtframe *frame;
/* Sleep one polling cycle (or until woken by the rx path). */
timeleft = schedule_timeout_uninterruptible(wait_cycle_jiffies);
/* Atomically claim any response the interrupt handler stashed. */
frame = xchg(&priv->mgmt_received, NULL);
if (frame) {
if (frame->header->oid == oid) {
*recvframe = frame;
err = 0;
goto out;
} else {
/* Response for some other OID: log and drop it, keep waiting. */
printk(KERN_DEBUG
"%s: expecting oid 0x%x, received 0x%x.\n",
ndev->name, (unsigned int) oid,
frame->header->oid);
kfree(frame);
frame = NULL;
}
}
if (timeleft == 0) {
/* Full cycle elapsed with no wakeup: poke the device in case it
 * stalled, and keep waiting until the overall budget runs out. */
printk(KERN_DEBUG
"%s: timeout waiting for mgmt response %lu, "
"triggering device\n",
ndev->name, timeout_left);
islpci_trigger(priv);
}
/* Charge only the time actually slept against the overall budget
 * (timeleft is the unexpired remainder of this cycle). */
timeout_left += timeleft - wait_cycle_jiffies;
}
printk(KERN_WARNING "%s: timeout waiting for mgmt response\n",
ndev->name);
/* TODO: we should reset the device here */
out:
finish_wait(&priv->mgmt_wqueue, &wait);
mutex_unlock(&priv->mgmt_lock);
return err;
}
示例11: islpci_mgt_receive
//.........这里部分代码省略.........
* We appear to have no way to tell the device the
* size of a receive buffer. Thus, if this check
* triggers, we likely have kernel heap corruption. */
if (frag_len > MGMT_FRAME_SIZE) {
printk(KERN_WARNING
"%s: Bogus packet size of %d (%#x).\n",
ndev->name, frag_len, frag_len);
frag_len = MGMT_FRAME_SIZE;
}
/* Ensure the results of device DMA are visible to the CPU. */
pci_dma_sync_single_for_cpu(priv->pdev, buf->pci_addr,
buf->size, PCI_DMA_FROMDEVICE);
/* Perform endianess conversion for PIMFOR header in-place. */
header = pimfor_decode_header(buf->mem, frag_len);
if (!header) {
printk(KERN_WARNING "%s: no PIMFOR header found\n",
ndev->name);
continue;
}
/* The device ID from the PIMFOR packet received from
* the MVC is always 0. We forward a sensible device_id.
* Not that anyone upstream would care... */
header->device_id = priv->ndev->ifindex;
#if VERBOSE > SHOW_ERROR_MESSAGES
DEBUG(SHOW_PIMFOR_FRAMES,
"PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x \n",
header->operation, header->oid, header->device_id,
header->flags, header->length);
/* display the buffer contents for debugging */
display_buffer((char *) header, PIMFOR_HEADER_SIZE);
display_buffer((char *) header + PIMFOR_HEADER_SIZE,
header->length);
#endif
/* nobody sends these */
if (header->flags & PIMFOR_FLAG_APPLIC_ORIGIN) {
printk(KERN_DEBUG
"%s: errant PIMFOR application frame\n",
ndev->name);
continue;
}
/* Determine frame size, skipping OID_INL_TUNNEL headers. */
size = PIMFOR_HEADER_SIZE + header->length;
frame = kmalloc(sizeof (struct islpci_mgmtframe) + size,
GFP_ATOMIC);
if (!frame) {
printk(KERN_WARNING
"%s: Out of memory, cannot handle oid 0x%08x\n",
ndev->name, header->oid);
continue;
}
frame->ndev = ndev;
memcpy(&frame->buf, header, size);
frame->header = (pimfor_header_t *) frame->buf;
frame->data = frame->buf + PIMFOR_HEADER_SIZE;
#if VERBOSE > SHOW_ERROR_MESSAGES
DEBUG(SHOW_PIMFOR_FRAMES,
"frame: header: %p, data: %p, size: %d\n",
frame->header, frame->data, size);
#endif
if (header->operation == PIMFOR_OP_TRAP) {
#if VERBOSE > SHOW_ERROR_MESSAGES
printk(KERN_DEBUG
"TRAP: oid 0x%x, device %i, flags 0x%x length %i\n",
header->oid, header->device_id, header->flags,
header->length);
#endif
/* Create work to handle trap out of interrupt
* context. */
INIT_WORK(&frame->ws, prism54_process_trap);
schedule_work(&frame->ws);
} else {
/* Signal the one waiting process that a response
* has been received. */
if ((frame = xchg(&priv->mgmt_received, frame)) != NULL) {
printk(KERN_WARNING
"%s: mgmt response not collected\n",
ndev->name);
kfree(frame);
}
#if VERBOSE > SHOW_ERROR_MESSAGES
DEBUG(SHOW_TRACING, "Wake up Mgmt Queue\n");
#endif
wake_up(&priv->mgmt_wqueue);
}
}
return 0;
}
示例12: slock_acquire
/* Acquire a spinlock: busy-wait until the atomic exchange observes
 * a value other than 1, i.e. the lock was free and is now ours. */
void slock_acquire(slock_t* lock)
{
	for (;;) {
		if (xchg(&lock->val, 1) != 1)
			break;
	}
}
示例13: install_os_hooks
/*
 * Install the profiler's OS hooks: a module-load notifier, an optional
 * task-exit profiling hook, and interposers on the process-lifecycle
 * system calls (originals are saved for chaining/restoration).
 * Returns 0 on success, -EFAULT on any registration failure; on
 * failure all partially-installed hooks are rolled back.
 */
static int install_os_hooks(void)
{
	int ret;

	/* register module state change notifier */
	nb_init.notifier_call = module_init_notifier;
	ret = register_module_notifier(&nb_init);
	if (ret != 0)
	{
		printk(KERN_ERR "[CPA] register_module_notifier() fails\n");
		return -EFAULT;
	}

#ifdef CONFIG_PROFILING
	ret = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
	if (ret != 0)
	{
		printk(KERN_ERR "[CPA] profile_event_register() fails\n");
		/* Fix: roll back the module notifier registered above so a
		 * failed install leaves no stale callback behind. */
		unregister_module_notifier(&nb_init);
		return -EFAULT;
	}
#endif

	/* hook necessary system call table entries, saving the originals */
	px_original_sys_fork = (sys_fork_t) xchg(&system_call_table[__NR_fork - __NR_SYSCALL_BASE], px_sys_fork);
	px_original_sys_vfork = (sys_vfork_t) xchg(&system_call_table[__NR_vfork - __NR_SYSCALL_BASE], px_sys_vfork);
	px_original_sys_clone = (sys_clone_t) xchg(&system_call_table[__NR_clone - __NR_SYSCALL_BASE], px_sys_clone);
	px_original_sys_execve = (sys_execve_t) xchg(&system_call_table[__NR_execve - __NR_SYSCALL_BASE], px_sys_execve);
	px_original_sys_mmap = (sys_mmap_t) xchg(&system_call_table[__NR_mmap - __NR_SYSCALL_BASE], px_sys_mmap);
	px_original_sys_mmap2 = (sys_mmap2_t) xchg(&system_call_table[__NR_mmap2 - __NR_SYSCALL_BASE], px_sys_mmap2);
	px_original_sys_exit = (sys_exit_t) xchg(&system_call_table[__NR_exit - __NR_SYSCALL_BASE], px_sys_exit);
	px_original_sys_exit_group = (sys_exit_group_t) xchg(&system_call_table[__NR_exit_group - __NR_SYSCALL_BASE], px_sys_exit_group);
	px_original_sys_kill = (sys_kill_t) xchg(&system_call_table[__NR_kill - __NR_SYSCALL_BASE], px_sys_kill);
	px_original_sys_tkill = (sys_tkill_t) xchg(&system_call_table[__NR_tkill - __NR_SYSCALL_BASE], px_sys_tkill);
	px_original_sys_tgkill = (sys_tgkill_t) xchg(&system_call_table[__NR_tgkill - __NR_SYSCALL_BASE], px_sys_tgkill);
	px_original_sys_prctl = (sys_prctl_t) xchg(&system_call_table[__NR_prctl - __NR_SYSCALL_BASE], px_sys_prctl);

	gb_enable_os_hooks = true;
	return 0;
}
示例14: do_ipv6_setsockopt
static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct net *net = sock_net(sk);
int val, valbool;
int retv = -ENOPROTOOPT;
if (optval == NULL)
val=0;
else {
if (optlen >= sizeof(int)) {
if (get_user(val, (int __user *) optval))
return -EFAULT;
} else
val = 0;
}
valbool = (val!=0);
if (ip6_mroute_opt(optname))
return ip6_mroute_setsockopt(sk, optname, optval, optlen);
lock_sock(sk);
switch (optname) {
case IPV6_ADDRFORM:
if (optlen < sizeof(int))
goto e_inval;
if (val == PF_INET) {
struct ipv6_txoptions *opt;
struct sk_buff *pktopt;
if (sk->sk_type == SOCK_RAW)
break;
if (sk->sk_protocol == IPPROTO_UDP ||
sk->sk_protocol == IPPROTO_UDPLITE) {
struct udp_sock *up = udp_sk(sk);
if (up->pending == AF_INET6) {
retv = -EBUSY;
break;
}
} else if (sk->sk_protocol != IPPROTO_TCP)
break;
if (sk->sk_state != TCP_ESTABLISHED) {
retv = -ENOTCONN;
break;
}
if (ipv6_only_sock(sk) ||
!ipv6_addr_v4mapped(&np->daddr)) {
retv = -EADDRNOTAVAIL;
break;
}
fl6_free_socklist(sk);
ipv6_sock_mc_close(sk);
/*
* Sock is moving from IPv6 to IPv4 (sk_prot), so
* remove it from the refcnt debug socks count in the
* original family...
*/
sk_refcnt_debug_dec(sk);
if (sk->sk_protocol == IPPROTO_TCP) {
struct inet_connection_sock *icsk = inet_csk(sk);
local_bh_disable();
sock_prot_inuse_add(net, sk->sk_prot, -1);
sock_prot_inuse_add(net, &tcp_prot, 1);
local_bh_enable();
sk->sk_prot = &tcp_prot;
icsk->icsk_af_ops = &ipv4_specific;
sk->sk_socket->ops = &inet_stream_ops;
sk->sk_family = PF_INET;
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
} else {
struct proto *prot = &udp_prot;
if (sk->sk_protocol == IPPROTO_UDPLITE)
prot = &udplite_prot;
local_bh_disable();
sock_prot_inuse_add(net, sk->sk_prot, -1);
sock_prot_inuse_add(net, prot, 1);
local_bh_enable();
sk->sk_prot = prot;
sk->sk_socket->ops = &inet_dgram_ops;
sk->sk_family = PF_INET;
}
opt = xchg(&np->opt, NULL);
if (opt)
sock_kfree_s(sk, opt, opt->tot_len);
pktopt = xchg(&np->pktoptions, NULL);
kfree_skb(pktopt);
sk->sk_destruct = inet_sock_destruct;
/*
//.........这里部分代码省略.........
示例15: ulock_release
// Releases a spinlock
// Stores 0 into the lock word with an atomic exchange; the previous
// value is discarded. NOTE(review): using xchg (rather than a plain
// store) presumably provides release ordering on this platform —
// confirm against the port's xchg implementation.
void
ulock_release(ulock_t * lock)
{
xchg(&lock->locked, 0);
}