This article collects typical usage examples of the sock_owned_by_user function from the Linux kernel's C sources. If you have been wondering what exactly sock_owned_by_user does and how to use it, the hand-picked code examples below should help.
The following presents 15 code examples of sock_owned_by_user, ordered by popularity.
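Before the examples, a quick orientation: sock_owned_by_user(sk) reports whether a process currently owns the socket via lock_sock(). Bottom-half code (softirqs, timers) therefore takes bh_lock_sock() and, if a process owns the socket, either defers its work (for example by re-arming a timer) or queues the packet on the backlog that release_sock() will drain. Here is a minimal sketch of the receive-side pattern that most examples below share; my_sk_receive() and my_rcv() are hypothetical names, and the two-argument sk_add_backlog() matches the older kernels these examples come from:

/* Hypothetical bottom-half receive handler showing the canonical
 * sock_owned_by_user() pattern. */
static int my_sk_receive(struct sock *sk, struct sk_buff *skb)
{
    int rc = 0;

    bh_lock_sock(sk);                    /* BH-side spinlock */
    if (!sock_owned_by_user(sk)) {
        /* No process holds the socket lock: process immediately. */
        rc = my_rcv(sk, skb);
    } else {
        /* A process owns the socket via lock_sock(); queue the skb on
         * the backlog, which release_sock() will drain later. */
        if (sk_add_backlog(sk, skb))
            rc = -ENOBUFS;               /* backlog full: caller drops */
    }
    bh_unlock_sock(sk);
    return rc;
}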
Example 1: svc_reclassify_socket
static void svc_reclassify_socket(struct socket *sock)
{
    struct sock *sk = sock->sk;

    WARN_ON_ONCE(sock_owned_by_user(sk));
    if (sock_owned_by_user(sk))
        return;

    switch (sk->sk_family) {
    case AF_INET:
        sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
                                      &svc_slock_key[0],
                                      "sk_xprt.xpt_lock-AF_INET-NFSD",
                                      &svc_key[0]);
        break;

    case AF_INET6:
        sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
                                      &svc_slock_key[1],
                                      "sk_xprt.xpt_lock-AF_INET6-NFSD",
                                      &svc_key[1]);
        break;

    default:
        BUG();
    }
}
Example 2: dn_slow_timer
static void dn_slow_timer(unsigned long arg)
{
    struct sock *sk = (struct sock *)arg;
    struct dn_scp *scp = DN_SK(sk);

    bh_lock_sock(sk);

    if (sock_owned_by_user(sk)) {
        sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10);
        goto out;
    }

    if (scp->persist && scp->persist_fxn) {
        if (scp->persist <= SLOW_INTERVAL) {
            scp->persist = 0;
            if (scp->persist_fxn(sk))
                goto out;
        } else {
            scp->persist -= SLOW_INTERVAL;
        }
    }

    if (scp->keepalive && scp->keepalive_fxn && (scp->state == DN_RUN)) {
        if ((jiffies - scp->stamp) >= scp->keepalive)
            scp->keepalive_fxn(sk);
    }

    sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
out:
    bh_unlock_sock(sk);
    sock_put(sk);
}
Example 3: dispatch
static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
{
    struct sock *sk = (struct sock *)tport->usr_handle;
    u32 res;

    /*
     * Process message if socket is unlocked; otherwise add to backlog queue
     *
     * This code is based on sk_receive_skb(), but must be distinct from it
     * since a TIPC-specific filter/reject mechanism is utilized
     */
    bh_lock_sock(sk);
    if (!sock_owned_by_user(sk)) {
        res = filter_rcv(sk, buf);
    } else {
        if (sk_add_backlog(sk, buf))
            res = TIPC_ERR_OVERLOAD;
        else
            res = TIPC_OK;
    }
    bh_unlock_sock(sk);

    return res;
}
Example 4: pfq_alloc_sk_filter
struct sk_filter *
pfq_alloc_sk_filter(struct sock_fprog *fprog)
{
    struct sock sk;
    int rv;

    sock_init_data(NULL, &sk);
    sk.sk_filter = NULL;
    atomic_set(&sk.sk_omem_alloc, 0);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
    sock_reset_flag(&sk, SOCK_FILTER_LOCKED);
#endif

    pr_devel("[PFQ] BPF: new fprog (len %d)\n", fprog->len);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,8) && LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0))
    if ((rv = __sk_attach_filter(fprog, &sk, sock_owned_by_user(&sk))))
#else
    if ((rv = sk_attach_filter(fprog, &sk)))
#endif
    {
        pr_devel("[PFQ] BPF: sk_attach_filter error: (%d)!\n", rv);
        return NULL;
    }

    return sk.sk_filter;
}
Example 5: sk_attach_filter
/**
 * sk_attach_filter - attach a socket filter
 * @fprog: the filter program
 * @sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
    struct sk_filter *fp, *old_fp;
    unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
    int err;

    /* Make sure new filter is there and in the right amounts. */
    if (fprog->filter == NULL)
        return -EINVAL;

    fp = sock_kmalloc(sk, fsize + sizeof(*fp), GFP_KERNEL);
    if (!fp)
        return -ENOMEM;
    if (copy_from_user(fp->insns, fprog->filter, fsize)) {
        sock_kfree_s(sk, fp, fsize + sizeof(*fp));
        return -EFAULT;
    }

    atomic_set(&fp->refcnt, 1);
    fp->len = fprog->len;

    err = sk_chk_filter(fp->insns, fp->len);
    if (err) {
        sk_filter_uncharge(sk, fp);
        return err;
    }

    old_fp = rcu_dereference_protected(sk->sk_filter,
                                       sock_owned_by_user(sk));
    rcu_assign_pointer(sk->sk_filter, fp);

    if (old_fp)
        sk_filter_uncharge(sk, old_fp);

    return 0;
}
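For context, sk_attach_filter() is what the kernel runs when userspace calls setsockopt() with SO_ATTACH_FILTER. Below is a minimal userspace sketch, assuming a classic BPF accept-all program (the filter contents are illustrative only):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/filter.h>

int main(void)
{
    /* One-instruction classic BPF program: accept every packet. */
    struct sock_filter insns[] = {
        BPF_STMT(BPF_RET | BPF_K, 0xFFFFFFFF),
    };
    struct sock_fprog prog = {
        .len    = sizeof(insns) / sizeof(insns[0]),
        .filter = insns,
    };
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd < 0) {
        perror("socket");
        return 1;
    }
    /* This setsockopt() call reaches sk_attach_filter() in the kernel. */
    if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)) < 0) {
        perror("setsockopt(SO_ATTACH_FILTER)");
        return 1;
    }
    printf("filter attached\n");
    return 0;
}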
Example 6: dn_slow_timer
static void dn_slow_timer(unsigned long arg)
{
    struct sock *sk = (struct sock *)arg;
    struct dn_scp *scp = DN_SK(sk);

    sock_hold(sk);
    bh_lock_sock(sk);

    if (sock_owned_by_user(sk)) {
        sk->sk_timer.expires = jiffies + HZ / 10;
        add_timer(&sk->sk_timer);
        goto out;
    }

    /*
     * The persist timer is the standard slow timer used for retransmits
     * in both connection establishment and disconnection as well as
     * in the RUN state. The different states are catered for by changing
     * the function pointer in the socket. Setting the timer to a value
     * of zero turns it off. We allow the persist_fxn to turn the
     * timer off in a permanent way by returning non-zero, so that
     * timer based routines may remove sockets. This is why we have a
     * sock_hold()/sock_put() around the timer to prevent the socket
     * going away in the middle.
     */
    if (scp->persist && scp->persist_fxn) {
        if (scp->persist <= SLOW_INTERVAL) {
            scp->persist = 0;
            if (scp->persist_fxn(sk))
                goto out;
        } else {
            scp->persist -= SLOW_INTERVAL;
        }
    }

    /*
     * Check for keepalive timeout. After the other timer 'cos if
     * the previous timer caused a retransmit, we don't need to
     * do this. scp->stamp is the last time that we sent a packet.
     * The keepalive function sends a link service packet to the
     * other end. If it remains unacknowledged, the standard
     * socket timers will eventually shut the socket down. Each
     * time we do this, scp->stamp will be updated, thus
     * we won't try and send another until scp->keepalive has passed
     * since the last successful transmission.
     */
    if (scp->keepalive && scp->keepalive_fxn && (scp->state == DN_RUN)) {
        if ((jiffies - scp->stamp) >= scp->keepalive)
            scp->keepalive_fxn(sk);
    }

    sk->sk_timer.expires = jiffies + SLOW_INTERVAL;
    add_timer(&sk->sk_timer);
out:
    bh_unlock_sock(sk);
    sock_put(sk);
}
Example 7: llc_conn_handler
void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
{
    struct llc_addr saddr, daddr;
    struct sock *sk;

    llc_pdu_decode_sa(skb, saddr.mac);
    llc_pdu_decode_ssap(skb, &saddr.lsap);
    llc_pdu_decode_da(skb, daddr.mac);
    llc_pdu_decode_dsap(skb, &daddr.lsap);

    sk = __llc_lookup(sap, &saddr, &daddr);
    if (!sk)
        goto drop;

    bh_lock_sock(sk);
    /*
     * This has to be done here and not at the upper layer ->accept
     * method because of the way the PROCOM state machine works:
     * it needs to set several state variables (see, for instance,
     * llc_adm_actions_2 in net/llc/llc_c_st.c) and send a packet to
     * the originator of the new connection, and this state has to be
     * in the newly created struct sock private area. -acme
     */
    if (unlikely(sk->sk_state == TCP_LISTEN)) {
        struct sock *newsk = llc_create_incoming_sock(sk, skb->dev,
                                                      &saddr, &daddr);
        if (!newsk)
            goto drop_unlock;
        skb_set_owner_r(skb, newsk);
    } else {
        /*
         * Can't be skb_set_owner_r, this will be done at the
         * llc_conn_state_process function, later on, when we will use
         * skb_queue_rcv_skb to send it to upper layers, this is
         * another trick required to cope with how the PROCOM state
         * machine works. -acme
         */
        skb->sk = sk;
    }
    if (!sock_owned_by_user(sk))
        llc_conn_rcv(sk, skb);
    else {
        llc_set_backlog_type(skb, LLC_PACKET);
        if (sk_add_backlog(sk, skb))
            goto drop_unlock;
    }
out:
    bh_unlock_sock(sk);
    sock_put(sk);
    return;
drop:
    kfree_skb(skb);
    return;
drop_unlock:
    kfree_skb(skb);
    goto out;
}
Example 8: sdp_poll_tx_timeout
static void sdp_poll_tx_timeout(unsigned long data)
{
    struct sdp_sock *ssk = (struct sdp_sock *)data;
    struct sock *sk = sk_ssk(ssk);
    u32 inflight, wc_processed;

    sdp_prf1(sk_ssk(ssk), NULL, "TX timeout: inflight=%d, head=%d tail=%d",
             (u32) tx_ring_posted(ssk),
             ring_head(ssk->tx_ring), ring_tail(ssk->tx_ring));

    /* Only process if the socket is not in use */
    bh_lock_sock(sk);
    if (sock_owned_by_user(sk)) {
        sdp_prf(sk_ssk(ssk), NULL, "TX comp: socket is busy");
        if (sdp_tx_handler_select(ssk) && sk->sk_state != TCP_CLOSE &&
            likely(ssk->qp_active)) {
            sdp_prf1(sk, NULL, "schedule a timer");
            mod_timer(&ssk->tx_ring.timer, jiffies + SDP_TX_POLL_TIMEOUT);
        }
        SDPSTATS_COUNTER_INC(tx_poll_busy);
        goto out;
    }

    if (unlikely(!ssk->qp || sk->sk_state == TCP_CLOSE)) {
        SDPSTATS_COUNTER_INC(tx_poll_no_op);
        goto out;
    }

    wc_processed = sdp_process_tx_cq(ssk);
    if (!wc_processed)
        SDPSTATS_COUNTER_INC(tx_poll_miss);
    else {
        sdp_post_sends(ssk, GFP_ATOMIC);
        SDPSTATS_COUNTER_INC(tx_poll_hit);
    }

    inflight = (u32) tx_ring_posted(ssk);
    sdp_prf1(sk_ssk(ssk), NULL, "finished tx processing. inflight = %d",
             tx_ring_posted(ssk));

    /* If there are still packets in flight and the timer has not already
     * been scheduled by the Tx routine then schedule it here to guarantee
     * completion processing of these packets */
    if (inflight && likely(ssk->qp_active))
        mod_timer(&ssk->tx_ring.timer, jiffies + SDP_TX_POLL_TIMEOUT);
out:
    if (ssk->tx_ring.rdma_inflight && ssk->tx_ring.rdma_inflight->busy) {
        sdp_prf1(sk, NULL, "RDMA is inflight - arming irq");
        sdp_arm_tx_cq(sk);
    }
    bh_unlock_sock(sk);
}
Example 9: set_tcp_sock_sync_prop
/*
 * Set sock sync properties.
 */
int set_tcp_sock_sync_prop(struct bst_set_sock_sync_prop *set_prop)
{
    int err = 0;
    struct sock *sk;
    struct bastet_sock *bsk;
    struct bst_sock_comm_prop *guide = &set_prop->guide;

    sk = get_sock_by_comm_prop(guide);
    if (NULL == sk) {
        BASTET_LOGE("can not find sock by lport: %d, lIp: %pI4, rport: %d, rIp: %pI4",
                    guide->local_port, &guide->local_ip,
                    guide->remote_port, &guide->remote_ip);
        return -ENOENT;
    }

    if (sk->sk_state == TCP_TIME_WAIT) {
        BASTET_LOGE("sk: %p not expected time wait sock", sk);
        inet_twsk_put(inet_twsk(sk));
        return -EPERM;
    }

    bsk = sk->bastet;
    if (NULL == bsk) {
        BASTET_LOGE("sk: %p not expected bastet null", sk);
        err = -EPERM;
        goto out_put;
    }

    BASTET_LOGI("sk: %p", sk);

    spin_lock_bh(&sk->sk_lock.slock);
    if (NULL != bsk->sync_p) {
        BASTET_LOGE("sk: %p has a pending sock set", sk);
        err = -EPERM;
        goto out_unlock;
    }

    cancel_sock_bastet_timer(sk);

    if (sock_owned_by_user(sk)) {
        err = setup_sock_sync_set_timer(sk, &set_prop->sync_prop);
        goto out_unlock;
    }

    sock_set_internal(sk, &set_prop->sync_prop);

out_unlock:
    spin_unlock_bh(&sk->sk_lock.slock);
    adjust_traffic_flow_by_sock(sk, set_prop->sync_prop.tx, set_prop->sync_prop.rx);
out_put:
    sock_put(sk);

    return err;
}
Example 10: set_tcp_sock_closed
/*
 * Close the sock when modem bastet fails this sock.
 */
int set_tcp_sock_closed(struct bst_sock_comm_prop *guide)
{
    int err = 0;
    struct sock *sk;
    struct bastet_sock *bsk;

    sk = get_sock_by_comm_prop(guide);
    if (NULL == sk) {
        BASTET_LOGE("can not find sock by lport: %d, lIp: %pI4, rport: %d, rIp: %pI4",
                    guide->local_port, &guide->local_ip,
                    guide->remote_port, &guide->remote_ip);
        return -ENOENT;
    }

    if (sk->sk_state == TCP_TIME_WAIT) {
        BASTET_LOGE("sk: %p not expected time wait sock", sk);
        inet_twsk_put(inet_twsk(sk));
        return -EPERM;
    }

    bsk = sk->bastet;
    if (NULL == bsk) {
        BASTET_LOGE("sk: %p not expected bastet null", sk);
        err = -EPERM;
        goto out_put;
    }

    BASTET_LOGI("sk: %p", sk);

    spin_lock_bh(&sk->sk_lock.slock);
    if (BST_SOCK_INVALID != bsk->bastet_sock_state
        && BST_SOCK_UPDATING != bsk->bastet_sock_state) {
        BASTET_LOGE("sk: %p sync_current_state: %d not expected",
                    sk, bsk->bastet_sock_state);
        goto out_unlock;
    }

    cancel_sock_bastet_timer(sk);
    bsk->bastet_sock_state = BST_SOCK_NOT_USED;

    if (sock_owned_by_user(sk)) {
        setup_sock_sync_close_timer(sk);
        goto out_unlock;
    }

    set_sock_close_internal(sk);

out_unlock:
    spin_unlock_bh(&sk->sk_lock.slock);
out_put:
    sock_put(sk);

    return err;
}
Example 11: x25_receive_data
static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
{
    struct sock *sk;
    unsigned short frametype;
    unsigned int lci;

    frametype = skb->data[2];
    lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);

    /*
     * An LCI of zero is always for us, and it's always a link control
     * frame.
     */
    if (lci == 0) {
        x25_link_control(skb, nb, frametype);
        return 0;
    }

    /*
     * Find an existing socket.
     */
    if ((sk = x25_find_socket(lci, nb)) != NULL) {
        int queued = 1;

        skb->h.raw = skb->data;
        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
            queued = x25_process_rx_frame(sk, skb);
        } else {
            sk_add_backlog(sk, skb);
        }
        bh_unlock_sock(sk);
        return queued;
    }

    /*
     * Is it a Call Request? If so, process it.
     */
    if (frametype == X25_CALL_REQUEST)
        return x25_rx_call_request(skb, nb, lci);

    /*
     * It's not a Call Request, nor is it a control frame.
     * Let the caller throw it away.
     */
/*
    x25_transmit_clear_request(nb, lci, 0x0D);
*/
    if (frametype != X25_CLEAR_CONFIRMATION)
        printk(KERN_DEBUG "x25_receive_data(): unknown frame type %2x\n", frametype);

    return 0;
}
Example 12: x25_timer_expiry
static void x25_timer_expiry(unsigned long param)
{
    struct sock *sk = (struct sock *)param;

    bh_lock_sock(sk);
    if (sock_owned_by_user(sk)) { /* can currently only occur in state 3 */
        if (x25_sk(sk)->state == X25_STATE_3)
            x25_start_t2timer(sk);
    } else
        x25_do_timer_expiry(sk);
    bh_unlock_sock(sk);
}
Example 13: sk_detach_filter
int sk_detach_filter(struct sock *sk)
{
    int ret = -ENOENT;
    struct sk_filter *filter;

    filter = rcu_dereference_protected(sk->sk_filter,
                                       sock_owned_by_user(sk));
    if (filter) {
        rcu_assign_pointer(sk->sk_filter, NULL);
        sk_filter_uncharge(sk, filter);
        ret = 0;
    }
    return ret;
}
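This is the counterpart of Example 5: sk_detach_filter() is what the kernel runs for setsockopt() with SO_DETACH_FILTER, whose option value is ignored. A short userspace sketch, reusing the fd from the SO_ATTACH_FILTER example above:

/* Detach a previously attached classic BPF filter; the kernel-side
 * work is done by sk_detach_filter() shown above. */
if (setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, NULL, 0) < 0)
    perror("setsockopt(SO_DETACH_FILTER)");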
Example 14: x25_receive_data
static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
{
    struct sock *sk;
    unsigned short frametype;
    unsigned int lci;

    if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
        return 0;

    frametype = skb->data[2];
    lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);

    if (lci == 0) {
        x25_link_control(skb, nb, frametype);
        return 0;
    }

    if ((sk = x25_find_socket(lci, nb)) != NULL) {
        int queued = 1;

        skb_reset_transport_header(skb);
        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
            queued = x25_process_rx_frame(sk, skb);
        } else {
            queued = !sk_add_backlog(sk, skb);
        }
        bh_unlock_sock(sk);
        sock_put(sk);
        return queued;
    }

    if (frametype == X25_CALL_REQUEST)
        return x25_rx_call_request(skb, nb, lci);

    if (x25_forward_data(lci, nb, skb)) {
        if (frametype == X25_CLEAR_CONFIRMATION)
            x25_clear_forward_by_lci(lci);
        kfree_skb(skb);
        return 1;
    }

    if (frametype != X25_CLEAR_CONFIRMATION)
        printk(KERN_DEBUG "x25_receive_data(): unknown frame type %2x\n", frametype);

    return 0;
}
Example 15: ccid2_hc_tx_rto_expire
static void ccid2_hc_tx_rto_expire(unsigned long data)
{
    struct sock *sk = (struct sock *)data;
    struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
    long s;

    bh_lock_sock(sk);
    if (sock_owned_by_user(sk)) {
        sk_reset_timer(sk, &hctx->ccid2hctx_rtotimer,
                       jiffies + HZ / 5);
        goto out;
    }

    ccid2_pr_debug("RTO_EXPIRE\n");

    ccid2_hc_tx_check_sanity(hctx);

    /* back-off timer */
    hctx->ccid2hctx_rto <<= 1;

    s = hctx->ccid2hctx_rto / HZ;
    if (s > 60)
        hctx->ccid2hctx_rto = 60 * HZ;

    ccid2_start_rto_timer(sk);

    /* adjust pipe, cwnd etc */
    hctx->ccid2hctx_pipe = 0;
    hctx->ccid2hctx_ssthresh = hctx->ccid2hctx_cwnd >> 1;
    if (hctx->ccid2hctx_ssthresh < 2)
        hctx->ccid2hctx_ssthresh = 2;
    ccid2_change_cwnd(sk, 1);

    /* clear state about stuff we sent */
    hctx->ccid2hctx_seqt = hctx->ccid2hctx_seqh;
    hctx->ccid2hctx_ssacks = 0;
    hctx->ccid2hctx_acks = 0;
    hctx->ccid2hctx_sent = 0;

    /* clear ack ratio state. */
    hctx->ccid2hctx_arsent = 0;
    hctx->ccid2hctx_ackloss = 0;
    hctx->ccid2hctx_rpseq = 0;
    hctx->ccid2hctx_rpdupack = -1;
    ccid2_change_l_ack_ratio(sk, 1);
    ccid2_hc_tx_check_sanity(hctx);
out:
    bh_unlock_sock(sk);
    sock_put(sk);
}