This page collects typical usage examples of the C function skb_peek(), drawn from the Linux kernel's networking code. If you have been wondering what skb_peek() does or how it is used in practice, the hand-picked examples below should help.
15 code examples of skb_peek() are shown below, ordered by popularity.
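A quick orientation before the examples: skb_peek() returns a pointer to the first buffer on an sk_buff_head queue without dequeuing it, or NULL if the queue is empty, and it does not take a reference on the skb. The caller therefore has to hold the queue's lock (or otherwise own the list) for as long as the peeked pointer is used. A minimal sketch of the common pattern — the helper name peek_first_len is made up for illustration:

static unsigned int peek_first_len(struct sk_buff_head *rx_queue)
{
	struct sk_buff *skb;
	unsigned int len = 0;

	/* Hold the queue lock so the peeked skb cannot be freed under us. */
	spin_lock_bh(&rx_queue->lock);
	skb = skb_peek(rx_queue);	/* NULL if the queue is empty */
	if (skb)
		len = skb->len;
	spin_unlock_bh(&rx_queue->lock);

	return len;
}

Most of the examples below are variations on exactly this peek-under-lock pattern.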
Example 1: dccp_ioctl
int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int rc = -ENOTCONN;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN)
		goto out;

	switch (cmd) {
	case SIOCINQ: {
		struct sk_buff *skb;
		unsigned long amount = 0;

		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount of this packet since
			 * that is all that will be read.
			 */
			amount = skb->len;
		}
		rc = put_user(amount, (int __user *)arg);
	}
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
out:
	release_sock(sk);
	return rc;
}
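For context, SIOCINQ is the same request number as FIONREAD: from user space it simply asks how many bytes the next read would return. A minimal user-space sketch of the caller side (not taken from the DCCP sources):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>

/* Ask the kernel how many bytes the next read on fd would return. */
static int pending_bytes(int fd)
{
	int n = 0;

	if (ioctl(fd, SIOCINQ, &n) < 0) {
		perror("ioctl(SIOCINQ)");
		return -1;
	}
	return n;	/* length of the first queued packet, per dccp_ioctl() */
}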
Example 2: msm_ipc_router_extract_msg
static int msm_ipc_router_extract_msg(struct msghdr *m,
				      struct sk_buff_head *msg_head)
{
	struct sockaddr_msm_ipc *addr;
	struct rr_header *hdr;
	struct sk_buff *temp;
	int offset = 0, data_len = 0, copy_len;

	if (!m || !msg_head) {
		pr_err("%s: Invalid pointers passed\n", __func__);
		return -EINVAL;
	}
	/* Only dereference m for msg_name after it has been validated. */
	addr = (struct sockaddr_msm_ipc *)m->msg_name;

	temp = skb_peek(msg_head);
	hdr = (struct rr_header *)(temp->data);
	/* addr may be NULL; only fill in the source address when the
	 * caller actually supplied a sockaddr buffer.
	 */
	if (addr && (hdr->src_port_id != IPC_ROUTER_ADDRESS)) {
		addr->family = AF_MSM_IPC;
		addr->address.addrtype = MSM_IPC_ADDR_ID;
		addr->address.addr.port_addr.node_id = hdr->src_node_id;
		addr->address.addr.port_addr.port_id = hdr->src_port_id;
		m->msg_namelen = sizeof(struct sockaddr_msm_ipc);
	}

	data_len = hdr->size;
	skb_pull(temp, IPC_ROUTER_HDR_SIZE);
	skb_queue_walk(msg_head, temp) {
		copy_len = data_len < temp->len ? data_len : temp->len;
		if (copy_to_user(m->msg_iov->iov_base + offset, temp->data,
				 copy_len)) {
			pr_err("%s: Copy to user failed\n", __func__);
			return -EFAULT;
		}
		offset += copy_len;
		data_len -= copy_len;
	}

	return offset;	/* bytes copied to user space */
}
Example 3: aun_tx_ack
static void aun_tx_ack(unsigned long seq, int result)
{
	struct sk_buff *skb;
	unsigned long flags;
	struct ec_cb *eb;

	spin_lock_irqsave(&aun_queue_lock, flags);
	skb = skb_peek(&aun_queue);
	while (skb && skb != (struct sk_buff *)&aun_queue)
	{
		struct sk_buff *newskb = skb->next;
		eb = (struct ec_cb *)&skb->cb;
		if (eb->seq == seq)
			goto foundit;
		skb = newskb;
	}
	spin_unlock_irqrestore(&aun_queue_lock, flags);
	printk(KERN_DEBUG "AUN: unknown sequence %ld\n", seq);
	return;

foundit:
	tx_result(skb->sk, eb->cookie, result);
	skb_unlink(skb);
	spin_unlock_irqrestore(&aun_queue_lock, flags);
	kfree_skb(skb);
}
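Example 3 walks the list by following skb->next and comparing against the queue head, an idiom from older kernels. On current kernels the same search is normally written with the skb_queue_walk() macro instead. A sketch under the same locking assumptions — aun_find_seq is a hypothetical helper, while aun_queue and struct ec_cb come from the example above:

static struct sk_buff *aun_find_seq(unsigned long seq)
{
	struct sk_buff *skb;

	/* Caller holds aun_queue_lock, as in aun_tx_ack() above. */
	skb_queue_walk(&aun_queue, skb) {
		struct ec_cb *eb = (struct ec_cb *)&skb->cb;

		if (eb->seq == seq)
			return skb;
	}
	return NULL;
}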
Example 4: raw_ioctl
static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ: {
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ: {
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL)
			amount = skb->len;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}
	default:
#ifdef CONFIG_IP_MROUTE
		return ipmr_ioctl(sk, cmd, (void __user *)arg);
#else
		return -ENOIOCTLCMD;
#endif
	}
}
Example 5: rsi_get_num_pkts_dequeue
/**
 * rsi_get_num_pkts_dequeue() - This function determines the number of
 *				packets to be dequeued based on the number
 *				of bytes calculated using txop.
 *
 * @common: Pointer to the driver private structure.
 * @q_num: the queue from which pkts have to be dequeued
 *
 * Return: pkt_num: Number of pkts to be dequeued.
 */
static u32 rsi_get_num_pkts_dequeue(struct rsi_common *common, u8 q_num)
{
	struct rsi_hw *adapter = common->priv;
	struct sk_buff *skb;
	u32 pkt_cnt = 0;
	s16 txop = common->tx_qinfo[q_num].txop * 32;
	__le16 r_txop;
	struct ieee80211_rate rate;

	rate.bitrate = RSI_RATE_MCS0 * 5 * 10; /* Convert to Kbps */
	if (q_num == VI_Q)
		txop = ((txop << 5) / 80);

	if (skb_queue_len(&common->tx_queue[q_num]))
		skb = skb_peek(&common->tx_queue[q_num]);
	else
		return 0;

	do {
		r_txop = ieee80211_generic_frame_duration(adapter->hw,
							  adapter->vifs[0],
							  common->band,
							  skb->len, &rate);
		txop -= le16_to_cpu(r_txop);
		pkt_cnt += 1;
		/* checking if pkts are still there */
		if (skb_queue_len(&common->tx_queue[q_num]) - pkt_cnt)
			skb = skb->next;
		else
			break;
	} while (txop > 0);

	return pkt_cnt;
}
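Note that the loop above follows skb->next without holding the tx_queue lock, so the walk can race with concurrent enqueue/dequeue. A more defensive sketch of the same counting idea, with the queue lock held and a deliberately simplified per-frame cost — rsi_count_pkts_locked and the byte-based cost model are illustrative, not the driver's actual code:

static u32 rsi_count_pkts_locked(struct sk_buff_head *q, s16 txop)
{
	struct sk_buff *skb;
	u32 pkt_cnt = 0;

	spin_lock_bh(&q->lock);
	skb_queue_walk(q, skb) {
		/* Stand-in cost model: the real driver charges each frame's
		 * airtime obtained from ieee80211_generic_frame_duration().
		 */
		txop -= skb->len;
		pkt_cnt++;
		if (txop <= 0)
			break;
	}
	spin_unlock_bh(&q->lock);

	return pkt_cnt;
}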
Example 6: qdisc_lock
static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
{
	const struct netdev_queue *txq = q->dev_queue;
	spinlock_t *lock = NULL;
	struct sk_buff *skb;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	skb = skb_peek(&q->skb_bad_txq);
	if (skb) {
		/* check the reason of requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			skb = __skb_dequeue(&q->skb_bad_txq);
			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_dec(q, skb);
				qdisc_qstats_cpu_qlen_dec(q);
			} else {
				qdisc_qstats_backlog_dec(q, skb);
				q->q.qlen--;
			}
		} else {
			skb = NULL;
		}
	}

	if (lock)
		spin_unlock(lock);

	return skb;
}
Example 7: dccp_write_xmit
void dccp_write_xmit(struct sock *sk, int block)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;

	while ((skb = skb_peek(&sk->sk_write_queue))) {
		int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		if (err > 0) {
			if (!block) {
				sk_reset_timer(sk, &dp->dccps_xmit_timer,
						msecs_to_jiffies(err)+jiffies);
				break;
			} else
				err = dccp_wait_for_ccid(sk, skb, err);
			if (err && err != -EINTR)
				DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
		}

		skb_dequeue(&sk->sk_write_queue);
		if (err == 0) {
			struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
			const int len = skb->len;

			if (sk->sk_state == DCCP_PARTOPEN) {
				const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
				/*
				 * See 8.1.5 - Handshake Completion.
				 *
				 * For robustness we resend Confirm options until the client has
				 * entered OPEN. During the initial feature negotiation, the MPS
				 * is smaller than usual, reduced by the Change/Confirm options.
				 */
				if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
					DCCP_WARN("Payload too large (%d) for featneg.\n", len);
					dccp_send_ack(sk);
					dccp_feat_list_purge(&dp->dccps_featneg);
				}

				inet_csk_schedule_ack(sk);
				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
							  inet_csk(sk)->icsk_rto,
							  DCCP_RTO_MAX);
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			} else if (dccp_ack_pending(sk))
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			else
				dcb->dccpd_type = DCCP_PKT_DATA;

			err = dccp_transmit_skb(sk, skb);
			ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
			if (err)
				DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
					 err);
		} else {
			dccp_pr_debug("packet discarded due to err=%d\n", err);
			kfree_skb(skb);
		}
	}
}
Example 8: pn_ioctl
static int pn_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct sk_buff *skb;
	int answ;

	switch (cmd) {
	case SIOCINQ:
		lock_sock(sk);
		skb = skb_peek(&sk->sk_receive_queue);
		answ = skb ? skb->len : 0;
		release_sock(sk);
		return put_user(answ, (int __user *)arg);

	case SIOCPNADDRESOURCE:
	case SIOCPNDELRESOURCE: {
		u32 res;

		if (get_user(res, (u32 __user *)arg))
			return -EFAULT;
		if (res >= 256)
			return -EINVAL;
		if (cmd == SIOCPNADDRESOURCE)
			return pn_sock_bind_res(sk, res);
		else
			return pn_sock_unbind_res(sk, res);
	}
	}

	return -ENOIOCTLCMD;
}
Example 9: sock_error
/**
 *	__skb_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@flags: MSG_ flags
 *	@peeked: returns non-zero if this packet has been seen before
 *	@err: error code returned
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as the IPX, AX.25 and Appletalk protocols. It also
 *	finally fixes the long standing peek and read race for datagram
 *	sockets. If you alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if a skb is returned, so the caller
 *	needs to unlock the socket in that case (usually by calling
 *	skb_free_datagram).
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	* 8) Great win.)
 *	*			--ANK (980729)
 *
 *	The order of the tests when we find no data waiting are specified
 *	quite explicitly by POSIX 1003.1g, don't change them without having
 *	the standard around please.
 */
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
				    int *peeked, int *err)
{
	struct sk_buff *skb;
	long timeo;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		unsigned long cpu_flags;

		spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb) {
			*peeked = skb->peeked;
			if (flags & MSG_PEEK) {
				skb->peeked = 1;
				atomic_inc(&skb->users);
			} else {
				/* Vendor sanity check: bail out on a corrupted
				 * queue link instead of unlinking the skb.
				 */
				if (!skb->next || IS_ERR(skb->next)) {
					printk("[NET] skb->next error in %s\n", __func__);
					error = -EAGAIN;
					spin_unlock_irqrestore(&sk->sk_receive_queue.lock,
							       cpu_flags);
					goto no_packet;
				} else {
					__skb_unlink(skb, &sk->sk_receive_queue);
				}
			}
		}
		spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);

		if (skb)
			return skb;

		/* User doesn't want to wait */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_packet(sk, err, &timeo));

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
Example 10: lapb_kick
void lapb_kick(struct lapb_cb *lapb)
{
	struct sk_buff *skb, *skbn;
	unsigned short modulus, start, end;

	modulus = (lapb->mode & LAPB_EXTENDED) ? LAPB_EMODULUS : LAPB_SMODULUS;
	start = !skb_peek(&lapb->ack_queue) ? lapb->va : lapb->vs;
	end = (lapb->va + lapb->window) % modulus;

	if (!(lapb->condition & LAPB_PEER_RX_BUSY_CONDITION) &&
	    start != end && skb_peek(&lapb->write_queue)) {
		lapb->vs = start;

		/*
		 * Dequeue the frame and copy it.
		 */
		skb = skb_dequeue(&lapb->write_queue);

		do {
			if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
				skb_queue_head(&lapb->write_queue, skb);
				break;
			}

			if (skb->sk)
				skb_set_owner_w(skbn, skb->sk);

			/*
			 * Transmit the frame copy.
			 */
			lapb_send_iframe(lapb, skbn, LAPB_POLLOFF);

			lapb->vs = (lapb->vs + 1) % modulus;

			/*
			 * Requeue the original data frame.
			 */
			skb_queue_tail(&lapb->ack_queue, skb);

		} while (lapb->vs != end &&
			 (skb = skb_dequeue(&lapb->write_queue)) != NULL);

		lapb->condition &= ~LAPB_ACK_PENDING_CONDITION;

		if (!lapb_t1timer_running(lapb))
			lapb_start_t1timer(lapb);
	}
}
Example 11: spin_lock
struct cfpkt *cfpkt_qpeek(struct cfpktq *pktq)
{
	struct cfpkt *tmp;

	spin_lock(&pktq->lock);
	tmp = skb_to_pkt(skb_peek(&pktq->head));
	spin_unlock(&pktq->lock);
	return tmp;
}
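Note that skb_peek() returns NULL on an empty queue, so this is only safe because CAIF's skb_to_pkt() is, as far as these sources go, a plain cast that maps NULL to NULL; callers of cfpkt_qpeek() still have to check the result.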
Example 12: cfhsi_tx_done
static void cfhsi_tx_done(struct cfhsi *cfhsi)
{
	struct cfhsi_desc *desc = NULL;
	int len = 0;
	int res;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	desc = (struct cfhsi_desc *)cfhsi->tx_buf;

	do {
		/*
		 * Send flow on if flow off has been previously signalled
		 * and number of packets is below low water mark.
		 */
		spin_lock_bh(&cfhsi->lock);
		if (cfhsi->flow_off_sent &&
			cfhsi->qhead.qlen <= cfhsi->q_low_mark &&
			cfhsi->cfdev.flowctrl) {

			cfhsi->flow_off_sent = 0;
			cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
		}
		spin_unlock_bh(&cfhsi->lock);

		/* Create HSI frame. */
		do {
			len = cfhsi_tx_frm(desc, cfhsi);
			if (!len) {
				spin_lock_bh(&cfhsi->lock);
				if (unlikely(skb_peek(&cfhsi->qhead))) {
					spin_unlock_bh(&cfhsi->lock);
					continue;
				}
				cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
				/* Start inactivity timer. */
				mod_timer(&cfhsi->timer,
					jiffies + CFHSI_INACTIVITY_TOUT);
				spin_unlock_bh(&cfhsi->lock);
				goto done;
			}
		} while (!len);

		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
		}
	} while (res < 0);

done:
	return;
}
Example 13: skb_peek
static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
{
	struct sk_buff *skb = skb_peek(&q->skb_bad_txq);

	if (unlikely(skb))
		skb = __skb_dequeue_bad_txq(q);

	return skb;
}
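The split between this peek and __skb_dequeue_bad_txq() (Example 6) is deliberate: skb_peek() is a cheap read on the common path where skb_bad_txq is empty, and only when something is actually queued does the slow path take the qdisc lock (for TCQ_F_NOLOCK qdiscs) and re-check under it before dequeuing.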
Example 14: shaper_start_xmit
static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct shaper *shaper = dev->priv;
	struct sk_buff *ptr;

	spin_lock(&shaper->lock);
	ptr = shaper->sendq.prev;

	/*
	 *	Set up our packet details
	 */

	SHAPERCB(skb)->shapelatency = 0;
	SHAPERCB(skb)->shapeclock = shaper->recovery;
	if (time_before(SHAPERCB(skb)->shapeclock, jiffies))
		SHAPERCB(skb)->shapeclock = jiffies;
	skb->priority = 0;	/* short term bug fix */
	SHAPERCB(skb)->shapestamp = jiffies;

	/*
	 *	Time slots for this packet.
	 */

	SHAPERCB(skb)->shapelen = shaper_clocks(shaper, skb);

	{
		struct sk_buff *tmp;
		/*
		 *	Up our shape clock by the time pending on the queue
		 *	(Should keep this in the shaper as a variable..)
		 */
		for (tmp = skb_peek(&shaper->sendq); tmp != NULL &&
		     tmp != (struct sk_buff *)&shaper->sendq; tmp = tmp->next)
			SHAPERCB(skb)->shapeclock += SHAPERCB(tmp)->shapelen;

		/*
		 *	Queue over time. Spill packet.
		 */
		if (time_after(SHAPERCB(skb)->shapeclock, jiffies + SHAPER_LATENCY)) {
			dev_kfree_skb(skb);
			shaper->stats.tx_dropped++;
		} else
			skb_queue_tail(&shaper->sendq, skb);
	}

	if (sh_debug)
		printk("Frame queued.\n");

	if (skb_queue_len(&shaper->sendq) > SHAPER_QLEN) {
		ptr = skb_dequeue(&shaper->sendq);
		dev_kfree_skb(ptr);
		shaper->stats.collisions++;
	}
	shaper_kick(shaper);
	spin_unlock(&shaper->lock);
	return 0;
}
Example 15: qdisc_priv
static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		if (!skb_queue_empty(list + prio))
			return skb_peek(list + prio);
	}

	return NULL;
}