This article collects typical usage examples of the C rcu_dereference() function as it appears in the Linux kernel. If you have been wondering what exactly rcu_dereference() does, how to call it, or what real-world uses look like, the hand-picked code examples below should help.
The sections below show 15 code examples of rcu_dereference(), sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better code examples.
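Before the examples, here is a minimal sketch of the pattern every one of them follows: an updater publishes a structure with rcu_assign_pointer(), and readers pick it up with rcu_dereference() inside an rcu_read_lock()/rcu_read_unlock() section. All names in this sketch (my_config, cur_config, and so on) are invented for illustration and do not appear in the examples below.
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/mutex.h>
/* Hypothetical object published through an RCU-protected pointer. */
struct my_config {
	int threshold;
	struct rcu_head rcu;
};
static struct my_config __rcu *cur_config;
static DEFINE_MUTEX(cur_config_mutex);	/* serialises updaters */
/* Reader: rcu_dereference() is only legal inside rcu_read_lock()/
 * rcu_read_unlock(), and the pointer must not be used afterwards. */
static int read_threshold(void)
{
	struct my_config *cfg;
	int val = -1;
	rcu_read_lock();
	cfg = rcu_dereference(cur_config);
	if (cfg)
		val = cfg->threshold;
	rcu_read_unlock();
	return val;
}
/* Updater: publish the new version with rcu_assign_pointer(), then
 * free the old one only after a grace period has elapsed. */
static int set_threshold(int threshold)
{
	struct my_config *new_cfg, *old_cfg;
	new_cfg = kmalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg)
		return -ENOMEM;
	new_cfg->threshold = threshold;
	mutex_lock(&cur_config_mutex);
	old_cfg = rcu_dereference_protected(cur_config,
				lockdep_is_held(&cur_config_mutex));
	rcu_assign_pointer(cur_config, new_cfg);
	mutex_unlock(&cur_config_mutex);
	if (old_cfg)
		kfree_rcu(old_cfg, rcu);
	return 0;
}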
Example 1: afs_check_permit
/*
* check with the fileserver to see if the directory or parent directory is
* permitted to be accessed with this authorisation, and if so, what access it
* is granted
*/
static int afs_check_permit(struct afs_vnode *vnode, struct key *key,
afs_access_t *_access)
{
struct afs_permits *permits;
struct afs_permit *permit;
struct afs_vnode *auth_vnode;
bool valid;
int loop, ret;
_enter("{%x:%u},%x",
vnode->fid.vid, vnode->fid.vnode, key_serial(key));
auth_vnode = afs_get_auth_inode(vnode, key);
if (IS_ERR(auth_vnode)) {
*_access = 0;
_leave(" = %ld", PTR_ERR(auth_vnode));
return PTR_ERR(auth_vnode);
}
ASSERT(S_ISDIR(auth_vnode->vfs_inode.i_mode));
/* check the permits to see if we've got one yet */
if (key == auth_vnode->volume->cell->anonymous_key) {
_debug("anon");
*_access = auth_vnode->status.anon_access;
valid = true;
} else {
valid = false;
rcu_read_lock();
permits = rcu_dereference(auth_vnode->permits);
if (permits) {
permit = permits->permits;
for (loop = permits->count; loop > 0; loop--) {
if (permit->key == key) {
_debug("found in cache");
*_access = permit->access_mask;
valid = true;
break;
}
permit++;
}
}
rcu_read_unlock();
}
if (!valid) {
/* check the status on the file we're actually interested in
* (the post-processing will cache the result on auth_vnode) */
_debug("no valid permit");
set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
ret = afs_vnode_fetch_status(vnode, auth_vnode, key);
if (ret < 0) {
iput(&auth_vnode->vfs_inode);
*_access = 0;
_leave(" = %d", ret);
return ret;
}
*_access = vnode->status.caller_access;
}
iput(&auth_vnode->vfs_inode);
_leave(" = 0 [access %x]", *_access);
return 0;
}
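Example 1 only shows the read side of the auth_vnode->permits cache. For orientation, here is a hedged sketch of what the matching update side could look like; the lock and field names (permits_lock, the rcu member of struct afs_permits) are assumptions about the AFS code, not quotes from it.
/* Hypothetical updater sketch: swap in the new permits array under an
 * updater-side lock and free the old copy after a grace period, so the
 * lockless rcu_dereference() walk in afs_check_permit() stays safe. */
static void afs_cache_permit_sketch(struct afs_vnode *auth_vnode,
				    struct afs_permits *new_permits)
{
	struct afs_permits *old_permits;
	mutex_lock(&auth_vnode->permits_lock);	/* assumed updater lock */
	old_permits = rcu_dereference_protected(auth_vnode->permits,
			lockdep_is_held(&auth_vnode->permits_lock));
	rcu_assign_pointer(auth_vnode->permits, new_permits);
	mutex_unlock(&auth_vnode->permits_lock);
	if (old_permits)
		kfree_rcu(old_permits, rcu);	/* assumes an rcu_head member */
}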
Example 2: handle_tx
/* Expects to be always run from workqueue - which acts as
* read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
unsigned head, out, in, s;
struct msghdr msg = {
.msg_name = NULL,
.msg_namelen = 0,
.msg_control = NULL,
.msg_controllen = 0,
.msg_iov = vq->iov,
.msg_flags = MSG_DONTWAIT,
};
size_t len, total_len = 0;
int err, wmem;
size_t hdr_size;
struct socket *sock = rcu_dereference(vq->private_data);
if (!sock)
return;
wmem = atomic_read(&sock->sk->sk_wmem_alloc);
if (wmem >= sock->sk->sk_sndbuf) {
mutex_lock(&vq->mutex);
tx_poll_start(net, sock);
mutex_unlock(&vq->mutex);
return;
}
use_mm(net->dev.mm);
mutex_lock(&vq->mutex);
vhost_disable_notify(vq);
if (wmem < sock->sk->sk_sndbuf * 2)
tx_poll_stop(net);
hdr_size = vq->hdr_size;
for (;;) {
head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
ARRAY_SIZE(vq->iov),
&out, &in,
NULL, NULL);
/* Nothing new? Wait for eventfd to tell us they refilled. */
if (head == vq->num) {
wmem = atomic_read(&sock->sk->sk_wmem_alloc);
if (wmem >= sock->sk->sk_sndbuf * 3 / 4) {
tx_poll_start(net, sock);
set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
break;
}
if (unlikely(vhost_enable_notify(vq))) {
vhost_disable_notify(vq);
continue;
}
break;
}
if (in) {
vq_err(vq, "Unexpected descriptor format for TX: "
"out %d, int %d\n", out, in);
break;
}
/* Skip header. TODO: support TSO. */
s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, out);
msg.msg_iovlen = out;
len = iov_length(vq->iov, out);
/* Sanity check */
if (!len) {
vq_err(vq, "Unexpected header len for TX: "
"%zd expected %zd\n",
iov_length(vq->hdr, s), hdr_size);
break;
}
/* TODO: Check specific error and bomb out unless ENOBUFS? */
err = sock->ops->sendmsg(NULL, sock, &msg, len);
if (unlikely(err < 0)) {
vhost_discard_vq_desc(vq);
tx_poll_start(net, sock);
break;
}
if (err != len)
pr_err("Truncated TX packet: "
" len %d != %zd\n", err, len);
vhost_add_used_and_signal(&net->dev, vq, head, 0);
total_len += len;
if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
vhost_poll_queue(&vq->poll);
break;
}
}
mutex_unlock(&vq->mutex);
unuse_mm(net->dev.mm);
}
/* Expects to be always run from workqueue - which acts as
* read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
unsigned head, out, in, log, s;
//......... part of the code omitted here .........
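In example 2 the backend socket is fetched with rcu_dereference(vq->private_data), and the comment above handle_tx()/handle_rx() notes that the work-queue handler itself is the read-side critical section for "our kind of RCU". A hypothetical control-path sketch of the matching update side follows; vhost_net_flush_vq_sketch() stands in for whatever mechanism waits for the handlers to finish and is not a real kernel function.
/* Hypothetical backend-swap sketch: publish the new socket, then wait
 * for any handle_tx()/handle_rx() still using the old one to complete
 * before releasing it.  The flush of the work queue plays the role of
 * synchronize_rcu() here. */
static void vhost_net_set_backend_sketch(struct vhost_net *net, int index,
					 struct socket *sock)
{
	struct vhost_virtqueue *vq = &net->dev.vqs[index];
	struct socket *old_sock;
	mutex_lock(&vq->mutex);
	old_sock = vq->private_data;
	rcu_assign_pointer(vq->private_data, sock);
	mutex_unlock(&vq->mutex);
	vhost_net_flush_vq_sketch(net, index);	/* hypothetical helper */
	if (old_sock)
		fput(old_sock->file);
}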
Example 3: ip6_input_finish
static inline int ip6_input_finish(struct sk_buff *skb)
{
struct inet6_protocol *ipprot;
struct sock *raw_sk;
unsigned int nhoff;
int nexthdr;
u8 hash;
struct inet6_dev *idev;
/*
* Parse extension headers
*/
rcu_read_lock();
resubmit:
idev = ip6_dst_idev(skb->dst);
if (!pskb_pull(skb, skb->h.raw - skb->data))
goto discard;
nhoff = IP6CB(skb)->nhoff;
nexthdr = skb->nh.raw[nhoff];
raw_sk = sk_head(&raw_v6_htable[nexthdr & (MAX_INET_PROTOS - 1)]);
if (raw_sk && !ipv6_raw_deliver(skb, nexthdr))
raw_sk = NULL;
hash = nexthdr & (MAX_INET_PROTOS - 1);
if ((ipprot = rcu_dereference(inet6_protos[hash])) != NULL) {
int ret;
if (ipprot->flags & INET6_PROTO_FINAL) {
struct ipv6hdr *hdr;
/* Free reference early: we don't need it any more,
and it may hold ip_conntrack module loaded
indefinitely. */
nf_reset(skb);
skb_postpull_rcsum(skb, skb->nh.raw,
skb->h.raw - skb->nh.raw);
hdr = skb->nh.ipv6h;
if (ipv6_addr_is_multicast(&hdr->daddr) &&
!ipv6_chk_mcast_addr(skb->dev, &hdr->daddr,
&hdr->saddr) &&
!ipv6_is_mld(skb, nexthdr))
goto discard;
}
if (!(ipprot->flags & INET6_PROTO_NOPOLICY) &&
!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard;
ret = ipprot->handler(&skb);
if (ret > 0)
goto resubmit;
else if (ret == 0)
IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDELIVERS);
} else {
if (!raw_sk) {
if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
IP6_INC_STATS_BH(idev, IPSTATS_MIB_INUNKNOWNPROTOS);
icmpv6_send(skb, ICMPV6_PARAMPROB,
ICMPV6_UNK_NEXTHDR, nhoff,
skb->dev);
}
} else
IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDELIVERS);
kfree_skb(skb);
}
rcu_read_unlock();
return 0;
discard:
IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDISCARDS);
rcu_read_unlock();
kfree_skb(skb);
return 0;
}
Example 4: nf_nat_ftp_init
static int __init nf_nat_ftp_init(void)
{
BUG_ON(rcu_dereference(nf_nat_ftp_hook));
rcu_assign_pointer(nf_nat_ftp_hook, nf_nat_ftp);
return 0;
}
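Example 4 publishes the NAT helper hook with rcu_assign_pointer(); readers such as examples 9 and 11 pick the hook up with rcu_dereference(). The counterpart on module unload, sketched below under the assumption that it mirrors the init path above, clears the pointer and then waits one grace period so that any reader still holding the old hook finishes before the module text disappears.
static void __exit nf_nat_ftp_fini(void)
{
	/* Unpublish the hook, then wait for all current RCU readers
	 * (the conntrack FTP helper) to drop their references. */
	rcu_assign_pointer(nf_nat_ftp_hook, NULL);
	synchronize_rcu();
}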
Example 5: construct_get_dest_keyring
/*
* Get the appropriate destination keyring for the request.
*
* The keyring selected is returned with an extra reference upon it which the
* caller must release.
*/
static void construct_get_dest_keyring(struct key **_dest_keyring)
{
struct request_key_auth *rka;
const struct cred *cred = current_cred();
struct key *dest_keyring = *_dest_keyring, *authkey;
kenter("%p", dest_keyring);
/* find the appropriate keyring */
if (dest_keyring) {
/* the caller supplied one */
key_get(dest_keyring);
} else {
/* use a default keyring; falling through the cases until we
* find one that we actually have */
switch (cred->jit_keyring) {
case KEY_REQKEY_DEFL_DEFAULT:
case KEY_REQKEY_DEFL_REQUESTOR_KEYRING:
if (cred->request_key_auth) {
authkey = cred->request_key_auth;
down_read(&authkey->sem);
rka = authkey->payload.data;
if (!test_bit(KEY_FLAG_REVOKED,
&authkey->flags))
dest_keyring =
key_get(rka->dest_keyring);
up_read(&authkey->sem);
if (dest_keyring)
break;
}
case KEY_REQKEY_DEFL_THREAD_KEYRING:
dest_keyring = key_get(cred->thread_keyring);
if (dest_keyring)
break;
case KEY_REQKEY_DEFL_PROCESS_KEYRING:
dest_keyring = key_get(cred->tgcred->process_keyring);
if (dest_keyring)
break;
case KEY_REQKEY_DEFL_SESSION_KEYRING:
rcu_read_lock();
dest_keyring = key_get(
rcu_dereference(cred->tgcred->session_keyring));
rcu_read_unlock();
if (dest_keyring)
break;
case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
dest_keyring =
key_get(cred->user->session_keyring);
break;
case KEY_REQKEY_DEFL_USER_KEYRING:
dest_keyring = key_get(cred->user->uid_keyring);
break;
case KEY_REQKEY_DEFL_GROUP_KEYRING:
default:
BUG();
}
}
*_dest_keyring = dest_keyring;
kleave(" [dk %d]", key_serial(dest_keyring));
return;
}
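A hypothetical caller sketch for example 5: whichever keyring comes back through *_dest_keyring carries an extra reference, so the caller must drop it with key_put() once it is done. The function name is invented and key_link() error handling is omitted for brevity.
/* Hypothetical caller: link a key into the default destination
 * keyring and release the extra reference afterwards. */
static void link_key_to_default_keyring(struct key *key)
{
	struct key *dest_keyring = NULL;	/* no caller-supplied keyring */
	construct_get_dest_keyring(&dest_keyring);
	if (dest_keyring) {
		key_link(dest_keyring, key);	/* errors ignored for brevity */
		key_put(dest_keyring);
	}
}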
Example 6: rcu_dereference
static inline struct net_device *bridge_parent(const struct net_device *dev)
{
struct net_bridge_port *port = rcu_dereference(dev->br_port);
return port ? port->br->dev : NULL;
}
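bridge_parent() does a bare rcu_dereference(), so it relies on every caller already being inside an RCU read-side critical section (the netfilter hooks it serves are invoked under rcu_read_lock()). A hypothetical caller, just to make that requirement explicit:
/* Hypothetical caller: the device returned by bridge_parent() is only
 * valid while the RCU read lock is held, so only a boolean result is
 * carried out of the critical section. */
static bool dev_has_bridge_parent(const struct net_device *dev)
{
	struct net_device *br_dev;
	bool bridged;
	rcu_read_lock();
	br_dev = bridge_parent(dev);
	bridged = br_dev != NULL;
	rcu_read_unlock();
	return bridged;
}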
Example 7: mpls_forward
static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct net *net = dev_net(dev);
struct mpls_shim_hdr *hdr;
struct mpls_route *rt;
struct mpls_entry_decoded dec;
struct net_device *out_dev;
struct mpls_dev *mdev;
unsigned int hh_len;
unsigned int new_header_size;
unsigned int mtu;
int err;
/* Careful this entire function runs inside of an rcu critical section */
mdev = mpls_dev_get(dev);
if (!mdev || !mdev->input_enabled)
goto drop;
if (skb->pkt_type != PACKET_HOST)
goto drop;
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
goto drop;
if (!pskb_may_pull(skb, sizeof(*hdr)))
goto drop;
/* Read and decode the label */
hdr = mpls_hdr(skb);
dec = mpls_entry_decode(hdr);
/* Pop the label */
skb_pull(skb, sizeof(*hdr));
skb_reset_network_header(skb);
skb_orphan(skb);
rt = mpls_route_input_rcu(net, dec.label);
if (!rt)
goto drop;
/* Find the output device */
out_dev = rcu_dereference(rt->rt_dev);
if (!mpls_output_possible(out_dev))
goto drop;
if (skb_warn_if_lro(skb))
goto drop;
skb_forward_csum(skb);
/* Verify ttl is valid */
if (dec.ttl <= 1)
goto drop;
dec.ttl -= 1;
/* Verify the destination can hold the packet */
new_header_size = mpls_rt_header_size(rt);
mtu = mpls_dev_mtu(out_dev);
if (mpls_pkt_too_big(skb, mtu - new_header_size))
goto drop;
hh_len = LL_RESERVED_SPACE(out_dev);
if (!out_dev->header_ops)
hh_len = 0;
/* Ensure there is enough space for the headers in the skb */
if (skb_cow(skb, hh_len + new_header_size))
goto drop;
skb->dev = out_dev;
skb->protocol = htons(ETH_P_MPLS_UC);
if (unlikely(!new_header_size && dec.bos)) {
/* Penultimate hop popping */
if (!mpls_egress(rt, skb, dec))
goto drop;
} else {
bool bos;
int i;
skb_push(skb, new_header_size);
skb_reset_network_header(skb);
/* Push the new labels */
hdr = mpls_hdr(skb);
bos = dec.bos;
for (i = rt->rt_labels - 1; i >= 0; i--) {
hdr[i] = mpls_entry_encode(rt->rt_label[i], dec.ttl, 0, bos);
bos = false;
}
}
err = neigh_xmit(rt->rt_via_table, out_dev, rt->rt_via, skb);
if (err)
net_dbg_ratelimited("%s: packet transmission failed: %d\n",
__func__, err);
return 0;
drop:
//......... part of the code omitted here .........
Example 8: rcu_dereference
static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
{
struct net_bridge_port *port = rcu_dereference(dev->br_port);
return port ? &port->br->fake_rtable : NULL;
}
Example 9: help
//......... part of the code omitted here .........
/* If packet is coming from IRC server */
if (dir == IP_CT_DIR_REPLY)
return NF_ACCEPT;
/* Until there's been traffic both ways, don't look in packets. */
if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
return NF_ACCEPT;
/* Not a full tcp header? */
th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
if (th == NULL)
return NF_ACCEPT;
/* No data? */
dataoff = protoff + th->doff*4;
if (dataoff >= skb->len)
return NF_ACCEPT;
spin_lock_bh(&irc_buffer_lock);
ib_ptr = skb_header_pointer(skb, dataoff, skb->len - dataoff,
irc_buffer);
BUG_ON(ib_ptr == NULL);
data = ib_ptr;
data_limit = ib_ptr + skb->len - dataoff;
/* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24
* 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */
while (data < data_limit - (19 + MINMATCHLEN)) {
if (memcmp(data, "\1DCC ", 5)) {
data++;
continue;
}
data += 5;
/* we have at least (19+MINMATCHLEN)-5 bytes valid data left */
iph = ip_hdr(skb);
pr_debug("DCC found in master %pI4:%u %pI4:%u\n",
&iph->saddr, ntohs(th->source),
&iph->daddr, ntohs(th->dest));
for (i = 0; i < ARRAY_SIZE(dccprotos); i++) {
if (memcmp(data, dccprotos[i], strlen(dccprotos[i]))) {
/* no match */
continue;
}
data += strlen(dccprotos[i]);
pr_debug("DCC %s detected\n", dccprotos[i]);
/* we have at least
* (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid
* data left (== 14/13 bytes) */
if (parse_dcc((char *)data, data_limit, &dcc_ip,
&dcc_port, &addr_beg_p, &addr_end_p)) {
pr_debug("unable to parse dcc command\n");
continue;
}
pr_debug("DCC bound ip/port: %pI4:%u\n",
&dcc_ip, dcc_port);
/* dcc_ip can be the internal OR external (NAT'ed) IP */
tuple = &ct->tuplehash[dir].tuple;
if (tuple->src.u3.ip != htonl(dcc_ip) &&
tuple->dst.u3.ip != htonl(dcc_ip)) {
if (net_ratelimit())
printk(KERN_WARNING
"Forged DCC command from %pI4: %pI4:%u\n",
&tuple->src.u3.ip,
&dcc_ip, dcc_port);
continue;
}
exp = nf_conntrack_expect_alloc(ct);
if (exp == NULL) {
ret = NF_DROP;
goto out;
}
tuple = &ct->tuplehash[!dir].tuple;
port = htons(dcc_port);
nf_conntrack_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
tuple->src.l3num,
NULL, &tuple->dst.u3,
IPPROTO_TCP, NULL, &port);
nf_nat_irc = rcu_dereference(nf_nat_irc_hook);
if (nf_nat_irc && ct->status & IPS_NAT_MASK)
ret = nf_nat_irc(skb, ctinfo,
addr_beg_p - ib_ptr,
addr_end_p - addr_beg_p,
exp);
else if (nf_conntrack_expect_related(exp) != 0)
ret = NF_DROP;
nf_conntrack_expect_put(exp);
goto out;
}
}
out:
spin_unlock_bh(&irc_buffer_lock);
return ret;
}
Example 10: netdev_open
static int netdev_open(struct net_device *pnetdev)
{
uint status;
_adapter *padapter = (_adapter *)rtw_netdev_priv(pnetdev);
struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
RT_TRACE(_module_os_intfs_c_,_drv_info_,("+871x_drv - dev_open\n"));
DBG_8192C("+871x_drv - drv_open, bup=%d\n", padapter->bup);
if(pwrctrlpriv->ps_flag == _TRUE){
padapter->net_closed = _FALSE;
goto netdev_open_normal_process;
}
if(padapter->bup == _FALSE)
{
padapter->bDriverStopped = _FALSE;
padapter->bSurpriseRemoved = _FALSE;
padapter->bCardDisableWOHSM = _FALSE;
status = rtw_hal_init(padapter);
if (status ==_FAIL)
{
RT_TRACE(_module_os_intfs_c_,_drv_err_,("rtl871x_hal_init(): Can't init h/w!\n"));
goto netdev_open_error;
}
DBG_8192C("MAC Address = "MAC_FMT"\n", MAC_ARG(pnetdev->dev_addr));
status=rtw_start_drv_threads(padapter);
if(status ==_FAIL)
{
RT_TRACE(_module_os_intfs_c_,_drv_err_,("Initialize driver software resource Failed!\n"));
goto netdev_open_error;
}
if (init_hw_mlme_ext(padapter) == _FAIL)
{
RT_TRACE(_module_os_intfs_c_,_drv_err_,("can't init mlme_ext_priv\n"));
goto netdev_open_error;
}
#ifdef CONFIG_DRVEXT_MODULE
init_drvext(padapter);
#endif
if(padapter->intf_start)
{
padapter->intf_start(padapter);
}
#ifdef CONFIG_PROC_DEBUG
#ifndef RTK_DMP_PLATFORM
rtw_proc_init_one(pnetdev);
#endif
#endif
rtw_led_control(padapter, LED_CTL_NO_LINK);
padapter->bup = _TRUE;
}
padapter->net_closed = _FALSE;
_set_timer(&padapter->mlmepriv.dynamic_chk_timer, 2000);
if(( pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE ) ||(padapter->pwrctrlpriv.bHWPwrPindetect))
{
padapter->pwrctrlpriv.bips_processing = _FALSE;
rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
}
//netif_carrier_on(pnetdev);//call this func when rtw_joinbss_event_callback returns success
if(!netif_queue_stopped(pnetdev))
netif_start_queue(pnetdev);
else
netif_wake_queue(pnetdev);
#ifdef CONFIG_BR_EXT
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35))
rcu_read_lock();
#endif // (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35))
//if(check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE) == _TRUE)
{
//struct net_bridge *br = pnetdev->br_port->br;//->dev->dev_addr;
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
if (pnetdev->br_port)
#else // (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
if (rcu_dereference(padapter->pnetdev->rx_handler_data))
#endif // (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
{
struct net_device *br_netdev;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
br_netdev = dev_get_by_name(CONFIG_BR_EXT_BRNAME);
#else // (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
struct net *devnet = NULL;
//......... part of the code omitted here .........
Example 11: help_out
static inline int
help_out(struct sk_buff *skb, unsigned char *rb_ptr, unsigned int datalen,
struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
struct ip_ct_rtsp_expect expinfo;
int dir = CTINFO2DIR(ctinfo); /* = IP_CT_DIR_ORIGINAL */
//struct tcphdr* tcph = (void*)iph + iph->ihl * 4;
//uint tcplen = pktlen - iph->ihl * 4;
char* pdata = rb_ptr;
//uint datalen = tcplen - tcph->doff * 4;
uint dataoff = 0;
int ret = NF_ACCEPT;
struct nf_conntrack_expect *exp;
__be16 be_loport;
typeof(nf_nat_rtsp_hook) nf_nat_rtsp;
memset(&expinfo, 0, sizeof(expinfo));
while (dataoff < datalen) {
uint cmdoff = dataoff;
uint hdrsoff = 0;
uint hdrslen = 0;
uint cseqoff = 0;
uint cseqlen = 0;
uint transoff = 0;
uint translen = 0;
uint off;
if (!rtsp_parse_message(pdata, datalen, &dataoff,
&hdrsoff, &hdrslen,
&cseqoff, &cseqlen,
&transoff, &translen))
break; /* not a valid message */
if (strncmp(pdata+cmdoff, "SETUP ", 6) != 0)
continue; /* not a SETUP message */
pr_debug("found a setup message\n");
off = 0;
if(translen) {
rtsp_parse_transport(pdata+transoff, translen, &expinfo);
}
if (expinfo.loport == 0) {
pr_debug("no udp transports found\n");
continue; /* no udp transports found */
}
pr_debug("udp transport found, ports=(%d,%hu,%hu)\n",
(int)expinfo.pbtype, expinfo.loport, expinfo.hiport);
exp = nf_ct_expect_alloc(ct);
if (!exp) {
ret = NF_DROP;
goto out;
}
be_loport = htons(expinfo.loport);
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
/* media stream source can be different from the RTSP server address */
// &ct->tuplehash[!dir].tuple.src.u3, &ct->tuplehash[!dir].tuple.dst.u3,
NULL, &ct->tuplehash[!dir].tuple.dst.u3,
IPPROTO_UDP, NULL, &be_loport);
exp->master = ct;
exp->expectfn = expected;
exp->flags = 0;
if (expinfo.pbtype == pb_range) {
pr_debug("Changing expectation mask to handle multiple ports\n");
//exp->mask.dst.u.udp.port = 0xfffe;
}
pr_debug("expect_related %pI4:%u-%pI4:%u\n",
&exp->tuple.src.u3.ip,
ntohs(exp->tuple.src.u.udp.port),
&exp->tuple.dst.u3.ip,
ntohs(exp->tuple.dst.u.udp.port));
nf_nat_rtsp = rcu_dereference(nf_nat_rtsp_hook);
if (nf_nat_rtsp && ct->status & IPS_NAT_MASK)
/* pass the request off to the nat helper */
ret = nf_nat_rtsp(skb, ctinfo, hdrsoff, hdrslen, &expinfo, exp);
else if (nf_ct_expect_related(exp) != 0) {
pr_info("nf_conntrack_expect_related failed\n");
ret = NF_DROP;
}
nf_ct_expect_put(exp);
goto out;
}
out:
return ret;
}
Example 12: nfulnl_log_packet
/* log handler for internal netfilter logging api */
void
nfulnl_log_packet(struct net *net,
u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct nf_loginfo *li_user,
const char *prefix)
{
size_t size;
unsigned int data_len;
struct nfulnl_instance *inst;
const struct nf_loginfo *li;
unsigned int qthreshold;
unsigned int plen;
struct nfnl_log_net *log = nfnl_log_pernet(net);
const struct nfnl_ct_hook *nfnl_ct = NULL;
struct nf_conn *ct = NULL;
enum ip_conntrack_info uninitialized_var(ctinfo);
if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
li = li_user;
else
li = &default_loginfo;
inst = instance_lookup_get(log, li->u.ulog.group);
if (!inst)
return;
plen = 0;
if (prefix)
plen = strlen(prefix) + 1;
/* FIXME: do we want to make the size calculation conditional based on
* what is actually present? way more branches and checks, but more
* memory efficient... */
size = nlmsg_total_size(sizeof(struct nfgenmsg))
+ nla_total_size(sizeof(struct nfulnl_msg_packet_hdr))
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
#endif
+ nla_total_size(sizeof(u_int32_t)) /* mark */
+ nla_total_size(sizeof(u_int32_t)) /* uid */
+ nla_total_size(sizeof(u_int32_t)) /* gid */
+ nla_total_size(plen) /* prefix */
+ nla_total_size(sizeof(struct nfulnl_msg_packet_hw))
+ nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp))
+ nla_total_size(sizeof(struct nfgenmsg)); /* NLMSG_DONE */
if (in && skb_mac_header_was_set(skb)) {
size += nla_total_size(skb->dev->hard_header_len)
+ nla_total_size(sizeof(u_int16_t)) /* hwtype */
+ nla_total_size(sizeof(u_int16_t)); /* hwlen */
}
spin_lock_bh(&inst->lock);
if (inst->flags & NFULNL_CFG_F_SEQ)
size += nla_total_size(sizeof(u_int32_t));
if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
size += nla_total_size(sizeof(u_int32_t));
if (inst->flags & NFULNL_CFG_F_CONNTRACK) {
nfnl_ct = rcu_dereference(nfnl_ct_hook);
if (nfnl_ct != NULL) {
ct = nfnl_ct->get_ct(skb, &ctinfo);
if (ct != NULL)
size += nfnl_ct->build_size(ct);
}
}
qthreshold = inst->qthreshold;
/* per-rule qthreshold overrides per-instance */
if (li->u.ulog.qthreshold)
if (qthreshold > li->u.ulog.qthreshold)
qthreshold = li->u.ulog.qthreshold;
switch (inst->copy_mode) {
case NFULNL_COPY_META:
case NFULNL_COPY_NONE:
data_len = 0;
break;
case NFULNL_COPY_PACKET:
if (inst->copy_range > skb->len)
data_len = skb->len;
else
data_len = inst->copy_range;
size += nla_total_size(data_len);
break;
case NFULNL_COPY_DISABLED:
default:
goto unlock_and_release;
//......... part of the code omitted here .........
Example 13: pptp_inbound_pkt
//......... part of the code omitted here .........
case PPTP_STOP_SESSION_REPLY:
/* server confirms end of control session */
if (info->sstate > PPTP_SESSION_STOPREQ)
goto invalid;
if (pptpReq->strep.resultCode == PPTP_STOP_OK)
info->sstate = PPTP_SESSION_NONE;
else
info->sstate = PPTP_SESSION_ERROR;
break;
case PPTP_OUT_CALL_REPLY:
/* server accepted call, we now expect GRE frames */
if (info->sstate != PPTP_SESSION_CONFIRMED)
goto invalid;
if (info->cstate != PPTP_CALL_OUT_REQ &&
info->cstate != PPTP_CALL_OUT_CONF)
goto invalid;
cid = pptpReq->ocack.callID;
pcid = pptpReq->ocack.peersCallID;
if (info->pns_call_id != pcid)
goto invalid;
pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg],
ntohs(cid), ntohs(pcid));
if (pptpReq->ocack.resultCode == PPTP_OUTCALL_CONNECT) {
info->cstate = PPTP_CALL_OUT_CONF;
info->pac_call_id = cid;
exp_gre(ct, cid, pcid);
} else
info->cstate = PPTP_CALL_NONE;
break;
case PPTP_IN_CALL_REQUEST:
/* server tells us about incoming call request */
if (info->sstate != PPTP_SESSION_CONFIRMED)
goto invalid;
cid = pptpReq->icreq.callID;
pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
info->cstate = PPTP_CALL_IN_REQ;
info->pac_call_id = cid;
break;
case PPTP_IN_CALL_CONNECT:
/* server tells us about incoming call established */
if (info->sstate != PPTP_SESSION_CONFIRMED)
goto invalid;
if (info->cstate != PPTP_CALL_IN_REP &&
info->cstate != PPTP_CALL_IN_CONF)
goto invalid;
pcid = pptpReq->iccon.peersCallID;
cid = info->pac_call_id;
if (info->pns_call_id != pcid)
goto invalid;
pr_debug("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(pcid));
info->cstate = PPTP_CALL_IN_CONF;
/* we expect a GRE connection from PAC to PNS */
exp_gre(ct, cid, pcid);
break;
case PPTP_CALL_DISCONNECT_NOTIFY:
/* server confirms disconnect */
cid = pptpReq->disc.callID;
pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
info->cstate = PPTP_CALL_NONE;
/* untrack this call id, unexpect GRE packets */
pptp_destroy_siblings(ct);
break;
case PPTP_WAN_ERROR_NOTIFY:
case PPTP_SET_LINK_INFO:
case PPTP_ECHO_REQUEST:
case PPTP_ECHO_REPLY:
/* I don't have to explain these ;) */
break;
default:
goto invalid;
}
nf_nat_pptp_inbound = rcu_dereference(nf_nat_pptp_hook_inbound);
if (nf_nat_pptp_inbound && ct->status & IPS_NAT_MASK)
return nf_nat_pptp_inbound(skb, ct, ctinfo,
protoff, ctlh, pptpReq);
return NF_ACCEPT;
invalid:
pr_debug("invalid %s: type=%d cid=%u pcid=%u "
"cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n",
msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0],
msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate,
ntohs(info->pns_call_id), ntohs(info->pac_call_id));
return NF_ACCEPT;
}
Example 14: ip_local_deliver_finish
static int ip_local_deliver_finish(struct sk_buff *skb)
{
struct net *net = dev_net(skb->dev);
__skb_pull(skb, ip_hdrlen(skb));
/* Point into the IP datagram, just past the header. */
skb_reset_transport_header(skb);
rcu_read_lock();
{
int protocol = ip_hdr(skb)->protocol;
int hash, raw;
struct net_protocol *ipprot;
resubmit:
raw = raw_local_deliver(skb, protocol);
hash = protocol & (MAX_INET_PROTOS - 1);
ipprot = rcu_dereference(inet_protos[hash]);
if (ipprot != NULL) {
int ret;
if (!net_eq(net, &init_net) && !ipprot->netns_ok) {
if (net_ratelimit())
printk("%s: proto %d isn't netns-ready\n",
__func__, protocol);
kfree_skb(skb);
goto out;
}
if (!ipprot->no_policy) {
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
kfree_skb(skb);
goto out;
}
nf_reset(skb);
}
ret = ipprot->handler(skb);
if (ret < 0) {
protocol = -ret;
goto resubmit;
}
IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
} else {
if (!raw) {
if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
icmp_send(skb, ICMP_DEST_UNREACH,
ICMP_PROT_UNREACH, 0);
}
} else
IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
kfree_skb(skb);
}
}
out:
rcu_read_unlock();
return 0;
}
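Example 14 trusts that whatever it finds in inet_protos[hash] through rcu_dereference() is fully initialised. That guarantee comes from the publisher using rcu_assign_pointer(), as in the hypothetical registration sketch below; the lock name and error handling are assumptions for illustration, not the kernel's actual inet_add_protocol().
static DEFINE_SPINLOCK(proto_table_lock);	/* hypothetical updater lock */
/* Hypothetical registration sketch: the handler structure must be
 * fully set up before it is published, which is exactly what
 * rcu_assign_pointer() guarantees to readers of inet_protos[]. */
static int register_proto_sketch(struct net_protocol *prot,
				 unsigned char protocol)
{
	int hash = protocol & (MAX_INET_PROTOS - 1);
	int ret = 0;
	spin_lock_bh(&proto_table_lock);
	if (inet_protos[hash])
		ret = -EEXIST;		/* slot already taken */
	else
		rcu_assign_pointer(inet_protos[hash], prot);
	spin_unlock_bh(&proto_table_lock);
	return ret;
}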
Example 15: pptp_outbound_pkt
static inline int
pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff,
struct PptpControlHeader *ctlh,
union pptp_ctrl_union *pptpReq,
unsigned int reqlen,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo)
{
struct nf_ct_pptp_master *info = nfct_help_data(ct);
u_int16_t msg;
__be16 cid = 0, pcid = 0;
typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound;
msg = ntohs(ctlh->messageType);
pr_debug("outbound control message %s\n", pptp_msg_name[msg]);
switch (msg) {
case PPTP_START_SESSION_REQUEST:
/* client requests for new control session */
if (info->sstate != PPTP_SESSION_NONE)
goto invalid;
info->sstate = PPTP_SESSION_REQUESTED;
break;
case PPTP_STOP_SESSION_REQUEST:
/* client requests end of control session */
info->sstate = PPTP_SESSION_STOPREQ;
break;
case PPTP_OUT_CALL_REQUEST:
/* client initiating connection to server */
if (info->sstate != PPTP_SESSION_CONFIRMED)
goto invalid;
info->cstate = PPTP_CALL_OUT_REQ;
/* track PNS call id */
cid = pptpReq->ocreq.callID;
pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
info->pns_call_id = cid;
break;
case PPTP_IN_CALL_REPLY:
/* client answers incoming call */
if (info->cstate != PPTP_CALL_IN_REQ &&
info->cstate != PPTP_CALL_IN_REP)
goto invalid;
cid = pptpReq->icack.callID;
pcid = pptpReq->icack.peersCallID;
if (info->pac_call_id != pcid)
goto invalid;
pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name[msg],
ntohs(cid), ntohs(pcid));
if (pptpReq->icack.resultCode == PPTP_INCALL_ACCEPT) {
/* part two of the three-way handshake */
info->cstate = PPTP_CALL_IN_REP;
info->pns_call_id = cid;
} else
info->cstate = PPTP_CALL_NONE;
break;
case PPTP_CALL_CLEAR_REQUEST:
/* client requests hangup of call */
if (info->sstate != PPTP_SESSION_CONFIRMED)
goto invalid;
/* FUTURE: iterate over all calls and check if
* call ID is valid. We don't do this without newnat,
* because we only know about last call */
info->cstate = PPTP_CALL_CLEAR_REQ;
break;
case PPTP_SET_LINK_INFO:
case PPTP_ECHO_REQUEST:
case PPTP_ECHO_REPLY:
/* I don't have to explain these ;) */
break;
default:
goto invalid;
}
nf_nat_pptp_outbound = rcu_dereference(nf_nat_pptp_hook_outbound);
if (nf_nat_pptp_outbound && ct->status & IPS_NAT_MASK)
return nf_nat_pptp_outbound(skb, ct, ctinfo,
protoff, ctlh, pptpReq);
return NF_ACCEPT;
invalid:
pr_debug("invalid %s: type=%d cid=%u pcid=%u "
"cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n",
msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0],
msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate,
ntohs(info->pns_call_id), ntohs(info->pac_call_id));
return NF_ACCEPT;
}