This article collects typical usage examples of the rcu_read_lock function, drawn from Linux kernel C code (the source page labels them C++, but rcu_read_lock is kernel C). If you are unsure what rcu_read_lock does or how it is used in practice, the selected examples below should help.
Fifteen code examples of rcu_read_lock are shown below, sorted by popularity by default.
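Before the examples, it helps to see the shape they all share: a short read-side critical section opened by rcu_read_lock(), RCU-protected pointers fetched with rcu_dereference(), and rcu_read_unlock() called after the last use of the protected data. Here is a minimal sketch of that pattern; the struct my_data type, global_ptr variable, and read_value() function are hypothetical, for illustration only:

#include <linux/rcupdate.h>

struct my_data {
	int value;
};

/* hypothetical global, updated elsewhere via rcu_assign_pointer() */
static struct my_data __rcu *global_ptr;

static int read_value(void)
{
	struct my_data *p;
	int ret = -1;

	rcu_read_lock();			/* enter the read-side critical section */
	p = rcu_dereference(global_ptr);	/* protected pointer fetch */
	if (p)
		ret = p->value;
	rcu_read_unlock();			/* p must not be used past this point */
	return ret;
}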
Example 1: call_sbin_request_key
static int call_sbin_request_key(struct key_construction *cons,
const char *op,
void *aux)
{
const struct cred *cred = current_cred();
key_serial_t prkey, sskey;
struct key *key = cons->key, *authkey = cons->authkey, *keyring,
*session;
char *argv[9], *envp[3], uid_str[12], gid_str[12];
char key_str[12], keyring_str[3][12];
char desc[20];
int ret, i;
kenter("{%d},{%d},%s", key->serial, authkey->serial, op);
ret = install_user_keyrings();
if (ret < 0)
goto error_alloc;
	/* allocate a new session keyring */
sprintf(desc, "_req.%u", key->serial);
cred = get_current_cred();
keyring = keyring_alloc(desc, cred->fsuid, cred->fsgid, cred,
KEY_ALLOC_QUOTA_OVERRUN, NULL);
put_cred(cred);
if (IS_ERR(keyring)) {
ret = PTR_ERR(keyring);
goto error_alloc;
}
	/* attach the auth key to the session keyring */
ret = key_link(keyring, authkey);
if (ret < 0)
goto error_link;
	/* record the UID and GID */
sprintf(uid_str, "%d", cred->fsuid);
sprintf(gid_str, "%d", cred->fsgid);
	/* we say which key is under construction */
sprintf(key_str, "%d", key->serial);
	/* we specify the process's default keyrings */
sprintf(keyring_str[0], "%d",
cred->thread_keyring ? cred->thread_keyring->serial : 0);
prkey = 0;
if (cred->tgcred->process_keyring)
prkey = cred->tgcred->process_keyring->serial;
sprintf(keyring_str[1], "%d", prkey);
rcu_read_lock();
session = rcu_dereference(cred->tgcred->session_keyring);
if (!session)
session = cred->user->session_keyring;
sskey = session->serial;
rcu_read_unlock();
sprintf(keyring_str[2], "%d", sskey);
	/* set up a minimal environment */
i = 0;
envp[i++] = "HOME=/";
envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
envp[i] = NULL;
	/* set up the argument list */
i = 0;
argv[i++] = "/sbin/request-key";
argv[i++] = (char *) op;
argv[i++] = key_str;
argv[i++] = uid_str;
argv[i++] = gid_str;
argv[i++] = keyring_str[0];
argv[i++] = keyring_str[1];
argv[i++] = keyring_str[2];
argv[i] = NULL;
	/* run /sbin/request-key */
ret = call_usermodehelper_keys(argv[0], argv, envp, keyring,
UMH_WAIT_PROC);
kdebug("usermode -> 0x%x", ret);
if (ret >= 0) {
		/* the helper ran - check the key was actually instantiated */
if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags) ||
key_validate(key) < 0)
ret = -ENOKEY;
else
			/* ignore any errors from userspace if the key
			 * was instantiated */
ret = 0;
}
error_link:
key_put(keyring);
error_alloc:
complete_request_key(cons, ret);
kleave(" = %d", ret);
//......... portions of this code omitted .........
Example 2: hwmp_route_info_get
/**
* hwmp_route_info_get - Update routing info to originator and transmitter
*
* @sdata: local mesh subif
* @mgmt: mesh management frame
* @hwmp_ie: hwmp information element (PREP or PREQ)
* @action: mesh path frame type (MPATH_PREQ or MPATH_PREP)
*
* This function updates the path routing information to the originator and the
* transmitter of a HWMP PREQ or PREP frame.
*
* Returns: metric to frame originator or 0 if the frame should not be further
* processed
*
* Notes: this function is the only place (besides user-provided info) where
* path routing information is updated.
*/
static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt,
u8 *hwmp_ie, enum mpath_frame_type action)
{
struct ieee80211_local *local = sdata->local;
struct mesh_path *mpath;
struct sta_info *sta;
bool fresh_info;
u8 *orig_addr, *ta;
u32 orig_sn, orig_metric;
unsigned long orig_lifetime, exp_time;
u32 last_hop_metric, new_metric;
bool process = true;
rcu_read_lock();
sta = sta_info_get(sdata, mgmt->sa);
if (!sta) {
rcu_read_unlock();
return 0;
}
last_hop_metric = airtime_link_metric_get(local, sta);
/* Update and check originator routing info */
fresh_info = true;
switch (action) {
case MPATH_PREQ:
orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie);
orig_sn = PREQ_IE_ORIG_SN(hwmp_ie);
orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie);
orig_metric = PREQ_IE_METRIC(hwmp_ie);
break;
case MPATH_PREP:
/* Originator here refers to the MP that was the destination in
* the Path Request. The draft refers to that MP as the
* destination address, even though usually it is the origin of
* the PREP frame. We divert from the nomenclature in the draft
* so that we can easily use a single function to gather path
* information from both PREQ and PREP frames.
*/
orig_addr = PREP_IE_ORIG_ADDR(hwmp_ie);
orig_sn = PREP_IE_ORIG_SN(hwmp_ie);
orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
orig_metric = PREP_IE_METRIC(hwmp_ie);
break;
default:
rcu_read_unlock();
return 0;
}
new_metric = orig_metric + last_hop_metric;
if (new_metric < orig_metric)
new_metric = MAX_METRIC;
exp_time = TU_TO_EXP_TIME(orig_lifetime);
if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0) {
/* This MP is the originator, we are not interested in this
* frame, except for updating transmitter's path info.
*/
process = false;
fresh_info = false;
} else {
mpath = mesh_path_lookup(orig_addr, sdata);
if (mpath) {
spin_lock_bh(&mpath->state_lock);
if (mpath->flags & MESH_PATH_FIXED)
fresh_info = false;
else if ((mpath->flags & MESH_PATH_ACTIVE) &&
(mpath->flags & MESH_PATH_SN_VALID)) {
if (SN_GT(mpath->sn, orig_sn) ||
(mpath->sn == orig_sn &&
new_metric >= mpath->metric)) {
process = false;//ymj
fresh_info = false;
}
			if (orig_metric > 0 && orig_metric < (min_metric / 5)) {
				mhwmp_dbg("mesh hwmp: get orig_metric = %d\n",
					  orig_metric); /* ymj */
				process = false;
				fresh_info = false;
			}
}
} else {
mesh_path_add(orig_addr, sdata);
mpath = mesh_path_lookup(orig_addr, sdata);
//......... portions of this code omitted .........
Example 3: rtl_op_bss_info_changed
static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
mutex_lock(&rtlpriv->locks.conf_mutex);
if ((vif->type == NL80211_IFTYPE_ADHOC) ||
(vif->type == NL80211_IFTYPE_AP) ||
(vif->type == NL80211_IFTYPE_MESH_POINT)) {
if ((changed & BSS_CHANGED_BEACON) ||
(changed & BSS_CHANGED_BEACON_ENABLED &&
bss_conf->enable_beacon)) {
if (mac->beacon_enabled == 0) {
RT_TRACE(COMP_MAC80211, DBG_DMESG,
("BSS_CHANGED_BEACON_ENABLED \n"));
/*start hw beacon interrupt. */
/*rtlpriv->cfg->ops->set_bcn_reg(hw); */
mac->beacon_enabled = 1;
rtlpriv->cfg->ops->update_interrupt_mask(hw,
rtlpriv->cfg->maps
[RTL_IBSS_INT_MASKS], 0);
if (rtlpriv->cfg->ops->linked_set_reg)
rtlpriv->cfg->ops->linked_set_reg(hw);
}
}
		if ((changed & BSS_CHANGED_BEACON_ENABLED &&
		     !bss_conf->enable_beacon)) {
if (mac->beacon_enabled == 1) {
RT_TRACE(COMP_MAC80211, DBG_DMESG,
("ADHOC DISABLE BEACON\n"));
mac->beacon_enabled = 0;
rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
rtlpriv->cfg->maps
[RTL_IBSS_INT_MASKS]);
}
}
if (changed & BSS_CHANGED_BEACON_INT) {
RT_TRACE(COMP_BEACON, DBG_TRACE,
("BSS_CHANGED_BEACON_INT\n"));
mac->beacon_interval = bss_conf->beacon_int;
rtlpriv->cfg->ops->set_bcn_intv(hw);
}
}
/*TODO: reference to enum ieee80211_bss_change */
if (changed & BSS_CHANGED_ASSOC) {
if (bss_conf->assoc) {
struct ieee80211_sta *sta = NULL;
/* we should reset all sec info & cam
* before set cam after linked, we should not
* reset in disassoc, that will cause tkip->wep
* fail because some flag will be wrong */
/* reset sec info */
rtl_cam_reset_sec_info(hw);
/* reset cam to fix wep fail issue
* when change from wpa to wep */
rtl_cam_reset_all_entry(hw);
mac->link_state = MAC80211_LINKED;
mac->cnt_after_linked = 0;
mac->assoc_id = bss_conf->aid;
memcpy(mac->bssid, bss_conf->bssid, 6);
if (rtlpriv->cfg->ops->linked_set_reg)
rtlpriv->cfg->ops->linked_set_reg(hw);
rcu_read_lock();
			sta = ieee80211_find_sta(vif, (u8 *)bss_conf->bssid);
if (vif->type == NL80211_IFTYPE_STATION && sta)
rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
RT_TRACE(COMP_EASY_CONCURRENT, DBG_LOUD,
("send PS STATIC frame \n"));
			if (rtlpriv->dm.supp_phymode_switch) {
				/* sta may be NULL if the peer was not found */
				if (sta && sta->ht_cap.ht_supported)
					rtl_send_smps_action(hw, sta,
							     IEEE80211_SMPS_STATIC);
			}
rcu_read_unlock();
RT_TRACE(COMP_MAC80211, DBG_DMESG,
("BSS_CHANGED_ASSOC\n"));
} else {
if (mac->link_state == MAC80211_LINKED)
rtl_lps_leave(hw);
			if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE)
rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
mac->link_state = MAC80211_NOLINK;
memset(mac->bssid, 0, 6);
mac->vendor = PEER_UNKNOWN;
if (rtlpriv->dm.supp_phymode_switch) {
//......... portions of this code omitted .........
Example 4: rtl8723ae_dm_refresh_rate_adaptive_mask
static void rtl8723ae_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rate_adaptive *p_ra = &rtlpriv->ra;
	u32 low_rssithresh_for_ra, high_rssithresh_for_ra;
	struct ieee80211_sta *sta = NULL;
	if (is_hal_stop(rtlhal)) {
		RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
			 " driver is going to unload\n");
		return;
	}
	if (!rtlpriv->dm.useramask) {
		RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
			 " driver does not control rate adaptive mask\n");
		return;
	}
	if (mac->link_state == MAC80211_LINKED &&
	    mac->opmode == NL80211_IFTYPE_STATION) {
		switch (p_ra->pre_ratr_state) {
		case DM_RATR_STA_HIGH:
			high_rssithresh_for_ra = 50;
			low_rssithresh_for_ra = 20;
			break;
		case DM_RATR_STA_MIDDLE:
			high_rssithresh_for_ra = 55;
			low_rssithresh_for_ra = 20;
			break;
		case DM_RATR_STA_LOW:
			high_rssithresh_for_ra = 50;
			low_rssithresh_for_ra = 25;
			break;
		default:
			high_rssithresh_for_ra = 50;
			low_rssithresh_for_ra = 20;
			break;
		}
		if (rtlpriv->dm.undec_sm_pwdb > high_rssithresh_for_ra)
			p_ra->ratr_state = DM_RATR_STA_HIGH;
		else if (rtlpriv->dm.undec_sm_pwdb > low_rssithresh_for_ra)
			p_ra->ratr_state = DM_RATR_STA_MIDDLE;
		else
			p_ra->ratr_state = DM_RATR_STA_LOW;
		if (p_ra->pre_ratr_state != p_ra->ratr_state) {
			RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
				 "RSSI = %ld\n",
				 rtlpriv->dm.undec_sm_pwdb);
			RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
				 "RSSI_LEVEL = %d\n", p_ra->ratr_state);
			RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
				 "PreState = %d, CurState = %d\n",
				 p_ra->pre_ratr_state, p_ra->ratr_state);
			rcu_read_lock();
			sta = rtl_find_sta(hw, mac->bssid);
			if (sta)
				rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
							p_ra->ratr_state);
			rcu_read_unlock();
			p_ra->pre_ratr_state = p_ra->ratr_state;
		}
	}
}
Example 5: shm_lock_by_ptr
static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
rcu_read_lock();
spin_lock(&ipcp->shm_perm.lock);
}
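Example 5 takes rcu_read_lock() before a per-object spin lock, so the caller must drop both in the reverse order. A matching unlock helper could look like the sketch below (hypothetical, for illustration; the actual ipc code uses its own unlock helpers):

static inline void shm_unlock_by_ptr(struct shmid_kernel *ipcp)
{
	/* hypothetical mirror of shm_lock_by_ptr() above */
	spin_unlock(&ipcp->shm_perm.lock);
	rcu_read_unlock();
}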
Example 6: arp_solicit
static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
{
__be32 saddr = 0;
u8 *dst_ha = NULL;
struct net_device *dev = neigh->dev;
__be32 target = *(__be32 *)neigh->primary_key;
int probes = atomic_read(&neigh->probes);
struct in_device *in_dev;
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
if (!in_dev) {
rcu_read_unlock();
return;
}
switch (IN_DEV_ARP_ANNOUNCE(in_dev)) {
default:
case 0:
if (skb && inet_addr_type(dev_net(dev),
ip_hdr(skb)->saddr) == RTN_LOCAL)
saddr = ip_hdr(skb)->saddr;
break;
case 1:
if (!skb)
break;
saddr = ip_hdr(skb)->saddr;
if (inet_addr_type(dev_net(dev), saddr) == RTN_LOCAL) {
if (inet_addr_onlink(in_dev, target, saddr))
break;
}
saddr = 0;
break;
case 2:
break;
}
rcu_read_unlock();
if (!saddr)
saddr = inet_select_addr(dev, target, RT_SCOPE_LINK);
probes -= neigh->parms->ucast_probes;
if (probes < 0) {
if (!(neigh->nud_state & NUD_VALID))
printk(KERN_DEBUG
"trying to ucast probe in NUD_INVALID\n");
dst_ha = neigh->ha;
read_lock_bh(&neigh->lock);
} else {
probes -= neigh->parms->app_probes;
if (probes < 0) {
#ifdef CONFIG_ARPD
neigh_app_ns(neigh);
#endif
return;
}
}
arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
dst_ha, dev->dev_addr, NULL);
if (dst_ha)
read_unlock_bh(&neigh->lock);
}
Example 7: nfnetlink_rcv_msg
/* Process one complete nfnetlink message. */
static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
const struct nfnl_callback *nc;
const struct nfnetlink_subsystem *ss;
int type, err;
/* All the messages must at least contain nfgenmsg */
if (nlmsg_len(nlh) < sizeof(struct nfgenmsg))
return 0;
type = nlh->nlmsg_type;
replay:
rcu_read_lock();
ss = nfnetlink_get_subsys(type);
if (!ss) {
#ifdef CONFIG_MODULES
rcu_read_unlock();
request_module("nfnetlink-subsys-%d", NFNL_SUBSYS_ID(type));
rcu_read_lock();
ss = nfnetlink_get_subsys(type);
if (!ss)
#endif
{
rcu_read_unlock();
return -EINVAL;
}
}
nc = nfnetlink_find_client(type, ss);
if (!nc) {
rcu_read_unlock();
return -EINVAL;
}
{
int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
struct nlattr *cda[ss->cb[cb_id].attr_count + 1];
struct nlattr *attr = (void *)nlh + min_len;
int attrlen = nlh->nlmsg_len - min_len;
__u8 subsys_id = NFNL_SUBSYS_ID(type);
err = nla_parse(cda, ss->cb[cb_id].attr_count,
attr, attrlen, ss->cb[cb_id].policy);
if (err < 0) {
rcu_read_unlock();
return err;
}
if (nc->call_rcu) {
err = nc->call_rcu(net->nfnl, skb, nlh,
(const struct nlattr **)cda);
rcu_read_unlock();
} else {
rcu_read_unlock();
nfnl_lock(subsys_id);
if (rcu_dereference_protected(table[subsys_id].subsys,
lockdep_is_held(&table[subsys_id].mutex)) != ss ||
nfnetlink_find_client(type, ss) != nc)
err = -EAGAIN;
else if (nc->call)
err = nc->call(net->nfnl, skb, nlh,
(const struct nlattr **)cda);
else
err = -EINVAL;
nfnl_unlock(subsys_id);
}
if (err == -EAGAIN)
goto replay;
return err;
}
}
Example 8: ipv6_rcv
int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
struct ipv6hdr *hdr;
u32 pkt_len;
struct inet6_dev *idev;
if (skb->pkt_type == PACKET_OTHERHOST) {
kfree_skb(skb);
return 0;
}
rcu_read_lock();
idev = __in6_dev_get(skb->dev);
IP6_INC_STATS_BH(idev, IPSTATS_MIB_INRECEIVES);
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
!idev || unlikely(idev->cnf.disable_ipv6)) {
IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDISCARDS);
goto drop;
}
memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
/*
* Store incoming device index. When the packet will
* be queued, we cannot refer to skb->dev anymore.
*
* BTW, when we send a packet for our own local address on a
* non-loopback interface (e.g. ethX), it is being delivered
* via the loopback interface (lo) here; skb->dev = &loopback_dev.
* It, however, should be considered as if it is being
* arrived via the sending interface (ethX), because of the
* nature of scoping architecture. --yoshfuji
*/
IP6CB(skb)->iif = skb->dst ? ip6_dst_idev(skb->dst)->dev->ifindex : dev->ifindex;
if (unlikely(!pskb_may_pull(skb, sizeof(*hdr))))
goto err;
hdr = ipv6_hdr(skb);
if (hdr->version != 6)
goto err;
skb->transport_header = skb->network_header + sizeof(*hdr);
IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
pkt_len = ntohs(hdr->payload_len);
/* pkt_len may be zero if Jumbo payload option is present */
if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
if (pkt_len + sizeof(struct ipv6hdr) > skb->len) {
IP6_INC_STATS_BH(idev, IPSTATS_MIB_INTRUNCATEDPKTS);
goto drop;
}
if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) {
IP6_INC_STATS_BH(idev, IPSTATS_MIB_INHDRERRORS);
goto drop;
}
hdr = ipv6_hdr(skb);
}
if (hdr->nexthdr == NEXTHDR_HOP) {
if (ipv6_parse_hopopts(skb) < 0) {
IP6_INC_STATS_BH(idev, IPSTATS_MIB_INHDRERRORS);
rcu_read_unlock();
return 0;
}
}
rcu_read_unlock();
	return NF_HOOK(PF_INET6, NF_IP6_PRE_ROUTING, skb, dev, NULL, ip6_rcv_finish);
err:
IP6_INC_STATS_BH(idev, IPSTATS_MIB_INHDRERRORS);
drop:
rcu_read_unlock();
kfree_skb(skb);
return 0;
}
Example 9: arp_constructor
static int arp_constructor(struct neighbour *neigh)
{
__be32 addr = *(__be32 *)neigh->primary_key;
struct net_device *dev = neigh->dev;
struct in_device *in_dev;
struct neigh_parms *parms;
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
if (in_dev == NULL) {
rcu_read_unlock();
return -EINVAL;
}
neigh->type = inet_addr_type(dev_net(dev), addr);
parms = in_dev->arp_parms;
__neigh_parms_put(neigh->parms);
neigh->parms = neigh_parms_clone(parms);
rcu_read_unlock();
if (!dev->header_ops) {
neigh->nud_state = NUD_NOARP;
neigh->ops = &arp_direct_ops;
neigh->output = neigh_direct_output;
} else {
#if 1
switch (dev->type) {
default:
break;
case ARPHRD_ROSE:
#if IS_ENABLED(CONFIG_AX25)
case ARPHRD_AX25:
#if IS_ENABLED(CONFIG_NETROM)
case ARPHRD_NETROM:
#endif
neigh->ops = &arp_broken_ops;
neigh->output = neigh->ops->output;
return 0;
#else
break;
#endif
}
#endif
if (neigh->type == RTN_MULTICAST) {
neigh->nud_state = NUD_NOARP;
arp_mc_map(addr, neigh->ha, dev, 1);
} else if (dev->flags & (IFF_NOARP | IFF_LOOPBACK)) {
neigh->nud_state = NUD_NOARP;
memcpy(neigh->ha, dev->dev_addr, dev->addr_len);
} else if (neigh->type == RTN_BROADCAST ||
(dev->flags & IFF_POINTOPOINT)) {
neigh->nud_state = NUD_NOARP;
memcpy(neigh->ha, dev->broadcast, dev->addr_len);
}
if (dev->header_ops->cache)
neigh->ops = &arp_hh_ops;
else
neigh->ops = &arp_generic_ops;
if (neigh->nud_state & NUD_VALID)
neigh->output = neigh->ops->connected_output;
else
neigh->output = neigh->ops->output;
}
return 0;
}
Example 10: ip6_input_finish
static int ip6_input_finish(struct sk_buff *skb)
{
struct inet6_protocol *ipprot;
struct sock *raw_sk;
unsigned int nhoff;
int nexthdr;
u8 hash;
struct inet6_dev *idev;
/*
* Parse extension headers
*/
rcu_read_lock();
resubmit:
idev = ip6_dst_idev(skb->dst);
if (!pskb_pull(skb, skb_transport_offset(skb)))
goto discard;
nhoff = IP6CB(skb)->nhoff;
nexthdr = skb_network_header(skb)[nhoff];
raw_sk = sk_head(&raw_v6_htable[nexthdr & (MAX_INET_PROTOS - 1)]);
if (raw_sk && !ipv6_raw_deliver(skb, nexthdr))
raw_sk = NULL;
hash = nexthdr & (MAX_INET_PROTOS - 1);
if ((ipprot = rcu_dereference(inet6_protos[hash])) != NULL) {
int ret;
if (ipprot->flags & INET6_PROTO_FINAL) {
struct ipv6hdr *hdr;
/* Free reference early: we don't need it any more,
and it may hold ip_conntrack module loaded
indefinitely. */
nf_reset(skb);
skb_postpull_rcsum(skb, skb_network_header(skb),
skb_network_header_len(skb));
hdr = ipv6_hdr(skb);
if (ipv6_addr_is_multicast(&hdr->daddr) &&
!ipv6_chk_mcast_addr(skb->dev, &hdr->daddr,
&hdr->saddr) &&
!ipv6_is_mld(skb, nexthdr))
goto discard;
}
if (!(ipprot->flags & INET6_PROTO_NOPOLICY) &&
!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard;
ret = ipprot->handler(skb);
if (ret > 0)
goto resubmit;
else if (ret == 0)
IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDELIVERS);
} else {
if (!raw_sk) {
if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
IP6_INC_STATS_BH(idev, IPSTATS_MIB_INUNKNOWNPROTOS);
icmpv6_send(skb, ICMPV6_PARAMPROB,
ICMPV6_UNK_NEXTHDR, nhoff,
skb->dev);
}
} else
IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDELIVERS);
kfree_skb(skb);
}
rcu_read_unlock();
return 0;
discard:
IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDISCARDS);
rcu_read_unlock();
kfree_skb(skb);
return 0;
}
Example 11: zap_threads
static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
struct core_state *core_state, int exit_code)
{
struct task_struct *g, *p;
unsigned long flags;
int nr = -EAGAIN;
spin_lock_irq(&tsk->sighand->siglock);
if (!signal_group_exit(tsk->signal)) {
mm->core_state = core_state;
nr = zap_process(tsk, exit_code);
}
spin_unlock_irq(&tsk->sighand->siglock);
if (unlikely(nr < 0))
return nr;
if (atomic_read(&mm->mm_users) == nr + 1)
goto done;
/*
* We should find and kill all tasks which use this mm, and we should
* count them correctly into ->nr_threads. We don't take tasklist
* lock, but this is safe wrt:
*
* fork:
* None of sub-threads can fork after zap_process(leader). All
* processes which were created before this point should be
* visible to zap_threads() because copy_process() adds the new
* process to the tail of init_task.tasks list, and lock/unlock
* of ->siglock provides a memory barrier.
*
* do_exit:
* The caller holds mm->mmap_sem. This means that the task which
* uses this mm can't pass exit_mm(), so it can't exit or clear
* its ->mm.
*
* de_thread:
 * It does list_replace_rcu(&leader->tasks, &current->tasks),
* we must see either old or new leader, this does not matter.
* However, it can change p->sighand, so lock_task_sighand(p)
* must be used. Since p->mm != NULL and we hold ->mmap_sem
* it can't fail.
*
* Note also that "g" can be the old leader with ->mm == NULL
* and already unhashed and thus removed from ->thread_group.
* This is OK, __unhash_process()->list_del_rcu() does not
* clear the ->next pointer, we will find the new leader via
* next_thread().
*/
rcu_read_lock();
for_each_process(g) {
if (g == tsk->group_leader)
continue;
if (g->flags & PF_KTHREAD)
continue;
p = g;
do {
if (p->mm) {
if (unlikely(p->mm == mm)) {
lock_task_sighand(p, &flags);
nr += zap_process(p, exit_code);
unlock_task_sighand(p, &flags);
}
break;
}
} while_each_thread(g, p);
}
rcu_read_unlock();
done:
atomic_set(&core_state->nr_threads, nr);
return nr;
}
Example 12: construct_get_dest_keyring
static void construct_get_dest_keyring(struct key **_dest_keyring)
{
struct request_key_auth *rka;
const struct cred *cred = current_cred();
struct key *dest_keyring = *_dest_keyring, *authkey;
kenter("%p", dest_keyring);
	/* find the appropriate keyring */
if (dest_keyring) {
		/* the caller supplied one */
key_get(dest_keyring);
} else {
		/* use a default keyring; falling through the cases until we
		 * find one that we actually have */
switch (cred->jit_keyring) {
case KEY_REQKEY_DEFL_DEFAULT:
case KEY_REQKEY_DEFL_REQUESTOR_KEYRING:
if (cred->request_key_auth) {
authkey = cred->request_key_auth;
down_read(&authkey->sem);
rka = authkey->payload.data;
if (!test_bit(KEY_FLAG_REVOKED,
&authkey->flags))
dest_keyring =
key_get(rka->dest_keyring);
up_read(&authkey->sem);
if (dest_keyring)
break;
}
case KEY_REQKEY_DEFL_THREAD_KEYRING:
dest_keyring = key_get(cred->thread_keyring);
if (dest_keyring)
break;
case KEY_REQKEY_DEFL_PROCESS_KEYRING:
dest_keyring = key_get(cred->tgcred->process_keyring);
if (dest_keyring)
break;
case KEY_REQKEY_DEFL_SESSION_KEYRING:
rcu_read_lock();
dest_keyring = key_get(
rcu_dereference(cred->tgcred->session_keyring));
rcu_read_unlock();
if (dest_keyring)
break;
case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
dest_keyring =
key_get(cred->user->session_keyring);
break;
case KEY_REQKEY_DEFL_USER_KEYRING:
dest_keyring = key_get(cred->user->uid_keyring);
break;
case KEY_REQKEY_DEFL_GROUP_KEYRING:
default:
BUG();
}
}
*_dest_keyring = dest_keyring;
kleave(" [dk %d]", key_serial(dest_keyring));
return;
}
Example 13: ___ieee80211_stop_tx_ba_session
//......... portions of this code omitted .........
/* not even started yet! */
ieee80211_assign_tid_tx(sta, tid, NULL);
spin_unlock_bh(&sta->lock);
kfree_rcu(tid_tx, rcu_head);
return 0;
}
set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
spin_unlock_bh(&sta->lock);
ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n",
sta->sta.addr, tid);
del_timer_sync(&tid_tx->addba_resp_timer);
del_timer_sync(&tid_tx->session_timer);
/*
* After this packets are no longer handed right through
* to the driver but are put onto tid_tx->pending instead,
* with locking to ensure proper access.
*/
clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
/*
* There might be a few packets being processed right now (on
* another CPU) that have already gotten past the aggregation
* check when it was still OPERATIONAL and consequently have
* IEEE80211_TX_CTL_AMPDU set. In that case, this code might
* call into the driver at the same time or even before the
* TX paths calls into it, which could confuse the driver.
*
* Wait for all currently running TX paths to finish before
* telling the driver. New packets will not go through since
* the aggregation session is no longer OPERATIONAL.
*/
synchronize_net();
tid_tx->stop_initiator = reason == AGG_STOP_PEER_REQUEST ?
WLAN_BACK_RECIPIENT :
WLAN_BACK_INITIATOR;
tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST;
	ret = drv_ampdu_action(local, sta->sdata, &params);
/* HW shall not deny going back to legacy */
if (WARN_ON(ret)) {
/*
* We may have pending packets get stuck in this case...
* Not bothering with a workaround for now.
*/
}
/*
* In the case of AGG_STOP_DESTROY_STA, the driver won't
* necessarily call ieee80211_stop_tx_ba_cb(), so this may
* seem like we can leave the tid_tx data pending forever.
* This is true, in a way, but "forever" is only until the
* station struct is actually destroyed. In the meantime,
* leaving it around ensures that we don't transmit packets
* to the driver on this TID which might confuse it.
*/
return 0;
}
/*
* After sending add Block Ack request we activated a timer until
* add Block Ack response will arrive from the recipient.
* If this timer expires sta_addba_resp_timer_expired will be executed.
*/
static void sta_addba_resp_timer_expired(unsigned long data)
{
/* not an elegant detour, but there is no choice as the timer passes
* only one argument, and both sta_info and TID are needed, so init
* flow in sta_info_create gives the TID as data, while the timer_to_id
* array gives the sta through container_of */
u16 tid = *(u8 *)data;
struct sta_info *sta = container_of((void *)data,
struct sta_info, timer_to_tid[tid]);
struct tid_ampdu_tx *tid_tx;
/* check if the TID waits for addBA response */
rcu_read_lock();
tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
if (!tid_tx ||
test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
rcu_read_unlock();
ht_dbg(sta->sdata,
"timer expired on %pM tid %d but we are not (or no longer) expecting addBA response there\n",
sta->sta.addr, tid);
return;
}
ht_dbg(sta->sdata, "addBA response timer expired on %pM tid %d\n",
sta->sta.addr, tid);
ieee80211_stop_tx_ba_session(&sta->sta, tid);
rcu_read_unlock();
}
Example 14: ieee80211_tx_ba_session_handle_start
void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
struct tid_ampdu_tx *tid_tx;
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_ampdu_params params = {
.sta = &sta->sta,
.action = IEEE80211_AMPDU_TX_START,
.tid = tid,
.buf_size = 0,
.amsdu = false,
.timeout = 0,
};
int ret;
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
/*
* Start queuing up packets for this aggregation session.
* We're going to release them once the driver is OK with
* that.
*/
clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
ieee80211_agg_stop_txq(sta, tid);
/*
* Make sure no packets are being processed. This ensures that
* we have a valid starting sequence number and that in-flight
* packets have been flushed out and no packets for this TID
* will go into the driver during the ampdu_action call.
*/
synchronize_net();
params.ssn = sta->tid_seq[tid] >> 4;
	ret = drv_ampdu_action(local, sdata, &params);
if (ret) {
ht_dbg(sdata,
"BA request denied - HW unavailable for %pM tid %d\n",
sta->sta.addr, tid);
spin_lock_bh(&sta->lock);
ieee80211_agg_splice_packets(sdata, tid_tx, tid);
ieee80211_assign_tid_tx(sta, tid, NULL);
ieee80211_agg_splice_finish(sdata, tid);
spin_unlock_bh(&sta->lock);
ieee80211_agg_start_txq(sta, tid, false);
kfree_rcu(tid_tx, rcu_head);
return;
}
/* activate the timer for the recipient's addBA response */
mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
ht_dbg(sdata, "activated addBA response timer on %pM tid %d\n",
sta->sta.addr, tid);
spin_lock_bh(&sta->lock);
sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
sta->ampdu_mlme.addba_req_num[tid]++;
spin_unlock_bh(&sta->lock);
/* send AddBA request */
ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
tid_tx->dialog_token, params.ssn,
IEEE80211_MAX_AMPDU_BUF,
tid_tx->timeout);
}
/*
* After accepting the AddBA Response we activated a timer,
* resetting it after each frame that we send.
*/
static void sta_tx_agg_session_timer_expired(unsigned long data)
{
/* not an elegant detour, but there is no choice as the timer passes
* only one argument, and various sta_info are needed here, so init
* flow in sta_info_create gives the TID as data, while the timer_to_id
* array gives the sta through container_of */
u8 *ptid = (u8 *)data;
u8 *timer_to_id = ptid - *ptid;
struct sta_info *sta = container_of(timer_to_id, struct sta_info,
timer_to_tid[0]);
struct tid_ampdu_tx *tid_tx;
unsigned long timeout;
rcu_read_lock();
tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[*ptid]);
if (!tid_tx || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
rcu_read_unlock();
return;
}
timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout);
if (time_is_after_jiffies(timeout)) {
mod_timer(&tid_tx->session_timer, timeout);
rcu_read_unlock();
return;
}
//......... portions of this code omitted .........
Example 15: xfs_iget
/*
* Look up an inode by number in the given file system.
* The inode is looked up in the cache held in each AG.
* If the inode is found in the cache, initialise the vfs inode
* if necessary.
*
* If it is not in core, read it in from the file system's device,
* add it to the cache and initialise the vfs inode.
*
* The inode is locked according to the value of the lock_flags parameter.
* This flag parameter indicates how and if the inode's IO lock and inode lock
* should be taken.
*
* mp -- the mount point structure for the current file system. It points
* to the inode hash table.
* tp -- a pointer to the current transaction if there is one. This is
* simply passed through to the xfs_iread() call.
* ino -- the number of the inode desired. This is the unique identifier
* within the file system for the inode being requested.
* lock_flags -- flags indicating how to lock the inode. See the comment
* for xfs_ilock() for a list of valid values.
*/
int
xfs_iget(
xfs_mount_t *mp,
xfs_trans_t *tp,
xfs_ino_t ino,
uint flags,
uint lock_flags,
xfs_inode_t **ipp)
{
xfs_inode_t *ip;
int error;
xfs_perag_t *pag;
xfs_agino_t agino;
/* reject inode numbers outside existing AGs */
if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
return EINVAL;
/* get the perag structure and ensure that it's inode capable */
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
agino = XFS_INO_TO_AGINO(mp, ino);
again:
error = 0;
rcu_read_lock();
ip = radix_tree_lookup(&pag->pag_ici_root, agino);
if (ip) {
error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
if (error)
goto out_error_or_again;
} else {
rcu_read_unlock();
XFS_STATS_INC(xs_ig_missed);
error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
flags, lock_flags);
if (error)
goto out_error_or_again;
}
xfs_perag_put(pag);
*ipp = ip;
ASSERT(ip->i_df.if_ext_max ==
XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));
/*
* If we have a real type for an on-disk inode, we can set ops(&unlock)
* now. If it's a new inode being created, xfs_ialloc will handle it.
*/
if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
xfs_setup_inode(ip);
return 0;
out_error_or_again:
if (error == EAGAIN) {
delay(1);
goto again;
}
xfs_perag_put(pag);
return error;
}
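To round out the picture, here is what an updater does on the other side of all these read locks: it publishes a new version with rcu_assign_pointer() and waits for pre-existing readers with synchronize_rcu() (or defers the free with kfree_rcu(), as Examples 13 and 14 do) before freeing the old version. A minimal sketch reusing the hypothetical global_ptr and struct my_data from the introduction; the update_lock and update_value() names are likewise invented for illustration:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(update_lock);	/* serializes hypothetical updaters */

static void update_value(int new_value)
{
	struct my_data *new_p, *old_p;

	new_p = kmalloc(sizeof(*new_p), GFP_KERNEL);
	if (!new_p)
		return;
	new_p->value = new_value;

	spin_lock(&update_lock);
	old_p = rcu_dereference_protected(global_ptr,
					  lockdep_is_held(&update_lock));
	rcu_assign_pointer(global_ptr, new_p);	/* publish the new version */
	spin_unlock(&update_lock);

	synchronize_rcu();	/* wait for readers that may still see old_p */
	kfree(old_p);		/* now no reader can hold old_p */
}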