This article collects and summarizes typical usage examples of the C++ spin_unlock_bh function. If you have been wondering what spin_unlock_bh does, how to use it, or what real-world calls look like, the hand-picked code examples below may help.
The following presents 15 code examples of spin_unlock_bh, sorted by popularity by default. You can vote for the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
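Before the examples, a quick orientation: although labeled C++, these snippets are Linux kernel C. spin_lock_bh() takes a spinlock and disables bottom-half (softirq) processing on the local CPU; spin_unlock_bh() releases the lock and re-enables bottom halves. The snippet below is a minimal, self-contained sketch of that pattern written for this article; the module, list, and counter names are all invented and do not come from any of the examples that follow.

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

static DEFINE_SPINLOCK(demo_lock);              /* guards demo_list and demo_count */
static LIST_HEAD(demo_list);
static unsigned long demo_count;

struct demo_entry {
        struct list_head node;
        int value;
};

static int demo_add(int value)
{
        struct demo_entry *e = kmalloc(sizeof(*e), GFP_KERNEL);

        if (!e)
                return -ENOMEM;
        e->value = value;

        spin_lock_bh(&demo_lock);               /* take the lock, block local softirqs */
        list_add_tail(&e->node, &demo_list);
        demo_count++;
        spin_unlock_bh(&demo_lock);             /* drop the lock, re-enable softirqs */
        return 0;
}

static int __init demo_init(void)
{
        return demo_add(42);
}

static void __exit demo_exit(void)
{
        struct demo_entry *e, *tmp;

        spin_lock_bh(&demo_lock);
        list_for_each_entry_safe(e, tmp, &demo_list, node) {
                list_del(&e->node);
                kfree(e);
        }
        spin_unlock_bh(&demo_lock);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");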
Example 1: ax25_linkfail_release
void ax25_linkfail_release(struct ax25_linkfail *lf)
{
spin_lock_bh(&linkfail_lock);
hlist_del_init(&lf->lf_node);
spin_unlock_bh(&linkfail_lock);
}
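For context, the registration counterpart presumably adds the node to the same hash list under the same lock. A hedged sketch of what that side would look like (the list head name linkfail_list is assumed here, since it does not appear in the snippet above):

/* Hypothetical registration counterpart; the linkfail_list name is assumed. */
void ax25_linkfail_register(struct ax25_linkfail *lf)
{
        spin_lock_bh(&linkfail_lock);
        hlist_add_head(&lf->lf_node, &linkfail_list);
        spin_unlock_bh(&linkfail_lock);
}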
Example 2: ieee80211_agg_tx_operational
static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
struct sta_info *sta, u16 tid)
{
struct tid_ampdu_tx *tid_tx;
struct ieee80211_ampdu_params params = {
.sta = &sta->sta,
.action = IEEE80211_AMPDU_TX_OPERATIONAL,
.tid = tid,
.timeout = 0,
.ssn = 0,
};
lockdep_assert_held(&sta->ampdu_mlme.mtx);
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
params.buf_size = tid_tx->buf_size;
params.amsdu = tid_tx->amsdu;
ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n",
sta->sta.addr, tid);
drv_ampdu_action(local, sta->sdata, &params);
/*
* synchronize with TX path, while splicing the TX path
* should block so it won't put more packets onto pending.
*/
spin_lock_bh(&sta->lock);
ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
/*
* Now mark as operational. This will be visible
* in the TX path, and lets it go lock-free in
* the common case.
*/
set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
ieee80211_agg_splice_finish(sta->sdata, tid);
spin_unlock_bh(&sta->lock);
ieee80211_agg_start_txq(sta, tid, true);
}
void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
struct tid_ampdu_tx *tid_tx;
trace_api_start_tx_ba_cb(sdata, ra, tid);
if (tid >= IEEE80211_NUM_TIDS) {
ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
tid, IEEE80211_NUM_TIDS);
return;
}
mutex_lock(&local->sta_mtx);
sta = sta_info_get_bss(sdata, ra);
if (!sta) {
mutex_unlock(&local->sta_mtx);
ht_dbg(sdata, "Could not find station: %pM\n", ra);
return;
}
mutex_lock(&sta->ampdu_mlme.mtx);
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
if (WARN_ON(!tid_tx)) {
ht_dbg(sdata, "addBA was not requested!\n");
goto unlock;
}
if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
goto unlock;
if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
ieee80211_agg_tx_operational(local, sta, tid);
unlock:
mutex_unlock(&sta->ampdu_mlme.mtx);
mutex_unlock(&local->sta_mtx);
}
void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
const u8 *ra, u16 tid)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
struct ieee80211_ra_tid *ra_tid;
struct sk_buff *skb = dev_alloc_skb(0);
if (unlikely(!skb))
return;
ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
memcpy(&ra_tid->ra, ra, ETH_ALEN);
ra_tid->tid = tid;
//......... (rest of the code omitted) .........
Example 3: mipi_dsi_off
static int mipi_dsi_off(struct platform_device *pdev)
{
int ret = 0;
struct msm_fb_data_type *mfd;
struct msm_panel_info *pinfo;
pr_debug("%s+:\n", __func__);
mfd = platform_get_drvdata(pdev);
pinfo = &mfd->panel_info;
if (mdp_rev >= MDP_REV_41)
mutex_lock(&mfd->dma->ov_mutex);
else
down(&mfd->dma->mutex);
if (mfd->panel_info.type == MIPI_CMD_PANEL) {
mipi_dsi_prepare_ahb_clocks();
mipi_dsi_ahb_ctrl(1);
mipi_dsi_clk_enable();
/* make sure dsi_cmd_mdp is idle */
mipi_dsi_cmd_mdp_busy();
}
/*
* Description: change to DSI_CMD_MODE since it is needed to
* tx the DCS display off command to the panel
*/
mipi_dsi_op_mode_config(DSI_CMD_MODE);
if (mfd->panel_info.type == MIPI_CMD_PANEL) {
if (pinfo->lcd.vsync_enable) {
if (pinfo->lcd.hw_vsync_mode && vsync_gpio >= 0) {
if (MDP_REV_303 != mdp_rev)
gpio_free(vsync_gpio);
}
mipi_dsi_set_tear_off(mfd);
}
}
ret = panel_next_off(pdev);
spin_lock_bh(&dsi_clk_lock);
mipi_dsi_clk_disable();
/* disable dsi engine */
MIPI_OUTP(MIPI_DSI_BASE + 0x0000, 0);
mipi_dsi_phy_ctrl(0);
mipi_dsi_ahb_ctrl(0);
spin_unlock_bh(&dsi_clk_lock);
mipi_dsi_unprepare_clocks();
mipi_dsi_unprepare_ahb_clocks();
if (mipi_dsi_pdata && mipi_dsi_pdata->dsi_power_save)
mipi_dsi_pdata->dsi_power_save(0);
if (mdp_rev >= MDP_REV_41)
mutex_unlock(&mfd->dma->ov_mutex);
else
up(&mfd->dma->mutex);
printk("LCD%s-:\n", __func__);
return ret;
}
Example 4: br_device_event
/*
* Handle changes in state of network devices enslaved to a bridge.
*
* Note: don't care about up/down if bridge itself is down, because
* port state is checked when bridge is brought up.
*/
static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net_bridge_port *p;
struct net_bridge *br;
bool changed_addr;
int err;
/* register of bridge completed, add sysfs entries */
if ((dev->priv_flags & IFF_EBRIDGE) && event == NETDEV_REGISTER) {
br_sysfs_addbr(dev);
return NOTIFY_DONE;
}
/* not a port of a bridge */
p = br_port_get_rtnl(dev);
if (!p)
return NOTIFY_DONE;
br = p->br;
switch (event) {
case NETDEV_CHANGEMTU:
dev_set_mtu(br->dev, br_min_mtu(br));
break;
case NETDEV_CHANGEADDR:
spin_lock_bh(&br->lock);
br_fdb_changeaddr(p, dev->dev_addr);
changed_addr = br_stp_recalculate_bridge_id(br);
spin_unlock_bh(&br->lock);
if (changed_addr)
call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
break;
case NETDEV_CHANGE:
br_port_carrier_check(p);
break;
case NETDEV_FEAT_CHANGE:
netdev_update_features(br->dev);
break;
case NETDEV_DOWN:
spin_lock_bh(&br->lock);
if (br->dev->flags & IFF_UP)
br_stp_disable_port(p);
spin_unlock_bh(&br->lock);
break;
case NETDEV_UP:
if (netif_running(br->dev) && netif_oper_up(dev)) {
spin_lock_bh(&br->lock);
br_stp_enable_port(p);
spin_unlock_bh(&br->lock);
}
break;
case NETDEV_UNREGISTER:
br_del_if(br, dev);
break;
case NETDEV_CHANGENAME:
err = br_sysfs_renameif(p);
if (err)
return notifier_from_errno(err);
break;
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid the underlying device from changing its type. */
return NOTIFY_BAD;
case NETDEV_RESEND_IGMP:
/* Propagate to master device */
call_netdevice_notifiers(event, br->dev);
break;
}
/* Events that may cause spanning tree to refresh */
if (event == NETDEV_CHANGEADDR || event == NETDEV_UP ||
event == NETDEV_CHANGE || event == NETDEV_DOWN)
br_ifinfo_notify(RTM_NEWLINK, p);
return NOTIFY_DONE;
}
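For readers unfamiliar with the mechanism, a handler like br_device_event() is attached with register_netdevice_notifier(). The following is a minimal, hypothetical module that does nothing but register such a handler; the stub body is unrelated to the bridge code above.

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

static int demo_netdev_event(struct notifier_block *unused,
                             unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        pr_info("demo: event %lu on %s\n", event, dev->name);
        return NOTIFY_DONE;
}

static struct notifier_block demo_netdev_nb = {
        .notifier_call = demo_netdev_event,
};

static int __init demo_notifier_init(void)
{
        return register_netdevice_notifier(&demo_netdev_nb);
}

static void __exit demo_notifier_exit(void)
{
        unregister_netdevice_notifier(&demo_netdev_nb);
}

module_init(demo_notifier_init);
module_exit(demo_notifier_exit);
MODULE_LICENSE("GPL");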
Example 5: ieee80211_tx_ba_session_handle_start
void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
struct tid_ampdu_tx *tid_tx;
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_ampdu_params params = {
.sta = &sta->sta,
.action = IEEE80211_AMPDU_TX_START,
.tid = tid,
.buf_size = 0,
.amsdu = false,
.timeout = 0,
};
int ret;
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
/*
* Start queuing up packets for this aggregation session.
* We're going to release them once the driver is OK with
* that.
*/
clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
ieee80211_agg_stop_txq(sta, tid);
/*
* Make sure no packets are being processed. This ensures that
* we have a valid starting sequence number and that in-flight
* packets have been flushed out and no packets for this TID
* will go into the driver during the ampdu_action call.
*/
synchronize_net();
params.ssn = sta->tid_seq[tid] >> 4;
ret = drv_ampdu_action(local, sdata, &params);
if (ret) {
ht_dbg(sdata,
"BA request denied - HW unavailable for %pM tid %d\n",
sta->sta.addr, tid);
spin_lock_bh(&sta->lock);
ieee80211_agg_splice_packets(sdata, tid_tx, tid);
ieee80211_assign_tid_tx(sta, tid, NULL);
ieee80211_agg_splice_finish(sdata, tid);
spin_unlock_bh(&sta->lock);
ieee80211_agg_start_txq(sta, tid, false);
kfree_rcu(tid_tx, rcu_head);
return;
}
/* activate the timer for the recipient's addBA response */
mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
ht_dbg(sdata, "activated addBA response timer on %pM tid %d\n",
sta->sta.addr, tid);
spin_lock_bh(&sta->lock);
sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
sta->ampdu_mlme.addba_req_num[tid]++;
spin_unlock_bh(&sta->lock);
/* send AddBA request */
ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
tid_tx->dialog_token, params.ssn,
IEEE80211_MAX_AMPDU_BUF,
tid_tx->timeout);
}
/*
* After accepting the AddBA Response we activated a timer,
* resetting it after each frame that we send.
*/
static void sta_tx_agg_session_timer_expired(unsigned long data)
{
/* not an elegant detour, but there is no choice as the timer passes
* only one argument, and various sta_info are needed here, so init
* flow in sta_info_create gives the TID as data, while the timer_to_id
* array gives the sta through container_of */
u8 *ptid = (u8 *)data;
u8 *timer_to_id = ptid - *ptid;
struct sta_info *sta = container_of(timer_to_id, struct sta_info,
timer_to_tid[0]);
struct tid_ampdu_tx *tid_tx;
unsigned long timeout;
rcu_read_lock();
tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[*ptid]);
if (!tid_tx || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
rcu_read_unlock();
return;
}
timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout);
if (time_is_after_jiffies(timeout)) {
mod_timer(&tid_tx->session_timer, timeout);
rcu_read_unlock();
return;
}
//......... (rest of the code omitted) .........
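Note the failure path above: tid_tx is unpublished with ieee80211_assign_tid_tx(sta, tid, NULL) while sta->lock is held, and only afterwards freed with kfree_rcu(), so RCU readers either still see the old pointer or see NULL, never freed memory. Below is a minimal, hypothetical sketch of that unpublish-then-kfree_rcu pattern; every name in it is invented.

#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_state {
        int value;
        struct rcu_head rcu;
};

static DEFINE_SPINLOCK(demo_state_lock);
static struct demo_state __rcu *demo_state_ptr;

static void demo_retire(void)
{
        struct demo_state *old;

        spin_lock_bh(&demo_state_lock);
        old = rcu_dereference_protected(demo_state_ptr,
                                        lockdep_is_held(&demo_state_lock));
        RCU_INIT_POINTER(demo_state_ptr, NULL); /* unpublish under the lock */
        spin_unlock_bh(&demo_state_lock);

        if (old)
                kfree_rcu(old, rcu);            /* freed after a grace period */
}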
Example 6: ath10k_htt_mgmt_tx
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
struct ath10k *ar = htt->ar;
struct device *dev = ar->dev;
struct sk_buff *txdesc = NULL;
struct htt_cmd *cmd;
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
u8 vdev_id = skb_cb->vdev_id;
int len = 0;
int msdu_id = -1;
int res;
res = ath10k_htt_tx_inc_pending(htt);
if (res)
goto err;
len += sizeof(cmd->hdr);
len += sizeof(cmd->mgmt_tx);
spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
if (res < 0) {
spin_unlock_bh(&htt->tx_lock);
goto err_tx_dec;
}
msdu_id = res;
spin_unlock_bh(&htt->tx_lock);
txdesc = ath10k_htc_alloc_skb(ar, len);
if (!txdesc) {
res = -ENOMEM;
goto err_free_msdu_id;
}
skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
DMA_TO_DEVICE);
res = dma_mapping_error(dev, skb_cb->paddr);
if (res)
goto err_free_txdesc;
skb_put(txdesc, len);
cmd = (struct htt_cmd *)txdesc->data;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
memcpy(cmd->mgmt_tx.hdr, msdu->data,
min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
skb_cb->htt.txbuf = NULL;
res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
if (res)
goto err_unmap_msdu;
return 0;
err_unmap_msdu:
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
dev_kfree_skb_any(txdesc);
err_free_msdu_id:
spin_lock_bh(&htt->tx_lock);
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
ath10k_htt_tx_dec_pending(htt);
err:
return res;
}
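The error handling in this example follows a common shape: an id is allocated under the BH-protected tx_lock, the lock is dropped for the slow work (skb allocation, DMA mapping, HTC send), and on failure the lock is re-taken just long enough to free the id again. A stripped-down, hypothetical sketch of that shape using an idr; all names and the stub send function are invented.

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static DEFINE_SPINLOCK(demo_tx_lock);
static DEFINE_IDR(demo_tx_ids);

/* Stand-in for the real (possibly sleeping) send path. */
static int demo_send_to_hw(void *pkt, int id)
{
        return 0;
}

static int demo_tx(void *pkt)
{
        int id, ret;

        spin_lock_bh(&demo_tx_lock);
        /* GFP_ATOMIC because the BH lock is held across the allocation. */
        id = idr_alloc(&demo_tx_ids, pkt, 0, 128, GFP_ATOMIC);
        spin_unlock_bh(&demo_tx_lock);
        if (id < 0)
                return id;

        ret = demo_send_to_hw(pkt, id);         /* may sleep: lock not held */
        if (ret)
                goto err_free_id;
        return 0;

err_free_id:
        spin_lock_bh(&demo_tx_lock);            /* re-take only to undo our step */
        idr_remove(&demo_tx_ids, id);
        spin_unlock_bh(&demo_tx_lock);
        return ret;
}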
Example 7: hwmp_prep_frame_process
static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt,
const u8 *prep_elem, u32 metric)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct mesh_path *mpath;
const u8 *target_addr, *orig_addr;
u8 ttl, hopcount, flags;
u8 next_hop[ETH_ALEN];
u32 target_sn, orig_sn, lifetime;
mhwmp_dbg(sdata, "received PREP from %pM\n",
PREP_IE_TARGET_ADDR(prep_elem));
orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
if (ether_addr_equal(orig_addr, sdata->vif.addr))
/* destination, no forwarding required */
return;
if (!ifmsh->mshcfg.dot11MeshForwarding)
return;
ttl = PREP_IE_TTL(prep_elem);
if (ttl <= 1) {
sdata->u.mesh.mshstats.dropped_frames_ttl++;
return;
}
rcu_read_lock();
mpath = mesh_path_lookup(sdata, orig_addr);
if (mpath)
spin_lock_bh(&mpath->state_lock);
else
goto fail;
if (!(mpath->flags & MESH_PATH_ACTIVE)) {
spin_unlock_bh(&mpath->state_lock);
goto fail;
}
memcpy(next_hop, next_hop_deref_protected(mpath)->sta.addr, ETH_ALEN);
spin_unlock_bh(&mpath->state_lock);
--ttl;
flags = PREP_IE_FLAGS(prep_elem);
lifetime = PREP_IE_LIFETIME(prep_elem);
hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
target_addr = PREP_IE_TARGET_ADDR(prep_elem);
target_sn = PREP_IE_TARGET_SN(prep_elem);
orig_sn = PREP_IE_ORIG_SN(prep_elem);
mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, orig_sn, 0,
target_addr, target_sn, next_hop, hopcount,
ttl, lifetime, metric, 0, sdata);
rcu_read_unlock();
sdata->u.mesh.mshstats.fwded_unicast++;
sdata->u.mesh.mshstats.fwded_frames++;
return;
fail:
rcu_read_unlock();
sdata->u.mesh.mshstats.dropped_frames_no_route++;
}
Example 8: tipc_create
static int tipc_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
const struct proto_ops *ops;
socket_state state;
struct sock *sk;
struct tipc_port *tp_ptr;
/* Validate arguments */
if (unlikely(protocol != 0))
return -EPROTONOSUPPORT;
switch (sock->type) {
case SOCK_STREAM:
ops = &stream_ops;
state = SS_UNCONNECTED;
break;
case SOCK_SEQPACKET:
ops = &packet_ops;
state = SS_UNCONNECTED;
break;
case SOCK_DGRAM:
case SOCK_RDM:
ops = &msg_ops;
state = SS_READY;
break;
default:
return -EPROTOTYPE;
}
/* Allocate socket's protocol area */
sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
if (sk == NULL)
return -ENOMEM;
/* Allocate TIPC port for socket to use */
tp_ptr = tipc_createport_raw(sk, &dispatch, &wakeupdispatch,
TIPC_LOW_IMPORTANCE);
if (unlikely(!tp_ptr)) {
sk_free(sk);
return -ENOMEM;
}
/* Finish initializing socket data structures */
sock->ops = ops;
sock->state = state;
sock_init_data(sock, sk);
sk->sk_backlog_rcv = backlog_rcv;
tipc_sk(sk)->p = tp_ptr;
tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT;
spin_unlock_bh(tp_ptr->lock);
if (sock->state == SS_READY) {
tipc_set_portunreturnable(tp_ptr->ref, 1);
if (sock->type == SOCK_DGRAM)
tipc_set_portunreliable(tp_ptr->ref, 1);
}
return 0;
}
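One subtlety here: spin_unlock_bh(tp_ptr->lock) releases a lock that tipc_createport_raw() apparently returned holding, so the caller can finish wiring up the socket before the port becomes visible to softirq code. A hedged, generic sketch of that hand-off convention (all names are invented):

#include <linux/spinlock.h>
#include <linux/slab.h>

struct demo_port {
        spinlock_t lock;
        int ref;
        void *owner;
};

/* Allocates a port and deliberately returns with port->lock held. */
static struct demo_port *demo_port_create_locked(void *owner)
{
        struct demo_port *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
                return NULL;
        spin_lock_init(&p->lock);
        spin_lock_bh(&p->lock);
        p->owner = owner;
        p->ref = 1;
        return p;                               /* still locked on purpose */
}

static struct demo_port *demo_create(void *owner)
{
        struct demo_port *p = demo_port_create_locked(owner);

        if (!p)
                return NULL;
        /* ...finish initialization while nothing else can see the port... */
        spin_unlock_bh(&p->lock);
        return p;
}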
Example 9: ath9k_wiphy_add
int ath9k_wiphy_add(struct ath_softc *sc)
{
int i, error;
struct ath_wiphy *aphy;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ieee80211_hw *hw;
u8 addr[ETH_ALEN];
hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy), &ath9k_ops);
if (hw == NULL)
return -ENOMEM;
spin_lock_bh(&sc->wiphy_lock);
for (i = 0; i < sc->num_sec_wiphy; i++) {
if (sc->sec_wiphy[i] == NULL)
break;
}
if (i == sc->num_sec_wiphy) {
/* No empty slot available; increase array length */
struct ath_wiphy **n;
n = krealloc(sc->sec_wiphy,
(sc->num_sec_wiphy + 1) *
sizeof(struct ath_wiphy *),
GFP_ATOMIC);
if (n == NULL) {
spin_unlock_bh(&sc->wiphy_lock);
ieee80211_free_hw(hw);
return -ENOMEM;
}
n[i] = NULL;
sc->sec_wiphy = n;
sc->num_sec_wiphy++;
}
SET_IEEE80211_DEV(hw, sc->dev);
aphy = hw->priv;
aphy->sc = sc;
aphy->hw = hw;
sc->sec_wiphy[i] = aphy;
spin_unlock_bh(&sc->wiphy_lock);
memcpy(addr, common->macaddr, ETH_ALEN);
addr[0] |= 0x02; /* Locally managed address */
/*
* XOR virtual wiphy index into the least significant bits to generate
* a different MAC address for each virtual wiphy.
*/
addr[5] ^= i & 0xff;
addr[4] ^= (i & 0xff00) >> 8;
addr[3] ^= (i & 0xff0000) >> 16;
SET_IEEE80211_PERM_ADDR(hw, addr);
ath9k_set_hw_capab(sc, hw);
error = ieee80211_register_hw(hw);
if (error == 0) {
/* Make sure wiphy scheduler is started (if enabled) */
ath9k_wiphy_set_scheduler(sc, sc->wiphy_scheduler_int);
}
return error;
}
Example 10: ath9k_htc_start
static int ath9k_htc_start(struct ieee80211_hw *hw)
{
struct ath9k_htc_priv *priv = hw->priv;
struct ath_hw *ah = priv->ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_channel *curchan = hw->conf.channel;
struct ath9k_channel *init_channel;
int ret = 0;
enum htc_phymode mode;
__be16 htc_mode;
u8 cmd_rsp;
mutex_lock(&priv->mutex);
ath_dbg(common, ATH_DBG_CONFIG,
"Starting driver with initial channel: %d MHz\n",
curchan->center_freq);
/* Ensure that HW is awake before flushing RX */
ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
WMI_CMD(WMI_FLUSH_RECV_CMDID);
/* setup initial channel */
init_channel = ath9k_cmn_get_curchannel(hw, ah);
ath9k_hw_htc_resetinit(ah);
ret = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
if (ret) {
ath_err(common,
"Unable to reset hardware; reset status %d (freq %u MHz)\n",
ret, curchan->center_freq);
mutex_unlock(&priv->mutex);
return ret;
}
ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
&priv->curtxpow);
mode = ath9k_htc_get_curmode(priv, init_channel);
htc_mode = cpu_to_be16(mode);
WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode);
WMI_CMD(WMI_ATH_INIT_CMDID);
WMI_CMD(WMI_START_RECV_CMDID);
ath9k_host_rx_init(priv);
ret = ath9k_htc_update_cap_target(priv, 0);
if (ret)
ath_dbg(common, ATH_DBG_CONFIG,
"Failed to update capability in target\n");
priv->op_flags &= ~OP_INVALID;
htc_start(priv->htc);
spin_lock_bh(&priv->tx.tx_lock);
priv->tx.flags &= ~ATH9K_HTC_OP_TX_QUEUES_STOP;
spin_unlock_bh(&priv->tx.tx_lock);
ieee80211_wake_queues(hw);
mod_timer(&priv->tx.cleanup_timer,
jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL));
if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) {
ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
AR_STOMP_LOW_WLAN_WGHT);
ath9k_hw_btcoex_enable(ah);
ath_htc_resume_btcoex_work(priv);
}
mutex_unlock(&priv->mutex);
return ret;
}
Example 11: autofw_expect
static void
autofw_expect(struct nf_conn *ct, struct nf_conntrack_expect *exp)
{
struct nf_nat_range pre_range;
u_int32_t newdstip, newsrcip;
u_int16_t port;
int ret;
struct nf_conn_help *help;
struct nf_conn *exp_ct = exp->master;
struct nf_conntrack_expect *newexp;
int count;
/* The expect has been removed from the expect list, but it isn't freed yet. */
help = nfct_help(exp_ct);
DEBUGP("autofw_nat_expected: got ");
NF_CT_DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
spin_lock_bh(&nf_nat_autofw_lock);
port = ntohs(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all);
newdstip = exp->tuple.dst.u3.ip;
newsrcip = exp->tuple.src.u3.ip;
if (port < ntohs(help->help.ct_autofw_info.dport[0]) ||
port > ntohs(help->help.ct_autofw_info.dport[1])) {
spin_unlock_bh(&nf_nat_autofw_lock);
return;
}
/* Only need to do PRE_ROUTING */
port -= ntohs(help->help.ct_autofw_info.dport[0]);
port += ntohs(help->help.ct_autofw_info.to[0]);
pre_range.flags = IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED;
pre_range.min_ip = pre_range.max_ip = newdstip;
pre_range.min.all = pre_range.max.all = htons(port);
nf_nat_setup_info(ct, &pre_range, NF_IP_PRE_ROUTING);
spin_unlock_bh(&nf_nat_autofw_lock);
/* Add expect again */
/* alloc will set exp->master = exp_ct */
newexp = nf_conntrack_expect_alloc(exp_ct);
if (!newexp)
return;
newexp->tuple.src.u3.ip = exp->tuple.src.u3.ip;
newexp->tuple.dst.protonum = exp->tuple.dst.protonum;
newexp->mask.src.u3.ip = 0xFFFFFFFF;
newexp->mask.dst.protonum = 0xFF;
newexp->tuple.dst.u3.ip = exp->tuple.dst.u3.ip;
newexp->mask.dst.u3.ip = 0x0;
for (count = 1; count < NF_CT_TUPLE_L3SIZE; count++) {
newexp->tuple.src.u3.all[count] = 0x0;
newexp->tuple.dst.u3.all[count] = 0x0;
}
newexp->mask.dst.u.all = 0x0;
newexp->mask.src.u.all = 0x0;
newexp->mask.src.l3num = 0x0;
newexp->expectfn = autofw_expect;
newexp->helper = NULL;
newexp->flags = 0;
/*
* exp->timeout.expires will be set to
* (jiffies + helper->timeout * HZ) when the expect is inserted.
*/
ret = nf_conntrack_expect_related(newexp);
if (ret == 0)
nf_conntrack_expect_put(newexp);
}
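Also worth noting: the function drops nf_nat_autofw_lock before calling nf_conntrack_expect_alloc(), since allocation may sleep and sleeping while holding a BH spinlock is not allowed. A minimal, hypothetical illustration of that rule (names invented):

#include <linux/spinlock.h>
#include <linux/slab.h>

static DEFINE_SPINLOCK(demo_cfg_lock);
static int demo_cfg_value;

static int *demo_snapshot_config(void)
{
        int value;
        int *copy;

        spin_lock_bh(&demo_cfg_lock);
        value = demo_cfg_value;                 /* read shared state under the lock */
        spin_unlock_bh(&demo_cfg_lock);

        copy = kmalloc(sizeof(*copy), GFP_KERNEL);      /* may sleep: lock released first */
        if (!copy)
                return NULL;
        *copy = value;
        return copy;
}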
Example 12: cdc_mbim_tx_fixup
static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
struct sk_buff *skb_out;
struct cdc_mbim_state *info = (void *)&dev->data;
struct cdc_ncm_ctx *ctx = info->ctx;
__le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
u16 tci = 0;
bool is_ip;
u8 *c;
if (!ctx)
goto error;
if (skb) {
if (skb->len <= ETH_HLEN)
goto error;
/* Some applications using e.g. packet sockets will
* bypass the VLAN acceleration and create tagged
* ethernet frames directly. We primarily look for
* the accelerated out-of-band tag, but fall back if
* required
*/
skb_reset_mac_header(skb);
if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN &&
__vlan_get_tag(skb, &tci) == 0) {
is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
skb_pull(skb, VLAN_ETH_HLEN);
} else {
is_ip = is_ip_proto(eth_hdr(skb)->h_proto);
skb_pull(skb, ETH_HLEN);
}
/* Is IP session <0> tagged too? */
if (info->flags & FLAG_IPS0_VLAN) {
/* drop all untagged packets */
if (!tci)
goto error;
/* map MBIM_IPS0_VID to IPS<0> */
if (tci == MBIM_IPS0_VID)
tci = 0;
}
/* mapping VLANs to MBIM sessions:
* no tag => IPS session <0> if !FLAG_IPS0_VLAN
* 1 - 255 => IPS session <vlanid>
* 256 - 511 => DSS session <vlanid - 256>
* 512 - 4093 => unsupported, drop
* 4094 => IPS session <0> if FLAG_IPS0_VLAN
*/
switch (tci & 0x0f00) {
case 0x0000: /* VLAN ID 0 - 255 */
if (!is_ip)
goto error;
c = (u8 *)&sign;
c[3] = tci;
break;
case 0x0100: /* VLAN ID 256 - 511 */
if (is_ip)
goto error;
sign = cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN);
c = (u8 *)&sign;
c[3] = tci;
break;
default:
netif_err(dev, tx_err, dev->net,
"unsupported tci=0x%04x\n", tci);
goto error;
}
}
spin_lock_bh(&ctx->mtx);
skb_out = cdc_ncm_fill_tx_frame(dev, skb, sign);
spin_unlock_bh(&ctx->mtx);
return skb_out;
error:
if (skb)
dev_kfree_skb_any(skb);
return NULL;
}
Example 13: dev_mc_upload
void dev_mc_upload(struct net_device *dev)
{
spin_lock_bh(&dev->xmit_lock);
__dev_mc_upload(dev);
spin_unlock_bh(&dev->xmit_lock);
}
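This is the thinnest possible wrapper pattern: a double-underscore helper (__dev_mc_upload) that assumes the lock is already held, plus a public wrapper that takes and releases dev->xmit_lock around it. A small, hypothetical example of the same convention:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_stats_lock);
static unsigned long demo_stats_total;

/* Caller must already hold demo_stats_lock. */
static void __demo_stats_add(unsigned long n)
{
        demo_stats_total += n;
}

static void demo_stats_add(unsigned long n)
{
        spin_lock_bh(&demo_stats_lock);
        __demo_stats_add(n);
        spin_unlock_bh(&demo_stats_lock);
}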
Example 14: ipsec_sadb_cleanup
int ipsec_sadb_cleanup(__u8 proto)
{
unsigned i;
int error = 0;
struct ipsec_sa *ips;
/* struct ipsec_sa *ipsnext, **ipsprev; */
/* char sa[SATOT_BUF]; */
/* size_t sa_len; */
KLIPS_PRINT(debug_xform,
"klips_debug:ipsec_sadb_cleanup: "
"cleaning up proto=%d.\n",
proto);
spin_lock_bh(&tdb_lock);
for (i = 0; i < SADB_HASHMOD; i++) {
ips = ipsec_sadb_hash[i];
while (ips) {
ipsec_sadb_hash[i] = ips->ips_hnext;
ips->ips_hnext = NULL;
ipsec_sa_put(ips, IPSEC_REFSAADD);
ips = ipsec_sadb_hash[i];
}
}
/* errlab: */
spin_unlock_bh(&tdb_lock);
#if IPSEC_SA_REF_CODE
/* clean up SA reference table */
/* go through the ref table and clean out all the SAs */
KLIPS_PRINT(debug_xform,
"klips_debug:ipsec_sadb_cleanup: "
"removing SAref entries and tables.");
{
unsigned table, entry;
for (table = 0; table < IPSEC_SA_REF_MAINTABLE_NUM_ENTRIES;
table++) {
KLIPS_PRINT(debug_xform,
"klips_debug:ipsec_sadb_cleanup: "
"cleaning SAref table=%u.\n",
table);
if (ipsec_sadb.refTable[table] == NULL) {
printk("\n");
KLIPS_PRINT(debug_xform,
"klips_debug:ipsec_sadb_cleanup: "
"cleaned %u used refTables.\n",
table);
break;
}
for (entry = 0;
entry < IPSEC_SA_REF_SUBTABLE_NUM_ENTRIES;
entry++) {
if (ipsec_sadb.refTable[table]->entry[entry] !=
NULL) {
struct ipsec_sa *sa1 =
ipsec_sadb.refTable[table]->
entry[entry];
ipsec_sa_put(sa1, IPSEC_REFOTHER);
ipsec_sadb.refTable[table]->entry[entry
] = NULL;
}
}
}
}
#endif /* IPSEC_SA_REF_CODE */
return error;
}
Example 15: ath9k_wiphy_select
int ath9k_wiphy_select(struct ath_wiphy *aphy)
{
struct ath_softc *sc = aphy->sc;
bool now;
spin_lock_bh(&sc->wiphy_lock);
if (__ath9k_wiphy_scanning(sc)) {
/*
* For now, we are using mac80211 sw scan and it expects to
* have full control over channel changes, so avoid wiphy
* scheduling during a scan. This could be optimized if the
* scanning control were moved into the driver.
*/
spin_unlock_bh(&sc->wiphy_lock);
return -EBUSY;
}
if (__ath9k_wiphy_pausing(sc)) {
if (sc->wiphy_select_failures == 0)
sc->wiphy_select_first_fail = jiffies;
sc->wiphy_select_failures++;
if (time_after(jiffies, sc->wiphy_select_first_fail + HZ / 2))
{
printk(KERN_DEBUG "ath9k: Previous wiphy select timed "
"out; disable/enable hw to recover\n");
__ath9k_wiphy_mark_all_paused(sc);
/*
* TODO: this workaround to fix hardware is unlikely to
* be specific to virtual wiphy changes. It can happen
* on normal channel change, too, and as such, this
* should really be made more generic. For example,
* trigger radio disable/enable on GTT interrupt burst
* (say, 10 GTT interrupts received without any TX
* frame being completed)
*/
spin_unlock_bh(&sc->wiphy_lock);
ath_radio_disable(sc, aphy->hw);
ath_radio_enable(sc, aphy->hw);
/* Only the primary wiphy hw is used for queuing work */
ieee80211_queue_work(aphy->sc->hw,
&aphy->sc->chan_work);
return -EBUSY; /* previous select still in progress */
}
spin_unlock_bh(&sc->wiphy_lock);
return -EBUSY; /* previous select still in progress */
}
sc->wiphy_select_failures = 0;
/* Store the new channel */
sc->chan_idx = aphy->chan_idx;
sc->chan_is_ht = aphy->chan_is_ht;
sc->next_wiphy = aphy;
__ath9k_wiphy_pause_all(sc);
now = !__ath9k_wiphy_pausing(aphy->sc);
spin_unlock_bh(&sc->wiphy_lock);
if (now) {
/* Ready to request channel change immediately */
ieee80211_queue_work(aphy->sc->hw, &aphy->sc->chan_work);
}
/*
* wiphys will be unpaused in ath9k_tx_status() once channel has been
* changed if any wiphy needs time to become paused.
*/
return 0;
}
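Finally, notice that every exit path in ath9k_wiphy_select() releases wiphy_lock before calling ath_radio_disable()/ath_radio_enable() or queuing work: decisions are made while the BH lock is held, but slow or re-entrant calls happen only after spin_unlock_bh(). A hypothetical, stripped-down sketch of that decide-then-act shape (names invented):

#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/types.h>
#include <linux/errno.h>

static DEFINE_SPINLOCK(demo_sel_lock);
static bool demo_change_pending;

static void demo_change_work_fn(struct work_struct *work)
{
        /* Perform the slow state change here, then clear the flag. */
        spin_lock_bh(&demo_sel_lock);
        demo_change_pending = false;
        spin_unlock_bh(&demo_sel_lock);
}

static DECLARE_WORK(demo_change_work, demo_change_work_fn);

static int demo_request_change(void)
{
        spin_lock_bh(&demo_sel_lock);
        if (demo_change_pending) {
                spin_unlock_bh(&demo_sel_lock); /* unlock on every exit path */
                return -EBUSY;
        }
        demo_change_pending = true;             /* decision made under the lock */
        spin_unlock_bh(&demo_sel_lock);

        schedule_work(&demo_change_work);       /* slow/async work only after unlock */
        return 0;
}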