This article collects typical usage examples of the pr_debug_ratelimited function in C/C++. If you have been wondering what exactly pr_debug_ratelimited does, how it is called, or what real uses of it look like, the hand-picked code examples below should help.
A total of 15 code examples of pr_debug_ratelimited are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better code examples.
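pr_debug_ratelimited() takes the same printf-style arguments as pr_debug(), but it drops messages once callers exceed the kernel's printk rate limit, which is why the examples below use it in hot paths such as URB completion callbacks, flow-control code, and memory-pool allocation failures. As a quick orientation, here is a minimal, hypothetical sketch of the call pattern; the demo_* name and the pr_fmt prefix are illustrative assumptions and do not come from any of the examples on this page.

/*
 * Minimal sketch of rate-limited debug logging in a completion callback.
 * "demo_write_complete" is an invented name; only the printk and USB
 * helpers used here are real kernel APIs.
 */
#define pr_fmt(fmt) "demo: " fmt

#include <linux/printk.h>
#include <linux/usb.h>

static void demo_write_complete(struct urb *urb)
{
        /* Success is silent; only unexpected completions are logged. */
        if (urb->status)
                pr_debug_ratelimited("%s: non zero urb status = %d\n",
                                     __func__, urb->status);

        usb_free_urb(urb);
}

Like pr_debug(), pr_debug_ratelimited() produces output only when DEBUG is defined or CONFIG_DYNAMIC_DEBUG is enabled, and the rate limiter uses the kernel's defaults (roughly a burst of 10 messages per 5-second interval), so a flood of identical errors will not spam the log.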
Example 1: data_bridge_write_cb
static void data_bridge_write_cb(struct urb *urb)
{
struct sk_buff *skb = urb->context;
struct timestamp_info *info = (struct timestamp_info *)skb->cb;
struct data_bridge *dev = info->dev;
struct bridge *brdg = dev->brdg;
int pending;
pr_debug("%s: dev:%p\n", __func__, dev);
switch (urb->status) {
case 0: /*success*/
dev->to_modem++;
dev->tx_num_of_bytes += skb->len;
dbg_timestamp("UL", skb);
break;
case -EPROTO:
dev->err = -EPROTO;
break;
case -EPIPE:
set_bit(TX_HALT, &dev->flags);
dev_err(&dev->intf->dev, "%s: epout halted\n", __func__);
schedule_work(&dev->kevent);
/* FALLTHROUGH */
case -ESHUTDOWN:
case -ENOENT: /* suspended */
case -ECONNRESET: /* unplug */
case -EOVERFLOW: /*babble error*/
/* FALLTHROUGH */
default:
pr_debug_ratelimited("%s: non zero urb status = %d\n",
__func__, urb->status);
}
usb_free_urb(urb);
dev_kfree_skb_any(skb);
pending = atomic_dec_return(&dev->pending_txurbs);
/*flow ctrl*/
if (brdg && fctrl_support && pending <= fctrl_dis_thld &&
test_and_clear_bit(TX_THROTTLED, &brdg->flags)) {
pr_debug_ratelimited("%s: disable flow ctrl: pend urbs:%u\n",
__func__, pending);
dev->tx_unthrottled_cnt++;
if (brdg->ops.unthrottle_tx)
brdg->ops.unthrottle_tx(brdg->ctx);
}
/* if we are here after device disconnect
* usb_unbind_interface() takes care of
* residual pm_autopm_get_interface_* calls
*/
if (urb->dev->state != USB_STATE_NOTATTACHED)
usb_autopm_put_interface_async(dev->intf);
}
Example 2: data_bridge_write_cb
static void data_bridge_write_cb(struct urb *urb)
{
struct sk_buff *skb = urb->context;
struct timestamp_info *info = (struct timestamp_info *)skb->cb;
struct data_bridge *dev = info->dev;
struct bridge *brdg = dev->brdg;
int pending;
pr_debug("%s: dev:%p\n", __func__, dev);
switch (urb->status) {
case 0:
dbg_timestamp("UL", skb);
break;
case -EPROTO:
dev->err = -EPROTO;
break;
case -EPIPE:
set_bit(TX_HALT, &dev->flags);
dev_err(&dev->intf->dev, "%s: epout halted\n", __func__);
schedule_work(&dev->kevent);
case -ESHUTDOWN:
case -ENOENT:
case -ECONNRESET:
case -EOVERFLOW:
default:
pr_debug_ratelimited("%s: non zero urb status = %d\n",
__func__, urb->status);
}
usb_free_urb(urb);
dev_kfree_skb_any(skb);
pending = atomic_dec_return(&dev->pending_txurbs);
if (brdg && fctrl_support && pending <= fctrl_dis_thld &&
test_and_clear_bit(TX_THROTTLED, &brdg->flags)) {
pr_debug_ratelimited("%s: disable flow ctrl: pend urbs:%u\n",
__func__, pending);
dev->tx_unthrottled_cnt++;
if (brdg->ops.unthrottle_tx)
brdg->ops.unthrottle_tx(brdg->ctx);
}
if (urb->dev->state != USB_STATE_NOTATTACHED)
usb_autopm_put_interface_async(dev->intf);
}
Example 3: cgr_cb
static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
{
caam_congested = congested;
if (congested) {
#ifdef CONFIG_DEBUG_FS
times_congested++;
#endif
pr_debug_ratelimited("CAAM entered congestion\n");
} else {
pr_debug_ratelimited("CAAM exited congestion\n");
}
}
Example 4: usb_read_work_fn
static void usb_read_work_fn(struct work_struct *work)
{
unsigned long flags;
struct diag_request *req = NULL;
struct diag_usb_info *ch = container_of(work, struct diag_usb_info,
read_work);
if (!ch)
return;
if (!ch->connected || !ch->enabled || ch->read_pending) {
pr_debug_ratelimited("diag: Discarding USB read, ch: %s connected: %d, enabled: %d, pending: %d\n",
ch->name, ch->connected, ch->enabled,
ch->read_pending);
return;
}
spin_lock_irqsave(&ch->lock, flags);
req = ch->read_ptr;
if (req) {
ch->read_pending = 1;
req->buf = ch->read_buf;
req->length = USB_MAX_OUT_BUF;
usb_diag_read(ch->hdl, req);
} else {
pr_err_ratelimited("diag: In %s invalid read req\n", __func__);
}
spin_unlock_irqrestore(&ch->lock, flags);
}
Example 5: data_bridge_read_cb
static void data_bridge_read_cb(struct urb *urb)
{
struct bridge *brdg;
struct sk_buff *skb = urb->context;
struct timestamp_info *info = (struct timestamp_info *)skb->cb;
struct data_bridge *dev = info->dev;
bool queue = 0;
brdg = dev->brdg;
skb_put(skb, urb->actual_length);
switch (urb->status) {
case -ENOENT: /* suspended */
case 0: /* success */
queue = 1;
info->rx_done = get_timestamp();
spin_lock(&dev->rx_done.lock);
__skb_queue_tail(&dev->rx_done, skb);
spin_unlock(&dev->rx_done.lock);
#ifdef CONFIG_MDM_HSIC_PM
/* wakelock for fast dormancy */
if (urb->actual_length)
fast_dormancy_wakelock(rmnet_pm_dev);
#endif
break;
/*do not resubmit*/
case -EPIPE:
set_bit(RX_HALT, &dev->flags);
dev_err(&dev->udev->dev, "%s: epout halted\n", __func__);
schedule_work(&dev->kevent);
/* FALLTHROUGH */
case -ESHUTDOWN:
case -ECONNRESET: /* unplug */
case -EPROTO:
dev_kfree_skb_any(skb);
break;
/*resubmit */
case -EOVERFLOW: /*babble error*/
default:
queue = 1;
dev_kfree_skb_any(skb);
pr_debug_ratelimited("%s: non zero urb status = %d\n",
__func__, urb->status);
break;
}
spin_lock(&dev->rx_done.lock);
urb->context = NULL;
list_add_tail(&urb->urb_list, &dev->rx_idle);
spin_unlock(&dev->rx_done.lock);
/* during suspend handle rx packet, but do not queue rx work */
if (urb->status == -ENOENT)
return;
if (queue)
queue_work(dev->wq, &dev->process_rx_w);
}
Example 6: hsic_write
static int hsic_write(int id, unsigned char *buf, int len, int ctxt)
{
int err = 0;
struct diag_hsic_info *ch = NULL;
if (id < 0 || id >= NUM_HSIC_DEV) {
pr_err_ratelimited("diag: In %s, invalid index %d\n",
__func__, id);
return -EINVAL;
}
if (!buf || len <= 0) {
pr_err_ratelimited("diag: In %s, ch %d, invalid buf %pK len %d\n",
__func__, id, buf, len);
return -EINVAL;
}
ch = &diag_hsic[id];
if (!ch->opened || !ch->enabled) {
pr_debug_ratelimited("diag: In %s, ch %d is disabled. opened %d enabled: %d\n",
__func__, id, ch->opened, ch->enabled);
return -EIO;
}
err = diag_bridge_write(ch->id, buf, len);
if (err) {
pr_err_ratelimited("diag: cannot write to HSIC ch %d, err: %d\n",
ch->id, err);
}
return err;
}
Example 7: ctrl_write_callback
static void ctrl_write_callback(struct urb *urb)
{
#ifdef HTC_DEBUG_QMI_STUCK
struct ctrl_write_context *context = urb->context;
struct rmnet_ctrl_dev *dev = context->dev;
#else
struct rmnet_ctrl_dev *dev = urb->context;
#endif
#ifdef HTC_DEBUG_QMI_STUCK
del_timer(&context->timer);
if (unlikely(time_is_before_jiffies(context->start_jiffies + HZ)))
pr_err("[%s] urb %p takes %d msec to complete.\n", __func__,
urb, jiffies_to_msecs(jiffies - context->start_jiffies));
#endif
if (urb->status) {
dev->tx_ctrl_err_cnt++;
pr_debug_ratelimited("Write status/size %d/%d\n",
urb->status, urb->actual_length);
}
#ifdef HTC_LOG_RMNET_USB_CTRL
log_rmnet_usb_ctrl_event(dev->intf, "Tx cb", urb->actual_length);
#endif
kfree(urb->setup_packet);
kfree(urb->transfer_buffer);
usb_free_urb(urb);
usb_autopm_put_interface_async(dev->intf);
#ifdef HTC_DEBUG_QMI_STUCK
kfree(context);
#endif
}
Example 8: uncompress_udp_header
static int uncompress_udp_header(struct sk_buff *skb, struct udphdr *uh)
{
bool fail;
u8 tmp = 0, val = 0;
fail = lowpan_fetch_skb(skb, &tmp, sizeof(tmp));
if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
pr_debug("UDP header uncompression\n");
switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
case LOWPAN_NHC_UDP_CS_P_00:
fail |= lowpan_fetch_skb(skb, &uh->source,
sizeof(uh->source));
fail |= lowpan_fetch_skb(skb, &uh->dest,
sizeof(uh->dest));
break;
case LOWPAN_NHC_UDP_CS_P_01:
fail |= lowpan_fetch_skb(skb, &uh->source,
sizeof(uh->source));
fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
uh->dest = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
break;
case LOWPAN_NHC_UDP_CS_P_10:
fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
uh->source = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
fail |= lowpan_fetch_skb(skb, &uh->dest,
sizeof(uh->dest));
break;
case LOWPAN_NHC_UDP_CS_P_11:
fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
uh->source = htons(LOWPAN_NHC_UDP_4BIT_PORT +
(val >> 4));
uh->dest = htons(LOWPAN_NHC_UDP_4BIT_PORT +
(val & 0x0f));
break;
default:
pr_debug("ERROR: unknown UDP format\n");
goto err;
}
pr_debug("uncompressed UDP ports: src = %d, dst = %d\n",
ntohs(uh->source), ntohs(uh->dest));
/* checksum */
if (tmp & LOWPAN_NHC_UDP_CS_C) {
pr_debug_ratelimited("checksum elided currently not supported\n");
goto err;
} else {
fail |= lowpan_fetch_skb(skb, &uh->check,
sizeof(uh->check));
}
/* UDP length needs to be inferred from the lower layers
* here, we obtain the hint from the remaining size of the
* frame
*/
uh->len = htons(skb->len + sizeof(struct udphdr));
pr_debug("uncompressed UDP length: src = %d", ntohs(uh->len));
} else {
Example 9: data_bridge_read_cb
static void data_bridge_read_cb(struct urb *urb)
{
struct bridge *brdg;
struct sk_buff *skb = urb->context;
struct timestamp_info *info = (struct timestamp_info *)skb->cb;
struct data_bridge *dev = info->dev;
bool queue = 0;
/*usb device disconnect*/
if (urb->dev->state == USB_STATE_NOTATTACHED)
urb->status = -ECONNRESET;
brdg = dev->brdg;
skb_put(skb, urb->actual_length);
switch (urb->status) {
case 0: /* success */
queue = 1;
info->rx_done = get_timestamp();
spin_lock(&dev->rx_done.lock);
__skb_queue_tail(&dev->rx_done, skb);
spin_unlock(&dev->rx_done.lock);
break;
/*do not resubmit*/
case -EPIPE:
set_bit(RX_HALT, &dev->flags);
dev_err(&dev->intf->dev, "%s: epout halted\n", __func__);
schedule_work(&dev->kevent);
/* FALLTHROUGH */
case -ESHUTDOWN:
case -ENOENT: /* suspended */
case -ECONNRESET: /* unplug */
case -EPROTO:
dev_kfree_skb_any(skb);
break;
/*resubmit */
case -EOVERFLOW: /*babble error*/
default:
queue = 1;
dev_kfree_skb_any(skb);
pr_debug_ratelimited("%s: non zero urb status = %d\n",
__func__, urb->status);
break;
}
spin_lock(&dev->rx_done.lock);
list_add_tail(&urb->urb_list, &dev->rx_idle);
spin_unlock(&dev->rx_done.lock);
if (queue)
queue_work(dev->wq, &dev->process_rx_w);
}
Example 10: ctrl_write_callback
static void ctrl_write_callback(struct urb *urb)
{
struct rmnet_ctrl_dev *dev = urb->context;
if (urb->status) {
dev->tx_ctrl_err_cnt++;
pr_debug_ratelimited("Write status/size %d/%d\n",
urb->status, urb->actual_length);
}
kfree(urb->setup_packet);
kfree(urb->transfer_buffer);
usb_free_urb(urb);
usb_autopm_put_interface_async(dev->intf);
}
Example 11: ghsuart_ctrl_receive
static int ghsuart_ctrl_receive(void *dev, void *buf, size_t actual)
{
struct ghsuart_ctrl_port *port = dev;
int retval = 0;
pr_debug_ratelimited("%s: read complete bytes read: %d\n",
__func__, actual);
/* send it to USB here */
if (port && port->send_cpkt_response) {
retval = port->send_cpkt_response(port->port_usb, buf, actual);
port->to_host++;
}
kfree(buf);
return retval;
}
Example 12: diag_usb_write
int diag_usb_write(int id, unsigned char *buf, int len, int ctxt)
{
int err = 0;
struct diag_request *req = NULL;
struct diag_usb_info *usb_info = NULL;
if (id < 0 || id >= NUM_DIAG_USB_DEV) {
pr_err_ratelimited("diag: In %s, Incorrect id %d\n",
__func__, id);
return -EINVAL;
}
usb_info = &diag_usb[id];
req = diagmem_alloc(driver, sizeof(struct diag_request),
usb_info->mempool);
if (!req) {
/*
* This should never happen. It either means that we are
* trying to write more buffers than the max supported by
* this particular diag USB channel at any given instance,
* or the previous write ptrs are stuck in the USB layer.
*/
pr_err_ratelimited("diag: In %s, cannot retrieve USB write ptrs for USB channel %s\n",
__func__, usb_info->name);
return -ENOMEM;
}
req->buf = buf;
req->length = len;
req->context = (void *)(uintptr_t)ctxt;
if (!usb_info->hdl || !usb_info->connected) {
pr_debug_ratelimited("diag: USB ch %s is not connected\n",
usb_info->name);
diagmem_free(driver, req, usb_info->mempool);
return -ENODEV;
}
err = usb_diag_write(usb_info->hdl, req);
if (err) {
pr_err_ratelimited("diag: In %s, error writing to usb channel %s, err: %d\n",
__func__, usb_info->name, err);
diagmem_free(driver, req, usb_info->mempool);
}
return err;
}
Example 13: target_xcopy_locate_se_dev_e4
static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn,
struct se_device **found_dev)
{
struct xcopy_dev_search_info info;
int ret;
memset(&info, 0, sizeof(info));
info.dev_wwn = dev_wwn;
ret = target_for_each_device(target_xcopy_locate_se_dev_e4_iter, &info);
if (ret == 1) {
*found_dev = info.found_dev;
return 0;
} else {
pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
return -EINVAL;
}
}
Example 14: migrate_irqs
/*
* The current CPU has been marked offline. Migrate IRQs off this CPU.
* If the affinity settings do not allow other CPUs, force them onto any
* available CPU.
*
* Note: we must iterate over all IRQs, whether they have an attached
* action structure or not, as we need to get chained interrupts too.
*/
void migrate_irqs(void)
{
unsigned int i;
struct irq_desc *desc;
unsigned long flags;
local_irq_save(flags);
for_each_irq_desc(i, desc) {
bool affinity_broken;
raw_spin_lock(&desc->lock);
affinity_broken = migrate_one_irq(desc);
raw_spin_unlock(&desc->lock);
if (affinity_broken)
pr_debug_ratelimited("IRQ%u no longer affine to CPU%u\n",
i, smp_processor_id());
}
local_irq_restore(flags);
}
Example 15: pr_err_ratelimited
void *diagmem_alloc(struct diagchar_dev *driver, int size, int pool_type)
{
void *buf = NULL;
int i = 0;
unsigned long flags;
struct diag_mempool_t *mempool = NULL;
if (!driver)
return NULL;
for (i = 0; i < NUM_MEMORY_POOLS; i++) {
mempool = &diag_mempools[i];
if (pool_type != mempool->id)
continue;
if (!mempool->pool) {
pr_err_ratelimited("diag: %s mempool is not initialized yet\n",
mempool->name);
break;
}
if (size == 0 || size > mempool->itemsize) {
pr_err_ratelimited("diag: cannot alloc from mempool %s, invalid size: %d\n",
mempool->name, size);
break;
}
spin_lock_irqsave(&mempool->lock, flags);
if (mempool->count < mempool->poolsize) {
atomic_add(1, (atomic_t *)&mempool->count);
buf = mempool_alloc(mempool->pool, GFP_ATOMIC);
kmemleak_not_leak(buf);
}
spin_unlock_irqrestore(&mempool->lock, flags);
if (!buf) {
pr_debug_ratelimited("diag: Unable to allocate buffer from memory pool %s, size: %d/%d count: %d/%d\n",
mempool->name,
size, mempool->itemsize,
mempool->count,
mempool->poolsize);
}
break;
}
return buf;
}