This page collects typical usage examples of the pr_alert function from C (Linux kernel) code. If you have been wondering what pr_alert does, how to call it, and what real-world calls look like, the curated examples below should help.
Fifteen code examples of pr_alert are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better code examples.
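For orientation before the examples: pr_alert is the printk wrapper for KERN_ALERT-level kernel log messages, declared in <linux/printk.h>. The following minimal sketch (the module name is illustrative, not taken from the examples below) shows a typical call from a trivial module:

#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>

static int __init pr_alert_demo_init(void)
{
	/* printk-style format string; the trailing "\n" terminates the log record */
	pr_alert("pr_alert_demo: loaded by \"%s\" (pid %d)\n",
		 current->comm, current->pid);
	return 0;
}

static void __exit pr_alert_demo_exit(void)
{
	pr_alert("pr_alert_demo: unloaded\n");
}

module_init(pr_alert_demo_init);
module_exit(pr_alert_demo_exit);
MODULE_LICENSE("GPL");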
Example 1: snd_vortex_probe
//......... some code omitted here .........
// (4) Alloc components.
err = snd_vortex_mixer(chip);
if (err < 0) {
snd_card_free(card);
return err;
}
// ADB pcm.
err = snd_vortex_new_pcm(chip, VORTEX_PCM_ADB, NR_PCM);
if (err < 0) {
snd_card_free(card);
return err;
}
#ifndef CHIP_AU8820
// ADB SPDIF
if ((err = snd_vortex_new_pcm(chip, VORTEX_PCM_SPDIF, 1)) < 0) {
snd_card_free(card);
return err;
}
// A3D
if ((err = snd_vortex_new_pcm(chip, VORTEX_PCM_A3D, NR_A3D)) < 0) {
snd_card_free(card);
return err;
}
#endif
/*
// ADB I2S
if ((err = snd_vortex_new_pcm(chip, VORTEX_PCM_I2S, 1)) < 0) {
snd_card_free(card);
return err;
}
*/
#ifndef CHIP_AU8810
// WT pcm.
if ((err = snd_vortex_new_pcm(chip, VORTEX_PCM_WT, NR_WT)) < 0) {
snd_card_free(card);
return err;
}
#endif
if ((err = snd_vortex_midi(chip)) < 0) {
snd_card_free(card);
return err;
}
vortex_gameport_register(chip);
#if 0
if (snd_seq_device_new(card, 1, SNDRV_SEQ_DEV_ID_VORTEX_SYNTH,
sizeof(snd_vortex_synth_arg_t), &wave) < 0
|| wave == NULL) {
snd_printk(KERN_ERR "Can't initialize Aureal wavetable synth\n");
} else {
snd_vortex_synth_arg_t *arg;
arg = SNDRV_SEQ_DEVICE_ARGPTR(wave);
strcpy(wave->name, "Aureal Synth");
arg->hwptr = vortex;
arg->index = 1;
arg->seq_ports = seq_ports[dev];
arg->max_voices = max_synth_voices[dev];
}
#endif
// (5)
if ((err = pci_read_config_word(pci, PCI_DEVICE_ID,
&(chip->device))) < 0) {
snd_card_free(card);
return err;
}
if ((err = pci_read_config_word(pci, PCI_VENDOR_ID,
&(chip->vendor))) < 0) {
snd_card_free(card);
return err;
}
chip->rev = pci->revision;
#ifdef CHIP_AU8830
if ((chip->rev) != 0xfe && (chip->rev) != 0xfa) {
pr_alert(
"vortex: The revision (%x) of your card has not been seen before.\n",
chip->rev);
pr_alert(
"vortex: Please email the results of 'lspci -vv' to [email protected]\n");
snd_card_free(card);
err = -ENODEV;
return err;
}
#endif
// (6)
if ((err = snd_card_register(card)) < 0) {
snd_card_free(card);
return err;
}
// (7)
pci_set_drvdata(pci, card);
dev++;
vortex_connect_default(chip, 1);
vortex_enable_int(chip);
return 0;
}
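Every error path in this probe repeats the snd_card_free(card); return err; pair. A common kernel refactoring, shown here as a sketch reusing the example's own identifiers (not the original driver code), funnels all failures through a single cleanup label:

err = snd_vortex_mixer(chip);
if (err < 0)
	goto error;
err = snd_vortex_new_pcm(chip, VORTEX_PCM_ADB, NR_PCM);
if (err < 0)
	goto error;
/* ... register the remaining components the same way ... */
return 0;

error:
snd_card_free(card);	/* one cleanup site instead of one per failure */
return err;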
Example 2: diagchar_ioctl
long diagchar_ioctl(struct file *filp,
unsigned int iocmd, unsigned long ioarg)
{
int i, j, count_entries = 0, temp;
int success = -1;
void *temp_buf;
DIAG_INFO("%s:%s(parent:%s): tgid=%d\n", __func__,
current->comm, current->parent->comm, current->tgid);
if (iocmd == DIAG_IOCTL_COMMAND_REG) {
struct bindpkt_params_per_process *pkt_params =
(struct bindpkt_params_per_process *) ioarg;
mutex_lock(&driver->diagchar_mutex);
for (i = 0; i < diag_max_reg; i++) {
if (driver->table[i].process_id == 0) {
diag_add_reg(i, pkt_params->params,
&success, &count_entries);
if (pkt_params->count > count_entries) {
pkt_params->params++;
} else {
mutex_unlock(&driver->diagchar_mutex);
return success;
}
}
}
if (i < diag_threshold_reg) {
/* Increase table size by amount required */
diag_max_reg += pkt_params->count -
count_entries;
/* Make sure size doesn't go beyond threshold */
if (diag_max_reg > diag_threshold_reg) {
diag_max_reg = diag_threshold_reg;
pr_info("diag: best case memory allocation\n");
}
temp_buf = krealloc(driver->table,
diag_max_reg*sizeof(struct
diag_master_table), GFP_KERNEL);
if (!temp_buf) {
diag_max_reg -= pkt_params->count -
count_entries;
pr_alert("diag: Insufficient memory for reg.");
mutex_unlock(&driver->diagchar_mutex);
return -ENOMEM;
} else {
driver->table = temp_buf;
}
for (j = i; j < diag_max_reg; j++) {
diag_add_reg(j, pkt_params->params,
&success, &count_entries);
if (pkt_params->count > count_entries) {
pkt_params->params++;
} else {
mutex_unlock(&driver->diagchar_mutex);
return success;
}
}
mutex_unlock(&driver->diagchar_mutex);
} else {
mutex_unlock(&driver->diagchar_mutex);
pr_err("Max size reached, Pkt Registration failed for"
" Process %d", current->tgid);
}
success = 0;
} else if (iocmd == DIAG_IOCTL_GET_DELAYED_RSP_ID) {
struct diagpkt_delay_params *delay_params =
(struct diagpkt_delay_params *) ioarg;
if ((delay_params->rsp_ptr) &&
(delay_params->size == sizeof(delayed_rsp_id)) &&
(delay_params->num_bytes_ptr)) {
*((uint16_t *)delay_params->rsp_ptr) =
DIAGPKT_NEXT_DELAYED_RSP_ID(delayed_rsp_id);
*(delay_params->num_bytes_ptr) = sizeof(delayed_rsp_id);
success = 0;
}
} else if (iocmd == DIAG_IOCTL_LSM_DEINIT) {
for (i = 0; i < driver->num_clients; i++)
if (driver->client_map[i].pid == current->tgid)
break;
/* the loop index can never be -1; no match leaves i == num_clients */
if (i == driver->num_clients)
return -EINVAL;
driver->data_ready[i] |= DEINIT_TYPE;
wake_up_interruptible(&driver->wait_q);
success = 1;
} else if (iocmd == DIAG_IOCTL_SWITCH_LOGGING) {
mutex_lock(&driver->diagchar_mutex);
temp = driver->logging_mode;
driver->logging_mode = (int)ioarg;
if (driver->logging_mode == UART_MODE)
driver->logging_mode = MEMORY_DEVICE_MODE;
driver->logging_process_id = current->tgid;
mutex_unlock(&driver->diagchar_mutex);
if (temp == MEMORY_DEVICE_MODE && driver->logging_mode
== NO_LOGGING_MODE) {
driver->in_busy_1 = 1;
driver->in_busy_2 = 1;
driver->in_busy_qdsp_1 = 1;
driver->in_busy_qdsp_2 = 1;
driver->in_busy_wcnss = 1;
//......... some code omitted here .........
Example 3: diagchar_open
static int diagchar_open(struct inode *inode, struct file *file)
{
int i = 0;
void *temp;
if (driver) {
mutex_lock(&driver->diagchar_mutex);
for (i = 0; i < driver->num_clients; i++)
if (driver->client_map[i].pid == 0)
break;
if (i < driver->num_clients) {
diag_add_client(i, file);
} else {
if (i < threshold_client_limit) {
driver->num_clients++;
temp = krealloc(driver->client_map
, (driver->num_clients) * sizeof(struct
diag_client_map), GFP_KERNEL);
if (!temp)
goto fail;
else
driver->client_map = temp;
temp = krealloc(driver->data_ready
, (driver->num_clients) * sizeof(int),
GFP_KERNEL);
if (!temp)
goto fail;
else
driver->data_ready = temp;
diag_add_client(i, file);
} else {
mutex_unlock(&driver->diagchar_mutex);
pr_alert("Max client limit for DIAG reached\n");
pr_info("Cannot open handle %s"
" %d", current->comm, current->tgid);
for (i = 0; i < driver->num_clients; i++)
pr_debug("%d) %s PID=%d", i, driver->
client_map[i].name,
driver->client_map[i].pid);
return -ENOMEM;
}
}
driver->data_ready[i] = 0x0;
driver->data_ready[i] |= MSG_MASKS_TYPE;
driver->data_ready[i] |= EVENT_MASKS_TYPE;
driver->data_ready[i] |= LOG_MASKS_TYPE;
if (driver->ref_count == 0)
diagmem_init(driver);
driver->ref_count++;
mutex_unlock(&driver->diagchar_mutex);
return 0;
}
return -ENOMEM;
fail:
mutex_unlock(&driver->diagchar_mutex);
driver->num_clients--;
pr_alert("diag: Insufficient memory for new client");
return -ENOMEM;
}
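Both diag examples lean on the same krealloc idiom: assign the result to a temporary and overwrite the live pointer only on success, because krealloc returns NULL on failure while leaving the original allocation intact. A condensed sketch of the pattern, where new_count is a stand-in for the grown client count:

void *tmp;

/* grow the map; on failure the old buffer must stay reachable */
tmp = krealloc(driver->client_map,
	       new_count * sizeof(struct diag_client_map), GFP_KERNEL);
if (!tmp)
	return -ENOMEM;	/* driver->client_map is still valid here */
driver->client_map = tmp;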
Example 4: pm_callback_power_off
static void pm_callback_power_off(struct kbase_device *kbdev)
{
unsigned int uiCurrentFreqCount;
volatile int polling_count = 100000;
volatile int i = 0;
struct mtk_config *config;
if (!kbdev) {
pr_alert("MALI: input parameter is NULL\n");
return; /* kbdev is dereferenced below */
}
config = (struct mtk_config *)kbdev->mtk_config;
if (!config) {
pr_alert("MALI: mtk_config is NULL\n");
return;
}
/// 1. Delay 0.01ms before power off
for (i = 0; i < DELAY_LOOP_COUNT; i++);
if (DELAY_LOOP_COUNT != i)
{
pr_warn("[MALI] power off delay error!\n");
}
/// 2. Polling the MFG_DEBUG_REG for checking GPU IDLE before MTCMOS power off (0.1ms)
MFG_WRITE32(0x3, MFG_DEBUG_CTRL_REG);
do {
/// 0x13000184[2]
/// 1'b1: bus idle
/// 1'b0: bus busy
if (MFG_READ32(MFG_DEBUG_STAT_REG) & MFG_BUS_IDLE_BIT)
{
/// printk("[MALI]MFG BUS already IDLE! Ready to power off, %d\n", polling_count);
break;
}
} while (polling_count--);
if (polling_count <= 0)
{
pr_warn("[MALI]!!!!MFG(GPU) subsys is still BUSY!!!!!, polling_count=%d\n", polling_count);
}
#if HARD_RESET_AT_POWER_OFF
/* Cause a GPU hard reset to test whether we have actually idled the GPU
* and that we properly reconfigure the GPU on power up.
* Usually this would be dangerous, but if the GPU is working correctly it should
* be completely safe as the GPU should not be active at this point.
* However this is disabled normally because it will most likely interfere with
* bus logging etc.
*/
//KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
kbase_os_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_HARD_RESET);
/// Polling the MFG_DEBUG_REG for checking GPU IDLE after the hard reset (0.1ms)
polling_count = 100000; /* reset the poll budget before the second check */
MFG_WRITE32(0x3, MFG_DEBUG_CTRL_REG);
do {
/// 0x13000184[2]
/// 1'b1: bus idle
/// 1'b0: bus busy
if (MFG_READ32(MFG_DEBUG_STAT_REG) & MFG_BUS_IDLE_BIT)
{
/// printk("[MALI]MFG BUS already IDLE! Ready to power off, %d\n", polling_count);
break;
}
} while (polling_count--);
if (polling_count <= 0)
{
pr_warn("[MALI]!!!!MFG(GPU) subsys is still BUSY!!!!!, polling_count=%d\n", polling_count);
}
g_power_off_gpu_freq_idx = mt_gpufreq_get_cur_freq_index(); // record current freq. index.
//printk("MALI: GPU power off freq idx : %d\n",g_power_off_gpu_freq_idx );
#if 1
uiCurrentFreqCount = mt_gpufreq_get_dvfs_table_num(); // get freq. table size
mt_gpufreq_target(uiCurrentFreqCount-1); // set gpu to lowest freq.
#endif
/* MTK clock modified */
#ifdef CONFIG_MTK_CLKMGR
disable_clock(MT_CG_MFG_BG3D, "GPU");
disable_clock(MT_CG_DISP0_SMI_COMMON, "GPU");
#endif
if (mt6325_upmu_get_swcid() >= PMIC6325_E3_CID_CODE)
{
mt_gpufreq_voltage_enable_set(0);
}
#ifdef ENABLE_COMMON_DVFS
ged_dvfs_gpu_clock_switch_notify(0);
#endif
mtk_set_vgpu_power_on_flag(MTK_VGPU_POWER_OFF); // the power status is "power off".
#endif
}
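The open-coded delay loop at the top of this function keeps i volatile precisely so the compiler cannot optimize the loop away, and its duration still depends on CPU frequency. The kernel's delay helpers in <linux/delay.h> express the same intent portably; a sketch of the 0.01 ms pre-power-off delay:

#include <linux/delay.h>

/* 1. delay 0.01 ms (10 us) before power off */
udelay(10);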
Example 5: diagchar_write
static int diagchar_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
int err, ret = 0, pkt_type;
int length = 0, i;
struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
void *buf_copy = NULL;
unsigned int payload_size;
#ifdef CONFIG_DIAG_OVER_USB
if (((driver->logging_mode == USB_MODE) && (!driver->usb_connected)) ||
(driver->logging_mode == NO_LOGGING_MODE)) {
/*Drop the diag payload */
return -EIO;
}
#endif /* DIAG over USB */
/* First 4 bytes indicate the type of payload */
if (count < 4) {
pr_err("diag: Client sending short data\n");
return -EBADMSG;
}
/* Get the packet type F3/log/event/Pkt response */
err = copy_from_user(&pkt_type, buf, 4);
if (err)
return -EFAULT;
payload_size = count - 4;
if (payload_size > USER_SPACE_DATA) {
pr_err("diag: Dropping packet, packet payload size crosses 8KB limit. Current payload size %d\n",
payload_size);
driver->dropped_count++;
return -EBADMSG;
}
if (pkt_type == DCI_DATA_TYPE) {
err = copy_from_user(driver->user_space_data, buf + 4,
payload_size);
if (err) {
pr_alert("diag: copy failed for DCI data\n");
return DIAG_DCI_SEND_DATA_FAIL;
}
err = diag_process_dci_client(driver->user_space_data,
payload_size);
return err;
}
if (pkt_type == USER_SPACE_LOG_TYPE) {
err = copy_from_user(driver->user_space_data, buf + 4,
payload_size);
/* Check masks for On-Device logging */
if (driver->mask_check) {
if (!mask_request_validate(driver->user_space_data)) {
pr_alert("diag: mask request Invalid\n");
return -EFAULT;
}
}
buf = buf + 4;
diag_printk(1,"diag:%s user space data %d\n",__func__, payload_size);
for (i = 0; i < payload_size; i++)
diag_printk(1,"\t %x", *((driver->user_space_data)+i));
#ifdef CONFIG_DIAG_SDIO_PIPE
/* send masks to 9k too */
if (driver->sdio_ch) {
wait_event_interruptible(driver->wait_q,
(sdio_write_avail(driver->sdio_ch) >=
payload_size));
if (driver->sdio_ch && (payload_size > 0)) {
sdio_write(driver->sdio_ch, (void *)
(driver->user_space_data), payload_size);
}
}
#endif
#ifdef CONFIG_DIAG_BRIDGE_CODE
/* send masks to 9k too */
if (driver->hsic_ch && (payload_size > 0)) {
/* wait sending mask updates if HSIC ch not ready */
if (driver->in_busy_hsic_write)
wait_event_interruptible(driver->wait_q,
(driver->in_busy_hsic_write != 1));
driver->in_busy_hsic_write = 1;
driver->in_busy_hsic_read_on_device = 0;
err = diag_bridge_write(driver->user_space_data,
payload_size);
if (err) {
pr_err("diag: err sending mask to MDM: %d\n",
err);
/*
* If the error is recoverable, then clear
* the write flag, so we will resubmit a
* write on the next frame. Otherwise, don't
* resubmit a write on the next frame.
*/
if ((-ESHUTDOWN) != err)
driver->in_busy_hsic_write = 0;
}
}
#endif
/* send masks to 8k now */
diag_process_hdlc((void *)(driver->user_space_data),
payload_size);
return 0;
}
//......... some code omitted here .........
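The ordering used at the top of diagchar_write (bounds-check count, then copy_from_user, then check its return value) generalizes to any write/ioctl handler that reads a user-space header. A hypothetical helper illustrating the pattern in isolation:

#include <linux/uaccess.h>

/* hypothetical helper, not part of the diag driver */
static int read_pkt_type(const char __user *buf, size_t count, int *pkt_type)
{
	if (count < sizeof(*pkt_type))		/* too short to hold the header */
		return -EBADMSG;
	if (copy_from_user(pkt_type, buf, sizeof(*pkt_type)))
		return -EFAULT;			/* nonzero = bytes left uncopied */
	return 0;
}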
Example 6: fdev_init
static int fdev_init(void)
{
int result = 0;
char *name = "firstdev";
pr_alert("DEVICE:%s\n", name);
pr_alert("The process is \"%s\" (pid %i)\n",
current->comm, current->pid);
pr_alert("UTS_RELEASE:%s", UTS_RELEASE);
pr_alert("KERNEL_VERSION:%d", KERNEL_VERSION(2, 6, 10));
unsigned int firstminor = 0;
int err;
err = alloc_chrdev_region(&dev, firstminor, count, name);
if (!err) {
pr_alert("alloc_chrdev_region successful.");
pr_alert("dev_t:%d,Major=%d,Minor=%d",
dev, MAJOR(dev), MINOR(dev));
} else {
pr_alert("alloc_chrdev_region failed.");
}
fdev_p = kmalloc_array(count, sizeof(struct fdev), GFP_KERNEL);
if (!fdev_p) {
result = -ENOMEM;
pr_alert("kmalloc fdev_p failed.\n");
goto fail;
} else {
pr_alert("kmalloc fdev_p successful.\n");
}
memset(fdev_p, 0, count * sizeof(struct fdev));
major = MAJOR(dev);
for (i = 0; i < count; ++i) {
struct fdev *devp = &fdev_p[i];
sema_init(&devp->sem, 1);
devno = MKDEV(major, i);
devp->major = major;
devp->minor = i;
devp->quantum_count = QUANTUM_DEFAULT;
devp->qset_count = QSET_DEFAULT;
cdev_init(&devp->cdev, &fops);
devp->cdev.owner = THIS_MODULE;
devp->cdev.ops = &fops;
err = cdev_add(&devp->cdev, devno, 1);
if (err)
pr_alert("Error %d adding firstdev %d\n", err, i);
else
pr_alert("Successfully added firstdev %d\n", i);
}
return 0;
fail:
fdev_exit();
return result;
}
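fdev_init jumps to fdev_exit on failure, but that function is not shown on this page. A plausible counterpart (an assumption inferred from the registrations above, not the original code) would undo them in reverse order:

static void fdev_exit(void)
{
	int i;

	if (fdev_p) {
		for (i = 0; i < count; i++)
			cdev_del(&fdev_p[i].cdev);	/* detach each char device */
		kfree(fdev_p);
		fdev_p = NULL;
	}
	unregister_chrdev_region(dev, count);		/* give back the dev_t range */
	pr_alert("firstdev: exit\n");
}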
Example 7: sigd_send
static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
struct atmsvc_msg *msg;
struct atm_vcc *session_vcc;
struct sock *sk;
msg = (struct atmsvc_msg *) skb->data;
atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
vcc = *(struct atm_vcc **) &msg->vcc;
pr_debug("%d (0x%lx)\n", (int)msg->type, (unsigned long)vcc);
sk = sk_atm(vcc);
switch (msg->type) {
case as_okay:
sk->sk_err = -msg->reply;
clear_bit(ATM_VF_WAITING, &vcc->flags);
if (!*vcc->local.sas_addr.prv && !*vcc->local.sas_addr.pub) {
vcc->local.sas_family = AF_ATMSVC;
memcpy(vcc->local.sas_addr.prv,
msg->local.sas_addr.prv, ATM_ESA_LEN);
memcpy(vcc->local.sas_addr.pub,
msg->local.sas_addr.pub, ATM_E164_LEN + 1);
}
session_vcc = vcc->session ? vcc->session : vcc;
if (session_vcc->vpi || session_vcc->vci)
break;
session_vcc->itf = msg->pvc.sap_addr.itf;
session_vcc->vpi = msg->pvc.sap_addr.vpi;
session_vcc->vci = msg->pvc.sap_addr.vci;
if (session_vcc->vpi || session_vcc->vci)
session_vcc->qos = msg->qos;
break;
case as_error:
clear_bit(ATM_VF_REGIS, &vcc->flags);
clear_bit(ATM_VF_READY, &vcc->flags);
sk->sk_err = -msg->reply;
clear_bit(ATM_VF_WAITING, &vcc->flags);
break;
case as_indicate:
vcc = *(struct atm_vcc **)&msg->listen_vcc;
sk = sk_atm(vcc);
pr_debug("as_indicate!!!\n");
lock_sock(sk);
if (sk_acceptq_is_full(sk)) {
sigd_enq(NULL, as_reject, vcc, NULL, NULL);
dev_kfree_skb(skb);
goto as_indicate_complete;
}
sk->sk_ack_backlog++;
skb_queue_tail(&sk->sk_receive_queue, skb);
pr_debug("waking sk_sleep(sk) 0x%p\n", sk_sleep(sk));
sk->sk_state_change(sk);
as_indicate_complete:
release_sock(sk);
return 0;
case as_close:
set_bit(ATM_VF_RELEASED, &vcc->flags);
vcc_release_async(vcc, msg->reply);
goto out;
case as_modify:
modify_qos(vcc, msg);
break;
case as_addparty:
case as_dropparty:
sk->sk_err_soft = msg->reply;
/* < 0 failure, otherwise ep_ref */
clear_bit(ATM_VF_WAITING, &vcc->flags);
break;
default:
pr_alert("bad message type %d\n", (int)msg->type);
return -EINVAL;
}
sk->sk_state_change(sk);
out:
dev_kfree_skb(skb);
return 0;
}
Example 8: ion_test_exit
static void ion_test_exit(void)
{
misc_deregister(&ion_test_dev);
pr_alert("%s\n", __func__);
}
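misc_deregister in this exit hook implies a matching misc_register call at module init. A minimal sketch of that pairing, assuming ion_test_dev is a struct miscdevice defined elsewhere in the driver (the init function name is illustrative):

static int __init ion_test_init(void)
{
	int ret;

	ret = misc_register(&ion_test_dev);	/* returns 0 on success */
	if (ret)
		pr_alert("%s: misc_register failed %d\n", __func__, ret);
	return ret;
}

module_init(ion_test_init);
module_exit(ion_test_exit);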
Example 9: qfp_fuse_ioctl
static long
qfp_fuse_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int err = 0;
struct qfp_fuse_req req;
u32 fuse_buf[QFP_FUSE_BUF_SIZE];
u32 *buf = fuse_buf;
u32 *ptr = NULL;
int i;
/* Verify user arguments. */
if (_IOC_TYPE(cmd) != QFP_FUSE_IOC_MAGIC)
return -ENOTTY;
switch (cmd) {
case QFP_FUSE_IOC_READ:
if (arg == 0) {
pr_err("user space arg not supplied\n");
err = -EFAULT;
break;
}
if (copy_from_user(&req, (void __user *)arg, sizeof(req))) {
pr_err("Error copying req from user space\n");
err = -EFAULT;
break;
}
/* Check for limits */
if (is_usr_req_valid(&req) == false) {
pr_err("Invalid request\n");
err = -EINVAL;
break;
}
if (req.size > QFP_FUSE_BUF_SIZE) {
/* Allocate memory for buffer */
ptr = kzalloc(req.size * 4, GFP_KERNEL);
if (ptr == NULL) {
pr_alert("No memory for data\n");
err = -ENOMEM;
break;
}
buf = ptr;
}
if (mutex_lock_interruptible(&qfp_priv->lock)) {
err = -ERESTARTSYS;
break;
}
/* Read data */
for (i = 0; i < req.size; i++)
buf[i] = readl_relaxed(
((u32 *) (qfp_priv->base + req.offset)) + i);
if (copy_to_user((void __user *)req.data, buf, 4*(req.size))) {
pr_err("Error copying to user space\n");
err = -EFAULT;
}
mutex_unlock(&qfp_priv->lock);
break;
case QFP_FUSE_IOC_WRITE:
if (arg == 0) {
pr_err("user space arg not supplied\n");
err = -EFAULT;
break;
}
if (copy_from_user(&req, (void __user *)arg, sizeof(req))) {
pr_err("Error copying req from user space\n");
err = -EFAULT;
break;
}
/* Check for limits */
if (is_usr_req_valid(&req) == false) {
pr_err("Invalid request\n");
err = -EINVAL;
break;
}
if (req.size > QFP_FUSE_BUF_SIZE) {
/* Allocate memory for buffer */
ptr = kzalloc(req.size * 4, GFP_KERNEL);
if (ptr == NULL) {
pr_alert("No memory for data\n");
err = -ENOMEM;
break;
}
buf = ptr;
}
/* Copy user data to local buffer */
if (copy_from_user(buf, (void __user *)req.data,
4 * (req.size))) {
pr_err("Error copying data from user space\n");
err = -EFAULT;
break;
//......... some code omitted here .........
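Both branches above size the temporary buffer as req.size * 4 with a plain multiplication, which can overflow for a hostile req.size even after is_usr_req_valid. kcalloc performs the same zeroed allocation with built-in overflow checking; a sketch of the safer call in the same spot:

/* kcalloc(n, size, flags) fails cleanly if n * size would overflow */
ptr = kcalloc(req.size, sizeof(u32), GFP_KERNEL);
if (ptr == NULL) {
	pr_alert("No memory for data\n");
	err = -ENOMEM;
	break;
}
buf = ptr;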
Example 10: do_page_fault
//......... some code omitted here .........
up_read(&mm->mmap_sem);
return;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
up_read(&mm->mmap_sem);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
if (unhandled_signal(current, SIGSEGV) && printk_ratelimit()) {
pr_info("%s: unhandled page fault (%d) at 0x%08lx, "
"cause %ld\n", current->comm, SIGSEGV, address, cause);
show_regs(regs);
}
_exception(SIGSEGV, regs, code, address);
return;
}
no_context:
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs))
return;
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
bust_spinlocks(1);
pr_alert("Unable to handle kernel %s at virtual address %08lx",
address < PAGE_SIZE ? "NULL pointer dereference" :
"paging request", address);
pr_alert("ea = %08lx, ra = %08lx, cause = %ld\n", regs->ea, regs->ra,
cause);
panic("Oops");
return;
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
up_read(&mm->mmap_sem);
if (is_global_init(tsk)) {
yield();
down_read(&mm->mmap_sem);
goto survive;
}
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
up_read(&mm->mmap_sem);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
goto no_context;
_exception(SIGBUS, regs, BUS_ADRERR, address);
return;
Example 11: do_page_fault
/*
* This routine handles page faults. It determines the address, and the
* problem, and then passes it handle_page_fault() for normal DTLB and
* ITLB issues, and for DMA or SN processor faults when we are in user
* space. For the latter, if we're in kernel mode, we just save the
* interrupt away appropriately and return immediately. We can't do
* page faults for user code while in kernel mode.
*/
void do_page_fault(struct pt_regs *regs, int fault_num,
unsigned long address, unsigned long write)
{
int is_page_fault;
#ifdef CONFIG_KPROBES
/*
* This is to notify the fault handler of the kprobes. The
* exception code is redundant as it is also carried in REGS,
* but we pass it anyhow.
*/
if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
regs->faultnum, SIGSEGV) == NOTIFY_STOP)
return;
#endif
#ifdef __tilegx__
/*
* We don't need early do_page_fault_ics() support, since unlike
* Pro we don't need to worry about unlocking the atomic locks.
* There is only one current case in GX where we touch any memory
* under ICS other than our own kernel stack, and we handle that
* here. (If we crash due to trying to touch our own stack,
* we're in too much trouble for C code to help out anyway.)
*/
if (write & ~1) {
unsigned long pc = write & ~1;
if (pc >= (unsigned long) __start_unalign_asm_code &&
pc < (unsigned long) __end_unalign_asm_code) {
struct thread_info *ti = current_thread_info();
/*
* Our EX_CONTEXT is still what it was from the
* initial unalign exception, but now we've faulted
* on the JIT page. We would like to complete the
* page fault however is appropriate, and then retry
* the instruction that caused the unalign exception.
* Our state has been "corrupted" by setting the low
* bit in "sp", and stashing r0..r3 in the
* thread_info area, so we revert all of that, then
* continue as if this were a normal page fault.
*/
regs->sp &= ~1UL;
regs->regs[0] = ti->unalign_jit_tmp[0];
regs->regs[1] = ti->unalign_jit_tmp[1];
regs->regs[2] = ti->unalign_jit_tmp[2];
regs->regs[3] = ti->unalign_jit_tmp[3];
write &= 1;
} else {
pr_alert("%s/%d: ICS set at page fault at %#lx: %#lx\n",
current->comm, current->pid, pc, address);
show_regs(regs);
do_group_exit(SIGKILL);
return;
}
}
#else
/* This case should have been handled by do_page_fault_ics(). */
BUG_ON(write & ~1);
#endif
#if CHIP_HAS_TILE_DMA()
/*
* If it's a DMA fault, suspend the transfer while we're
* handling the miss; we'll restart after it's handled. If we
* don't suspend, it's possible that this process could swap
* out and back in, and restart the engine since the DMA is
* still 'running'.
*/
if (fault_num == INT_DMATLB_MISS ||
fault_num == INT_DMATLB_ACCESS ||
fault_num == INT_DMATLB_MISS_DWNCL ||
fault_num == INT_DMATLB_ACCESS_DWNCL) {
__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);
while (__insn_mfspr(SPR_DMA_USER_STATUS) &
SPR_DMA_STATUS__BUSY_MASK)
;
}
#endif
/* Validate fault num and decide if this is a first-time page fault. */
switch (fault_num) {
case INT_ITLB_MISS:
case INT_DTLB_MISS:
#if CHIP_HAS_TILE_DMA()
case INT_DMATLB_MISS:
case INT_DMATLB_MISS_DWNCL:
#endif
is_page_fault = 1;
break;
case INT_DTLB_ACCESS:
#if CHIP_HAS_TILE_DMA()
//......... some code omitted here .........
Example 12: diagchar_ioctl
long diagchar_ioctl(struct file *filp,
unsigned int iocmd, unsigned long ioarg)
{
int i, j, temp, success = -1;
unsigned int count_entries = 0, interim_count = 0;
void *temp_buf;
uint16_t support_list = 0;
struct dci_notification_tbl notify_params;
if (iocmd == DIAG_IOCTL_COMMAND_REG) {
struct bindpkt_params_per_process pkt_params;
struct bindpkt_params *params;
struct bindpkt_params *head_params;
if (copy_from_user(&pkt_params, (void *)ioarg,
sizeof(struct bindpkt_params_per_process))) {
return -EFAULT;
}
if ((UINT32_MAX/sizeof(struct bindpkt_params)) <
pkt_params.count) {
pr_alert("diag: integer overflow while multiply\n");
return -EFAULT;
}
params = kzalloc(pkt_params.count*sizeof(
struct bindpkt_params), GFP_KERNEL);
if (!params) {
pr_alert("diag: unable to alloc memory\n");
return -ENOMEM;
} else
head_params = params;
if (copy_from_user(params, pkt_params.params,
pkt_params.count*sizeof(struct bindpkt_params))) {
kfree(head_params);
return -EFAULT;
}
mutex_lock(&driver->diagchar_mutex);
for (i = 0; i < diag_max_reg; i++) {
if (driver->table[i].process_id == 0) {
diag_add_reg(i, params, &success,
&count_entries);
if (pkt_params.count > count_entries) {
params++;
} else {
mutex_unlock(&driver->diagchar_mutex);
kfree(head_params);
return success;
}
}
}
if (i < diag_threshold_reg) {
/* Increase table size by amount required */
if (pkt_params.count >= count_entries) {
interim_count = pkt_params.count -
count_entries;
} else {
pr_alert("diag: error in params count\n");
kfree(head_params);
mutex_unlock(&driver->diagchar_mutex);
return -EFAULT;
}
if (UINT32_MAX - diag_max_reg >=
interim_count) {
diag_max_reg += interim_count;
} else {
pr_alert("diag: Integer overflow\n");
kfree(head_params);
mutex_unlock(&driver->diagchar_mutex);
return -EFAULT;
}
/* Make sure size doesnt go beyond threshold */
if (diag_max_reg > diag_threshold_reg) {
diag_max_reg = diag_threshold_reg;
pr_info("diag: best case memory allocation\n");
}
if (UINT32_MAX/sizeof(struct diag_master_table) <
diag_max_reg) {
pr_alert("diag: integer overflow\n");
kfree(head_params);
mutex_unlock(&driver->diagchar_mutex);
return -EFAULT;
}
temp_buf = krealloc(driver->table,
diag_max_reg*sizeof(struct
diag_master_table), GFP_KERNEL);
if (!temp_buf) {
pr_alert("diag: Insufficient memory for reg.\n");
mutex_unlock(&driver->diagchar_mutex);
if (pkt_params.count >= count_entries) {
interim_count = pkt_params.count -
count_entries;
} else {
pr_alert("diag: params count error\n");
mutex_unlock(&driver->diagchar_mutex);
kfree(head_params);
return -EFAULT;
}
if (diag_max_reg >= interim_count) {
diag_max_reg -= interim_count;
} else {
//......... some code omitted here .........
Example 13: pretimeout_noop
/**
* pretimeout_noop - No operation on watchdog pretimeout event
* @wdd: watchdog_device
*
* This function prints a message about the pretimeout event to the kernel log.
*/
static void pretimeout_noop(struct watchdog_device *wdd)
{
pr_alert("watchdog%d: pretimeout event\n", wdd->id);
}
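For context, pretimeout_noop is the handler side of the default watchdog pretimeout governor in the mainline kernel (drivers/watchdog/pretimeout_noop.c). It is wired up roughly as in the sketch below; treat the struct layout and registration call as a paraphrase of that mechanism rather than a verbatim copy, since trees differ:

static struct watchdog_governor watchdog_gov_noop = {
	.name		= "noop",
	.pretimeout	= pretimeout_noop,	/* invoked on the pretimeout interrupt */
};

static int __init watchdog_gov_noop_register(void)
{
	return watchdog_register_governor(&watchdog_gov_noop);
}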
Example 14: check_tempk
static void __cpuinit check_tempk(struct work_struct *work)
{
unsigned int new_freq;
struct tsens_device tsens_dev;
long temp = 0;
int ret = 0;
tsens_dev.sensor_num = kmsm_thermal_info.sensor_id;
ret = tsens_get_temp(&tsens_dev, &temp);
if (ret) {
pr_debug("%s: Unable to read TSENS sensor %d\n",
KBUILD_MODNAME, tsens_dev.sensor_num);
goto reschedule;
}
//pr_alert("CHECK TEMP %lu-%d-%d\n", temp, kmsm_thermal_info.temp_limit_degC_start, kmsm_thermal_info.temp_limit_degC_stop);
kmsm_thermal_info.current_temp = temp;
if (temp >= kmsm_thermal_info.temp_limit_degC_start)
{
unsigned int i;
if (!kmsm_thermal_info.isthrottling)
{
//prev_freq = cpufreq_get(0);
thermal_get_freq_table();
pr_alert("START KTHROTTLING - current temp = %lu - set point = %d\n", temp, kmsm_thermal_info.temp_limit_degC_start);
}
kmsm_thermal_info.isthrottling = 1;
//policy = cpufreq_cpu_get(0);
//__cpufreq_driver_target(policy, 1296000, CPUFREQ_RELATION_H);
limit_idx -= kmsm_thermal_info.freq_steps_while_throttling;
if (limit_idx < limit_idx_low)
limit_idx = limit_idx_low;
for (i = 0; i < num_online_cpus(); i++)
{
//pr_alert("KTHROTTLING LOOP - current temp = %lu - set point = %d\n", temp, kmsm_thermal_info.temp_limit_degC_start);
if (cpu_online(i) && cpufreq_get(i) != table[limit_idx].frequency)
{
//pr_alert("KTHROTTLING LOOP IN IF - current temp = %lu - set point = %d\n", temp, kmsm_thermal_info.temp_limit_degC_start);
//policy = NULL;
//policy = cpufreq_cpu_get(i);
//if (policy != NULL)
// __cpufreq_driver_target(policy, 1296000, CPUFREQ_RELATION_H);
new_freq = table[limit_idx].frequency;
do_kthermal(i, new_freq);
}
}
}
else if (kmsm_thermal_info.isthrottling && temp > kmsm_thermal_info.temp_limit_degC_stop && temp < kmsm_thermal_info.temp_limit_degC_start)
{
unsigned int i;
for (i = 0; i < num_online_cpus(); i++)
{
if (cpu_online(i) && cpufreq_get(i) != table[limit_idx].frequency)
{
new_freq = table[limit_idx].frequency;
do_kthermal(i, new_freq);
}
}
}
else if (kmsm_thermal_info.isthrottling && temp <= kmsm_thermal_info.temp_limit_degC_stop)
{
unsigned int i;
bool stopThrottle = false;
//policy = cpufreq_cpu_get(0);
//if (prev_freq > 0)
// __cpufreq_driver_target(policy, prev_freq, CPUFREQ_RELATION_H);
limit_idx += kmsm_thermal_info.freq_steps_while_throttling;
if (limit_idx >= limit_idx_high)
{
limit_idx = limit_idx_high;
kmsm_thermal_info.isthrottling = 0;
stopThrottle = true;
pr_alert("STOP KTHROTTLING - current temp = %lu\n", temp);
}
for (i = 0; i < num_online_cpus(); i++)
{
if (cpu_online(i))
{
//policy = NULL;
//policy = cpufreq_cpu_get(i);
//if (prev_freq > 0 && policy != NULL)
// __cpufreq_driver_target(policy, prev_freq, CPUFREQ_RELATION_H);
//do_thermal(i, prev_freq);
new_freq = table[limit_idx].frequency;
do_kthermal(i, new_freq);
}
}
if (stopThrottle)
do_kthermal(0, 0);
}
reschedule:
schedule_delayed_work_on(0, &check_temp_workk,
msecs_to_jiffies(kmsm_thermal_info.poll_speed));
}
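The manual stepping-and-pinning of limit_idx is the min/max clamping that <linux/kernel.h> provides macros for. A sketch of the throttle-down step, assuming limit_idx and limit_idx_low are plain ints (the cast keeps both operands the same type, which the kernel's max() macro checks):

/* step down by the configured number of table entries,
 * but never below the lowest valid index */
limit_idx = max(limit_idx - (int)kmsm_thermal_info.freq_steps_while_throttling,
		limit_idx_low);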
Example 15: an30259a_set_led_blink
static void an30259a_set_led_blink(enum an30259a_led_enum led,
unsigned int delay_on_time,
unsigned int delay_off_time,
u8 brightness)
{
struct i2c_client *client;
client = b_client;
if (brightness == LED_OFF) {
leds_on(led, false, false, brightness);
return;
}
if (brightness > LED_MAX_CURRENT)
brightness = LED_MAX_CURRENT;
if (led == LED_R)
LED_DYNAMIC_CURRENT = LED_R_CURRENT;
else if (led == LED_G)
LED_DYNAMIC_CURRENT = LED_G_CURRENT;
else if (led == LED_B)
LED_DYNAMIC_CURRENT = LED_B_CURRENT;
/* In user case, LED current is restricted */
if (led_intensity == 0 || led_intensity == 40) { // if stock intensity is used (see LED_x_CURRENT = 0x28)
brightness = (brightness * LED_DYNAMIC_CURRENT) / LED_MAX_CURRENT;
}
else { // adapt current to led_intensity (nonzero and non-stock here)
brightness = (brightness * led_intensity) / LED_MAX_CURRENT;
}
if (led_enable_fade_charging == 1)
{
if (led_time_on)
delay_on_time = led_time_on;
if (led_time_off)
delay_off_time = led_time_off;
}
if (delay_on_time > SLPTT_MAX_VALUE)
delay_on_time = SLPTT_MAX_VALUE;
if (delay_off_time > SLPTT_MAX_VALUE)
delay_off_time = SLPTT_MAX_VALUE;
if (delay_off_time == LED_OFF) {
leds_on(led, true, false, brightness);
if (brightness == LED_OFF)
leds_on(led, false, false, brightness);
return;
} else
leds_on(led, true, true, brightness);
if (led_time_on)
{
pr_alert("LED OVER-RIDE - DELAY_ON_Orig=%d, DELAY_OFF_Orig=%d, DELAY_ON_New=%d, DELAY_OFF_New=%d", delay_on_time, delay_off_time, led_time_on, led_time_off);
delay_on_time = led_time_on;
}
if (led_time_off)
{
pr_alert("LED OVER-RIDE - DELAY_ON_Orig=%d, DELAY_OFF_Orig=%d, DELAY_ON_New=%d, DELAY_OFF_New=%d", delay_on_time, delay_off_time, led_time_on, led_time_off);
delay_off_time = led_time_off;
}
if (led_enable_fade == 1) {
leds_set_slope_mode(client, led, 0, 30, 15, 0,
(delay_on_time + AN30259A_TIME_UNIT - 1) /
AN30259A_TIME_UNIT,
(delay_off_time + AN30259A_TIME_UNIT - 1) /
AN30259A_TIME_UNIT,
led_step_speed1, led_step_speed2, led_step_speed3, led_step_speed4);
}
else {
leds_set_slope_mode(client, led, 0, 15, 15, 0,
(delay_on_time + AN30259A_TIME_UNIT - 1) /
AN30259A_TIME_UNIT,
(delay_off_time + AN30259A_TIME_UNIT - 1) /
AN30259A_TIME_UNIT,
0, 0, 0, 0);
}
}
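The (delay_on_time + AN30259A_TIME_UNIT - 1) / AN30259A_TIME_UNIT expressions are integer ceiling division, so a partial time unit rounds up instead of truncating to zero. As a worked example, if AN30259A_TIME_UNIT were 500 ms (an illustrative value, not stated on this page), a 1300 ms on-time maps to (1300 + 499) / 500 = 3 slope-mode units; kernel code often writes the same computation as DIV_ROUND_UP(delay_on_time, AN30259A_TIME_UNIT).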