本文整理汇总了C++中pr_emerg函数的典型用法代码示例。如果您正苦于以下问题:C++ pr_emerg函数的具体用法?C++ pr_emerg怎么用?C++ pr_emerg使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了pr_emerg函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: dump_page_badflags
/*
 * Dump diagnostic state of @page to the kernel log.
 *
 * @page:     the page to dump
 * @reason:   optional human-readable explanation of why the dump was
 *            triggered (printed verbatim when non-NULL)
 * @badflags: mask of page flags the caller considers invalid; any of
 *            these that are set on @page are printed separately
 */
void dump_page_badflags(struct page *page, const char *reason,
unsigned long badflags)
{
pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
page, atomic_read(&page->_count), page_mapcount(page),
page->mapping, page->index);
/* pageflag_names must have one entry per page flag bit */
BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS);
dump_flags(page->flags, pageflag_names, ARRAY_SIZE(pageflag_names));
if (reason)
pr_alert("page dumped because: %s\n", reason);
/* Highlight specifically which of the caller's bad flags are set */
if (page->flags & badflags) {
pr_alert("bad because of flags:\n");
dump_flags(page->flags & badflags,
pageflag_names, ARRAY_SIZE(pageflag_names));
}
#ifdef CONFIG_MEMCG
if (page->mem_cgroup)
pr_alert("page->mem_cgroup:%p\n", page->mem_cgroup);
#endif
}
示例2: arch_restart_cpu
/*
 * Bring a halted CPU core back into normal execution through its
 * debug registers: leave halting-debug mode, issue a restart request,
 * then poll until the core reports it is running again.
 */
void arch_restart_cpu(u32 cpu)
{
	u32 retries;
	u32 dscr;

	/* Clear DSCR bits 14 and 13 to leave halting-debug mode. */
	dscr = readl(DBG_DSCR(cpu));
	dscr &= ~((0x1 << 14) | (0x1 << 13));
	writel(dscr, DBG_DSCR(cpu));

	/* Restart dest cpu */
	writel(0x2, DBG_DRCR(cpu));

	/* Poll DSCR bit 1 until the core signals it has restarted. */
	for (retries = 10000; retries; retries--) {
		dscr = readl(DBG_DSCR(cpu));
		if (dscr & (0x1 << 1))
			break;
	}

	if (!retries)
		pr_emerg("Cannot restart cpu%d\n", cpu);
}
示例3: mx_reboot_internal
/*
 * Reboot the board, optionally recording a reboot mode for the
 * bootloader in the S5P_INFORM4 scratch register first.  Never
 * returns: loops on arch_reset() until the hardware resets.
 */
static void mx_reboot_internal(const char *cmd)
{
	local_irq_disable();

	/* Leave the requested mode where the bootloader can find it. */
	if (cmd) {
		if (strcmp(cmd, "charge") == 0)
			__raw_writel(REBOOT_MODE_CHARGE, S5P_INFORM4);
		else if (strcmp(cmd, "wipe") == 0)
			__raw_writel(REBOOT_MODE_WIPE, S5P_INFORM4);
		else if (strcmp(cmd, "upgrade") == 0)
			__raw_writel(REBOOT_MODE_UPGRADE, S5P_INFORM4);
	}

	/* Make sure the INFORM4 write reaches memory before resetting. */
	flush_cache_all();
	outer_flush_all();

	arch_reset(0, 0);

	pr_emerg("%s: waiting for reboot\n", __func__);
	for (;;)
		arch_reset(0, 0);
}
示例4: kvmppc_emulate_mmio
/*
 * Emulate a guest load/store instruction that trapped into the host.
 *
 * Maps the emulation result onto a RESUME_* code for the caller:
 * re-enter the guest (with or without reloading non-volatile
 * registers) or exit to the host, e.g. to let userspace complete an
 * MMIO access via run->mmio.
 */
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
enum emulation_result er;
int r;
er = kvmppc_emulate_loadstore(vcpu);
switch (er) {
case EMULATE_DONE:
/* Future optimization: only reload non-volatiles if they were
 * actually modified. */
r = RESUME_GUEST_NV;
break;
case EMULATE_AGAIN:
r = RESUME_GUEST;
break;
case EMULATE_DO_MMIO:
run->exit_reason = KVM_EXIT_MMIO;
/* We must reload nonvolatiles because "update" load/store
 * instructions modify register state. */
/* Future optimization: only reload non-volatiles if they were
 * actually modified. */
r = RESUME_HOST_NV;
break;
case EMULATE_FAIL:
{
u32 last_inst;
/* NOTE(review): return value ignored; last_inst may be stale
 * if the fetch fails -- confirm against upstream KVM. */
kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
/* XXX Deliver Program interrupt to guest. */
pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
r = RESUME_HOST;
break;
}
default:
/* Unknown emulation_result: warn once, resume the guest anyway. */
WARN_ON(1);
r = RESUME_GUEST;
}
return r;
}
示例5: arch_restart_cpu
/*
 * Restart a halted CPU core via the external debug (ED*) registers
 * and the cross-trigger interface (CTI): disable halt-debug mode,
 * fire a CTI pulse on output channel 1, then poll EDPRSR until the
 * core reports it is running (or the retry budget runs out).
 */
void arch_restart_cpu(u32 cpu)
{
u32 timeout, val;
void __iomem *p_dbg_base = DBG_BASE(cpu);
void __iomem *p_cti_base = CTI_BASE(cpu);
/* Disable Halt Debug Mode (EDSCR bit 14) */
val = readl(p_dbg_base + EDSCR);
val &= ~(0x1 << 14);
writel(val, p_dbg_base + EDSCR);
/* Enable CTI access */
cti_enable_access(cpu);
/* Enable CTI */
writel(0x1, p_cti_base + CTI_CTRL);
/* ACK the output event */
writel(0x1, p_cti_base + CTI_INTACK);
/* Set output channel1 */
val = readl(p_cti_base + CTI_OUT1EN) | 0x2;
writel(val, p_cti_base + CTI_OUT1EN);
/* Trigger pulse event */
writel(0x2, p_cti_base + CTI_APP_PULSE);
/* Wait until the cpu is running again (EDPRSR bit 4 clears) */
timeout = 10000;
do {
val = readl(p_dbg_base + EDPRSR);
if (!(val & (0x1 << 4)))
break;
} while (--timeout);
if (!timeout)
pr_emerg("Cannot restart cpu%d\n", cpu);
}
示例6: ras_error_interrupt
/*
 * Handle hardware error interrupts.
 *
 * RTAS check-exception is called to collect data on the exception. If
 * the error is deemed recoverable, we log a warning and return.
 * For nonrecoverable errors, an error is logged and we stop all processing
 * as quickly as possible in order to prevent propagation of the failure.
 */
static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
{
struct rtas_error_log *rtas_elog;
int status;
int fatal;
/* Serialize use of the shared ras_log_buf. */
spin_lock(&ras_log_buf_lock);
status = rtas_call(ras_check_exception_token, 6, 1, NULL,
RTAS_VECTOR_EXTERNAL_INTERRUPT,
virq_to_hw(irq),
RTAS_INTERNAL_ERROR, 1 /* Time Critical */,
__pa(&ras_log_buf),
rtas_get_error_log_max());
rtas_elog = (struct rtas_error_log *)ras_log_buf;
/* Fatal iff RTAS reported data and severity is at least ERROR_SYNC. */
if (status == 0 &&
rtas_error_severity(rtas_elog) >= RTAS_SEVERITY_ERROR_SYNC)
fatal = 1;
else
fatal = 0;
/* format and print the extended information */
log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, fatal);
if (fatal) {
/* Sync dirty data, then power off; does not return if it works. */
pr_emerg("Fatal hardware error detected. Check RTAS error"
" log for details. Powering off immediately\n");
emergency_sync();
kernel_power_off();
} else {
pr_err("Recoverable hardware error detected\n");
}
spin_unlock(&ras_log_buf_lock);
return IRQ_HANDLED;
}
示例7: s5m_rtc_shutdown
/*
 * Platform shutdown hook: make sure the watchdog-timer-and-sudden-
 * momentary-power-loss (WTSR/SMPL) features are disabled before
 * power-off.  WTSR disable is retried up to three times, verifying
 * the control register after each attempt.
 */
static void s5m_rtc_shutdown(struct platform_device *pdev)
{
	struct s5m_rtc_info *info = platform_get_drvdata(pdev);
	unsigned int ctrl = 0;
	int attempt;

	if (info->wtsr_smpl) {
		for (attempt = 0; attempt < 3; attempt++) {
			s5m_rtc_enable_wtsr(info, false);
			regmap_read(info->rtc, SEC_WTSR_SMPL_CNTL, &ctrl);
			pr_debug("%s: WTSR_SMPL reg(0x%02x)\n", __func__,
				 ctrl);
			if (!(ctrl & WTSR_ENABLE_MASK)) {
				pr_info("%s: success to disable WTSR\n",
					__func__);
				break;
			}
			pr_emerg("%s: fail to disable WTSR\n",
				 __func__);
		}
	}

	/* Disable SMPL when power off */
	s5m_rtc_enable_smpl(info, false);
}
示例8: add_kvm_device
/*
 * adds a new device and register it with virtio
 * appropriate drivers are loaded by the device model
 */
static void add_kvm_device(struct kvm_device_desc *d, unsigned int offset)
{
	struct kvm_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev) {
		pr_emerg("Cannot allocate kvm dev %u type %u\n",
			 offset, d->type);
		return;
	}

	/* Populate the virtio device from the descriptor. */
	dev->vdev.dev.parent = kvm_root;
	dev->vdev.id.device = d->type;
	dev->vdev.config = &kvm_vq_config_ops;
	dev->desc = d;
	dev->desc_pa = PFN_PHYS(max_pfn) + offset;

	/* On registration failure the allocation is ours to free. */
	if (register_virtio_device(&dev->vdev)) {
		pr_err("Failed to register kvm device %u type %u\n",
		       offset, d->type);
		kfree(dev);
	}
}
示例9: xilinx_timer_init
/*
 * Probe and initialise the Xilinx AXI timer from its device-tree
 * node: map its registers, resolve its interrupt and clock frequency,
 * then register it as both clocksource and clockevent.  BUGs out if
 * the hardware is unusable (no base address, or only one timer
 * instantiated -- two are required).
 */
static void __init xilinx_timer_init(struct device_node *timer)
{
u32 irq;
u32 timer_num = 1;
int ret;
timer_baseaddr = of_iomap(timer, 0);
if (!timer_baseaddr) {
pr_err("ERROR: invalid timer base address\n");
BUG();
}
irq = irq_of_parse_and_map(timer, 0);
/* Property absent means two timers; present-and-nonzero means one. */
of_property_read_u32(timer, "xlnx,one-timer-only", &timer_num);
if (timer_num) {
pr_emerg("Please enable two timers in HW\n");
BUG();
}
pr_info("%s: irq=%d\n", timer->full_name, irq);
/* If there is a clock-frequency property then use it */
ret = of_property_read_u32(timer, "clock-frequency", &timer_clock_freq);
if (ret < 0)
timer_clock_freq = cpuinfo.cpu_clock_freq;
freq_div_hz = timer_clock_freq / HZ;
setup_irq(irq, &timer_irqaction);
#ifdef CONFIG_HEART_BEAT
setup_heartbeat();
#endif
xilinx_clocksource_init();
xilinx_clockevent_init();
timer_initialized = 1;
}
示例10: dump_backtrace
/*
 * Print a call trace for @tsk (or the current task when @tsk is NULL).
 * The starting frame is taken from @regs when supplied, from the
 * current frame when dumping ourselves, or from the task's saved
 * switch-out state otherwise.
 */
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
struct stackframe frame;
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
if (!tsk)
tsk = current;
if (regs) {
/* Unwind from the exception register state. */
frame.fp = regs->regs[29];
frame.sp = regs->sp;
frame.pc = regs->pc;
} else if (tsk == current) {
/* Unwind from right here. */
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_stack_pointer;
frame.pc = (unsigned long)dump_backtrace;
} else {
/*
 * task blocked in __switch_to
 */
frame.fp = thread_saved_fp(tsk);
frame.sp = thread_saved_sp(tsk);
frame.pc = thread_saved_pc(tsk);
}
pr_emerg("Call trace:\n");
/* Walk frames until unwind_frame() reports the end of the stack. */
while (1) {
unsigned long where = frame.pc;
int ret;
ret = unwind_frame(&frame);
if (ret < 0)
break;
dump_backtrace_entry(where, frame.sp);
}
}
示例11: dump_cpu_hwcaps
/*
 * Notifier callback that logs the CPU capability bitmap as a hex
 * bitfield.  The notifier arguments are unused; always returns 0.
 */
static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
{
/* file-wide pr_fmt adds "CPU features: " prefix */
pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
return 0;
}
示例12: panic
/**
* panic - halt the system
* @fmt: The text string to print
*
* Display a message, then perform cleanups.
*
* This function never returns.
*/
void panic(const char *fmt, ...)
{
static char buf[1024];
va_list args;
long i, i_next = 0, len;
int state = 0;
int old_cpu, this_cpu;
bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;
/*
* Disable local interrupts. This will prevent panic_smp_self_stop
* from deadlocking the first cpu that invokes the panic, since
* there is nothing to prevent an interrupt handler (that runs
* after setting panic_cpu) from invoking panic() again.
*/
local_irq_disable();
/*
* It's possible to come here directly from a panic-assertion and
* not have preempt disabled. Some functions called from here want
* preempt to be disabled. No point enabling it later though...
*
* Only one CPU is allowed to execute the panic code from here. For
* multiple parallel invocations of panic, all other CPUs either
* stop themself or will wait until they are stopped by the 1st CPU
* with smp_send_stop().
*
* `old_cpu == PANIC_CPU_INVALID' means this is the 1st CPU which
* comes here, so go ahead.
* `old_cpu == this_cpu' means we came from nmi_panic() which sets
* panic_cpu to this CPU. In this case, this is also the 1st CPU.
*/
this_cpu = raw_smp_processor_id();
old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu)
panic_smp_self_stop();
console_verbose();
bust_spinlocks(1);
va_start(args, fmt);
len = vscnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
if (len && buf[len - 1] == '\n')
buf[len - 1] = '\0';
pr_emerg("Kernel panic - not syncing: %s\n", buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
/*
* Avoid nested stack-dumping if a panic occurs during oops processing
*/
if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
dump_stack();
#endif
/*
* If we have crashed and we have a crash kernel loaded let it handle
* everything else.
* If we want to run this after calling panic_notifiers, pass
* the "crash_kexec_post_notifiers" option to the kernel.
*
* Bypass the panic_cpu check and call __crash_kexec directly.
*/
if (!_crash_kexec_post_notifiers) {
printk_safe_flush_on_panic();
__crash_kexec(NULL);
/*
* Note smp_send_stop is the usual smp shutdown function, which
* unfortunately means it may not be hardened to work in a
* panic situation.
*/
smp_send_stop();
} else {
/*
* If we want to do crash dump after notifier calls and
* kmsg_dump, we will need architecture dependent extra
* works in addition to stopping other CPUs.
*/
crash_smp_send_stop();
}
/*
* Run any panic handlers, including those that might need to
* add information to the kmsg dump output.
*/
atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
/* Call flush even twice. It tries harder with a single online CPU */
printk_safe_flush_on_panic();
kmsg_dump(KMSG_DUMP_PANIC);
//.........这里部分代码省略.........
示例13: mpi_set_buffer
/*
 * Load an MPI from a big-endian byte buffer.
 *
 * @a:       destination MPI; its limb array is resized as needed
 * @xbuffer: source bytes, most significant byte first
 * @nbytes:  number of bytes in @xbuffer
 * @sign:    sign to assign to @a
 *
 * Limbs are assembled from the least significant end of the buffer,
 * byte by byte; a final partial limb is handled after the main loop.
 * Returns 0 on success or -ENOMEM if the limb array cannot be grown.
 */
int mpi_set_buffer(MPI a, const void *xbuffer, unsigned nbytes, int sign)
{
const uint8_t *buffer = xbuffer, *p;
mpi_limb_t alimb;
int nlimbs;
int i;
/* Round up: a trailing partial limb still needs a slot. */
nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB;
if (RESIZE_IF_NEEDED(a, nlimbs) < 0)
return -ENOMEM;
a->sign = sign;
/* Consume whole limbs from the least significant (last) bytes. */
for (i = 0, p = buffer + nbytes - 1; p >= buffer + BYTES_PER_MPI_LIMB;) {
#if BYTES_PER_MPI_LIMB == 4
alimb = (mpi_limb_t) *p--;
alimb |= (mpi_limb_t) *p-- << 8;
alimb |= (mpi_limb_t) *p-- << 16;
alimb |= (mpi_limb_t) *p-- << 24;
#elif BYTES_PER_MPI_LIMB == 8
alimb = (mpi_limb_t) *p--;
alimb |= (mpi_limb_t) *p-- << 8;
alimb |= (mpi_limb_t) *p-- << 16;
alimb |= (mpi_limb_t) *p-- << 24;
alimb |= (mpi_limb_t) *p-- << 32;
alimb |= (mpi_limb_t) *p-- << 40;
alimb |= (mpi_limb_t) *p-- << 48;
alimb |= (mpi_limb_t) *p-- << 56;
#else
#error please implement for this limb size.
#endif
a->d[i++] = alimb;
}
/* Assemble the final partial limb, if any bytes remain. */
if (p >= buffer) {
#if BYTES_PER_MPI_LIMB == 4
alimb = *p--;
if (p >= buffer)
alimb |= (mpi_limb_t) *p-- << 8;
if (p >= buffer)
alimb |= (mpi_limb_t) *p-- << 16;
if (p >= buffer)
alimb |= (mpi_limb_t) *p-- << 24;
#elif BYTES_PER_MPI_LIMB == 8
alimb = (mpi_limb_t) *p--;
if (p >= buffer)
alimb |= (mpi_limb_t) *p-- << 8;
if (p >= buffer)
alimb |= (mpi_limb_t) *p-- << 16;
if (p >= buffer)
alimb |= (mpi_limb_t) *p-- << 24;
if (p >= buffer)
alimb |= (mpi_limb_t) *p-- << 32;
if (p >= buffer)
alimb |= (mpi_limb_t) *p-- << 40;
if (p >= buffer)
alimb |= (mpi_limb_t) *p-- << 48;
if (p >= buffer)
alimb |= (mpi_limb_t) *p-- << 56;
#else
#error please implement for this limb size.
#endif
a->d[i++] = alimb;
}
a->nlimbs = i;
/* Sanity check: we must have filled exactly the computed limb count. */
if (i != nlimbs) {
pr_emerg("MPI: mpi_set_buffer: Assertion failed (%d != %d)", i,
nlimbs);
BUG();
}
return 0;
}
示例14: throttle_delayed_work_fn
/*
 * Check if the die sensor is cooling down. If it's higher than
 * t_hot since the last throttle then throttle it again.
 * OMAP junction temperature could stay for a long time in an
 * unacceptable temperature range. The idea here is to check after
 * t_hot->throttle the system really came below t_hot else re-throttle
 * and keep doing till it's under t_hot temp range.
 */
static void throttle_delayed_work_fn(struct work_struct *work)
{
int curr;
struct omap_temp_sensor *temp_sensor =
container_of(work, struct omap_temp_sensor,
throttle_work.work);
curr = omap_read_current_temp(temp_sensor);
/* Threshold is runtime-tunable when OMAP_TEMP_CONTROL is enabled.
 * A negative reading is treated as a sensor error and throttled. */
#ifdef CONFIG_OMAP_TEMP_CONTROL
if (curr >= temp_limit || curr < 0) {
#else
if (curr >= BGAP_THRESHOLD_T_HOT || curr < 0) {
#endif
pr_warn("%s: OMAP temp read %d exceeds the threshold\n",
__func__, curr);
omap_thermal_throttle();
schedule_delayed_work(&temp_sensor->throttle_work,
msecs_to_jiffies(THROTTLE_DELAY_MS));
} else {
/* Still re-arm ourselves: keep polling until t_cold unthrottles. */
schedule_delayed_work(&temp_sensor->throttle_work,
msecs_to_jiffies(THROTTLE_DELAY_MS));
}
}
/*
 * Thermal-shutdown interrupt: the die has reached the shutdown
 * threshold, so reboot immediately -- unless the bandgap EFUSE is
 * untrimmed, in which case the reading cannot be trusted and we only
 * log an error.
 */
static irqreturn_t omap_tshut_irq_handler(int irq, void *data)
{
	struct omap_temp_sensor *temp_sensor = data;

	/* Need to handle thermal mgmt in bootloader
	 * to avoid restart again at kernel level
	 */
	if (!temp_sensor->is_efuse_valid) {
		pr_err("%s:Invalid EFUSE, Non-trimmed BGAP\n", __func__);
		return IRQ_HANDLED;
	}

	pr_emerg("%s: Thermal shutdown reached rebooting device\n",
		 __func__);
	kernel_restart(NULL);

	return IRQ_HANDLED;
}
/*
 * Temperature-alert interrupt: on the hot flag, throttle and start
 * the periodic re-throttle work; on the cold flag, stop that work and
 * unthrottle.  In each case the fired flag is masked and the opposite
 * one unmasked in the bandgap control register so the next alert
 * comes from the other direction.
 */
static irqreturn_t omap_talert_irq_handler(int irq, void *data)
{
struct omap_temp_sensor *temp_sensor = (struct omap_temp_sensor *)data;
int t_hot, t_cold, temp_offset;
t_hot = omap_temp_sensor_readl(temp_sensor, BGAP_STATUS_OFFSET)
& OMAP4_HOT_FLAG_MASK;
t_cold = omap_temp_sensor_readl(temp_sensor, BGAP_STATUS_OFFSET)
& OMAP4_COLD_FLAG_MASK;
temp_offset = omap_temp_sensor_readl(temp_sensor, BGAP_CTRL_OFFSET);
if (t_hot) {
omap_thermal_throttle();
schedule_delayed_work(&temp_sensor->throttle_work,
msecs_to_jiffies(THROTTLE_DELAY_MS));
/* Mask hot, listen for cold next. */
temp_offset &= ~(OMAP4_MASK_HOT_MASK);
temp_offset |= OMAP4_MASK_COLD_MASK;
} else if (t_cold) {
cancel_delayed_work_sync(&temp_sensor->throttle_work);
omap_thermal_unthrottle();
/* Mask cold, listen for hot next. */
temp_offset &= ~(OMAP4_MASK_COLD_MASK);
temp_offset |= OMAP4_MASK_HOT_MASK;
}
omap_temp_sensor_writel(temp_sensor, temp_offset, BGAP_CTRL_OFFSET);
return IRQ_HANDLED;
}
static int __devinit omap_temp_sensor_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct omap_temp_sensor_pdata *pdata = pdev->dev.platform_data;
struct omap_temp_sensor *temp_sensor;
struct resource *mem;
int ret = 0, val;
if (!pdata) {
dev_err(dev, "%s: platform data missing\n", __func__);
return -EINVAL;
}
temp_sensor = kzalloc(sizeof(struct omap_temp_sensor), GFP_KERNEL);
if (!temp_sensor)
return -ENOMEM;
spin_lock_init(&temp_sensor->lock);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(dev, "%s:no mem resource\n", __func__);
//.........这里部分代码省略.........
示例15: omap_fatal_zone
/**
 * omap_fatal_zone() - Shut-down the system to ensure OMAP Junction
 * Temperature decreases enough
 *
 * @cpu_temp: The current adjusted CPU temperature
 *
 * No return forces a restart of the system
 */
static void omap_fatal_zone(int cpu_temp)
{
pr_emerg("%s:FATAL ZONE (hot spot temp: %i)\n", __func__, cpu_temp);
/* Reboot with no command string; does not return on success. */
kernel_restart(NULL);
}