This article collects typical usage examples of the C++ function set_tsk_thread_flag. If you are wondering what set_tsk_thread_flag does or how it is used, the hand-picked code examples below may help.
The following shows 15 code examples of set_tsk_thread_flag, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
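Before the examples, a quick note on what set_tsk_thread_flag itself does: in mainline Linux it is a tiny inline helper (found in include/linux/sched.h in many kernel versions) that atomically sets a TIF_* flag in a task's thread_info. The sketch below shows its usual shape plus a hypothetical call site; the helper mark_sigpending is only an illustration and is not taken from the examples that follow.
/* Sketch of the usual definition (exact location and details vary by kernel version). */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	/* Delegates to the thread_info-level helper, which uses an atomic bit operation. */
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

/* Hypothetical call site: mark a task as having a pending signal. */
static void mark_sigpending(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_SIGPENDING);
}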
Example 1: copy_thread
int copy_thread(unsigned long clone_flags, unsigned long sp,
unsigned long unused,
struct task_struct *p, struct pt_regs *regs)
{
struct pt_regs *childregs;
struct task_struct *tsk;
int err;
childregs = task_pt_regs(p);
*childregs = *regs;
childregs->ax = 0;
childregs->sp = sp;
p->thread.sp = (unsigned long) childregs;
p->thread.sp0 = (unsigned long) (childregs+1);
p->thread.ip = (unsigned long) ret_from_fork;
task_user_gs(p) = get_user_gs(regs);
p->thread.io_bitmap_ptr = NULL;
tsk = current;
err = -ENOMEM;
memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
IO_BITMAP_BYTES, GFP_KERNEL);
if (!p->thread.io_bitmap_ptr) {
p->thread.io_bitmap_max = 0;
return -ENOMEM;
}
set_tsk_thread_flag(p, TIF_IO_BITMAP);
}
err = 0;
/*
* Set a new TLS for the child thread?
*/
if (clone_flags & CLONE_SETTLS)
err = do_set_thread_area(p, -1,
(struct user_desc __user *)childregs->si, 0);
if (err && p->thread.io_bitmap_ptr) {
kfree(p->thread.io_bitmap_ptr);
p->thread.io_bitmap_max = 0;
}
return err;
}
Example 2: set_single_step
static void set_single_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
if (regs != NULL) {
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
task->thread.dbcr0 = DBCR0_IDM | DBCR0_IC;
regs->msr |= MSR_DE;
#else
regs->msr |= MSR_SE;
#endif
}
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
Example 3: arch_uprobe_pre_xol
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
if (psw_bits(regs->psw).eaba == PSW_BITS_AMODE_24BIT)
return -EINVAL;
if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT)
return -EINVAL;
clear_pt_regs_flag(regs, PIF_PER_TRAP);
auprobe->saved_per = psw_bits(regs->psw).per;
auprobe->saved_int_code = regs->int_code;
regs->int_code = UPROBE_TRAP_NR;
regs->psw.addr = current->utask->xol_vaddr;
set_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
update_cr_regs(current);
return 0;
}
Example 4: ptrace_set_watch_regs
int ptrace_set_watch_regs(struct task_struct *child,
struct pt_watch_regs __user *addr)
{
int i;
int watch_active = 0;
unsigned long lt[NUM_WATCH_REGS];
u16 ht[NUM_WATCH_REGS];
if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
return -EIO;
if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
return -EIO;
/* Check the values. */
for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
#ifdef CONFIG_32BIT
if (lt[i] & __UA_LIMIT)
return -EINVAL;
#else
if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
if (lt[i] & 0xffffffff80000000UL)
return -EINVAL;
} else {
if (lt[i] & __UA_LIMIT)
return -EINVAL;
}
#endif
__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
if (ht[i] & ~MIPS_WATCHHI_MASK)
return -EINVAL;
}
/* Install them. */
for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
if (lt[i] & MIPS_WATCHLO_IRW)
watch_active = 1;
child->thread.watch.mips3264.watchlo[i] = lt[i];
/* Set the G bit. */
child->thread.watch.mips3264.watchhi[i] = ht[i];
}
if (watch_active)
set_tsk_thread_flag(child, TIF_LOAD_WATCH);
else
clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
return 0;
}
Example 5: seccomp_assign_mode
static inline void seccomp_assign_mode(struct task_struct *task,
unsigned long seccomp_mode,
unsigned long flags)
{
assert_spin_locked(&task->sighand->siglock);
task->seccomp.mode = seccomp_mode;
/*
* Make sure TIF_SECCOMP cannot be set before the mode (and
* filter) is set.
*/
smp_mb__before_atomic();
/* Assume default seccomp processes want spec flaw mitigation. */
if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
arch_seccomp_spec_mitigate(task);
set_tsk_thread_flag(task, TIF_SECCOMP);
}
Example 6: user_enable_single_step
void user_enable_single_step(struct task_struct *task)
{
clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
set_tsk_thread_flag(task, TIF_SINGLESTEP);
if (pa_psw(task)->n) {
struct siginfo si;
/* */
task_regs(task)->iaoq[0] = task_regs(task)->iaoq[1];
task_regs(task)->iasq[0] = task_regs(task)->iasq[1];
task_regs(task)->iaoq[1] = task_regs(task)->iaoq[0] + 4;
pa_psw(task)->n = 0;
pa_psw(task)->x = 0;
pa_psw(task)->y = 0;
pa_psw(task)->z = 0;
pa_psw(task)->b = 0;
ptrace_disable(task);
/*
*/
si.si_code = TRAP_TRACE;
si.si_addr = (void __user *) (task_regs(task)->iaoq[0] & ~3);
si.si_signo = SIGTRAP;
si.si_errno = 0;
force_sig_info(SIGTRAP, &si, task);
/* */
return;
}
/*
*/
pa_psw(task)->r = 1;
pa_psw(task)->t = 0;
pa_psw(task)->h = 0;
pa_psw(task)->l = 0;
}
Example 7: user_enable_single_step
void user_enable_single_step(struct task_struct *task)
{
clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
set_tsk_thread_flag(task, TIF_SINGLESTEP);
if (pa_psw(task)->n) {
struct siginfo si;
/* Nullified, just crank over the queue. */
task_regs(task)->iaoq[0] = task_regs(task)->iaoq[1];
task_regs(task)->iasq[0] = task_regs(task)->iasq[1];
task_regs(task)->iaoq[1] = task_regs(task)->iaoq[0] + 4;
pa_psw(task)->n = 0;
pa_psw(task)->x = 0;
pa_psw(task)->y = 0;
pa_psw(task)->z = 0;
pa_psw(task)->b = 0;
ptrace_disable(task);
/* Don't wake up the task, but let the
parent know something happened. */
si.si_code = TRAP_TRACE;
si.si_addr = (void __user *) (task_regs(task)->iaoq[0] & ~3);
si.si_signo = SIGTRAP;
si.si_errno = 0;
force_sig_info(SIGTRAP, &si, task);
/* notify_parent(task, SIGCHLD); */
return;
}
/* Enable recovery counter traps. The recovery counter
* itself will be set to zero on a task switch. If the
* task is suspended on a syscall then the syscall return
* path will overwrite the recovery counter with a suitable
* value such that it traps once back in user space. We
* disable interrupts in the tasks PSW here also, to avoid
* interrupts while the recovery counter is decrementing.
*/
pa_psw(task)->r = 1;
pa_psw(task)->t = 0;
pa_psw(task)->h = 0;
pa_psw(task)->l = 0;
}
Example 8: SYSCALL_DEFINE2
SYSCALL_DEFINE2(smunch,int,pid,unsigned long,bit_pattern)
{
unsigned long flags;
struct task_struct *task;
int ret;
rcu_read_lock();
task = pid_task(find_vpid(pid), PIDTYPE_PID);
rcu_read_unlock();
if (!task) return -1; // Process not present
if (!lock_task_sighand(task, &flags))
{
// Could not acquire the sighand lock: the process is dead or dying
return -1;
}
if(!thread_group_empty(task))
{
printk(KERN_ALERT "\nMULTI-Threaded Process, Exiting without processing");
ret=-1; goto return_path;
}
printk(KERN_ALERT "\nExit State:%XH,State=%XH\n",task->exit_state,task->state); //Info to user
if(task->state & TASK_UNINTERRUPTIBLE)
printk(KERN_ALERT "\nProcess is in Uniterruptible Wait-DeepSleep!!"); // Info to User
if(bit_pattern & (1UL<<(SIGKILL-1)) && (task->exit_state & EXIT_ZOMBIE))
{
printk(KERN_ALERT "\nSIGKILL present while Process is Zombie, releasing task!!");
unlock_task_sighand(task,&flags);
release_task(task); // detach_pid is called from release_task()
return 0;
}
/* If !SIGKILL || (ordinary process) || deep sleep, send all the signals. It is the user's responsibility to note that signals will be handled in order 1-64. */
printk(KERN_ALERT "!SIGKILL || (ordinary process) || DeepSleep, sending all signals!");
task->signal->shared_pending.signal.sig[0] = bit_pattern;
set_tsk_thread_flag(task,TIF_SIGPENDING);
signal_wake_up(task,1);
ret=0;
return_path:
unlock_task_sighand(task,&flags);
return ret;
}
Example 9: sys_ptrace
//......... part of the code omitted here .........
== sizeof(data))
break;
ret = -EIO;
break;
/* write the word at location addr in the USER area */
case PTRACE_POKEUSR: {
unsigned long index;
ret = -EIO;
/* convert to index and check */
index = (unsigned long) addr >> 3;
if ((addr & 7) || (index > PT_FPSCR))
break;
if (index == PT_ORIG_R3)
break;
if (index < PT_FPR0) {
ret = put_reg(child, index, data);
} else {
flush_fp_to_thread(child);
((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data;
ret = 0;
}
break;
}
case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
case PTRACE_CONT: { /* restart after signal. */
ret = -EIO;
if ((unsigned long) data > _NSIG)
break;
if (request == PTRACE_SYSCALL)
set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
else
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
child->exit_code = data;
/* make sure the single step bit is not set. */
clear_single_step(child);
wake_up_process(child);
ret = 0;
break;
}
/*
* make the child exit. Best I can do is send it a sigkill.
* perhaps it should be put in the status that it wants to
* exit.
*/
case PTRACE_KILL: {
ret = 0;
if (child->exit_state == EXIT_ZOMBIE) /* already dead */
break;
child->exit_code = SIGKILL;
/* make sure the single step bit is not set. */
clear_single_step(child);
wake_up_process(child);
break;
}
case PTRACE_SINGLESTEP: { /* set the trap flag. */
ret = -EIO;
if ((unsigned long) data > _NSIG)
break;
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
set_single_step(child);
Example 10: arch_ptrace
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
int ret;
switch (request) {
case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA: {
unsigned long tmp;
ret = read_long(child, addr, &tmp);
if (ret < 0)
break ;
ret = put_user(tmp, (unsigned long *) data);
break ;
}
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long tmp = 0;
if ((addr & 3) || addr < 0 || addr >= sizeof(struct user)) {
ret = -EIO;
break ;
}
ret = 0; /* Default return condition */
addr = addr >> 2; /* temporary hack. */
if (addr < H8300_REGS_NO)
tmp = h8300_get_reg(child, addr);
else {
switch(addr) {
case 49:
tmp = child->mm->start_code;
break ;
case 50:
tmp = child->mm->start_data;
break ;
case 51:
tmp = child->mm->end_code;
break ;
case 52:
tmp = child->mm->end_data;
break ;
default:
ret = -EIO;
}
}
if (!ret)
ret = put_user(tmp,(unsigned long *) data);
break ;
}
/* when I and D space are separate, this will have to be fixed. */
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
ret = 0;
if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
break;
ret = -EIO;
break;
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
if ((addr & 3) || addr < 0 || addr >= sizeof(struct user)) {
ret = -EIO;
break ;
}
addr = addr >> 2; /* temporary hack. */
if (addr == PT_ORIG_ER0) {
ret = -EIO;
break ;
}
if (addr < H8300_REGS_NO) {
ret = h8300_put_reg(child, addr, data);
break ;
}
ret = -EIO;
break ;
case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
case PTRACE_CONT: { /* restart after signal. */
ret = -EIO;
if ((unsigned long) data >= _NSIG)
break ;
if (request == PTRACE_SYSCALL)
set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
else
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
child->exit_code = data;
wake_up_process(child);
/* make sure the single step bit is not set. */
h8300_disable_trace(child);
ret = 0;
break ;
}
/*
* make the child exit. Best I can do is send it a sigkill.
* perhaps it should be put in the status that it wants to
* exit.
*/
//......... part of the code omitted here .........
Example 11: do_fork
/*
* Ok, this is the main fork-routine.
*
* It copies the process, and if successful kick-starts
* it and waits for it to finish using the VM if required.
*/
long do_fork(unsigned long clone_flags,
unsigned long stack_start,
struct pt_regs *regs,
unsigned long stack_size,
int __user *parent_tidptr,
int __user *child_tidptr)
{
struct task_struct *p;
int trace = 0;
long pid = alloc_pidmap();
if (pid < 0)
return -EAGAIN;
if (unlikely(current->ptrace)) {
trace = fork_traceflag (clone_flags);
if (trace)
clone_flags |= CLONE_PTRACE;
}
p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, pid);
/*
* Do this prior to waking up the new thread - the thread pointer
* might become invalid after that point, if the thread exits quickly.
*/
if (!IS_ERR(p)) {
struct completion vfork;
if (clone_flags & CLONE_VFORK) {
p->vfork_done = &vfork;
init_completion(&vfork);
}
if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
/*
* We'll start up with an immediate SIGSTOP.
*/
sigaddset(&p->pending.signal, SIGSTOP);
set_tsk_thread_flag(p, TIF_SIGPENDING);
}
if (!(clone_flags & CLONE_STOPPED))
wake_up_new_task(p, clone_flags);
else
p->state = TASK_STOPPED;
if (unlikely (trace)) {
current->ptrace_message = pid;
ptrace_notify ((trace << 8) | SIGTRAP);
}
if (clone_flags & CLONE_VFORK) {
wait_for_completion(&vfork);
if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE))
ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
}
} else {
free_pidmap(pid);
pid = PTR_ERR(p);
}
return pid;
}
Example 12: thread_function
int thread_function(void *data) {
unsigned int readPos = 0, min_pos, i, j;
unsigned long min_rss, tmp_rss;
struct task_struct *g, *p;
struct task_struct *tasks_ptr[TASKS_PTR_SIZE];
// unsigned long *sys_call_table = (unsigned long*)(0xc07992b0);
// sys_tkill = (sys_call_table[__NR_tkill]);
do_each_thread(g, p) {
struct mm_struct *mm;
if (!thread_group_leader(p))
continue;
task_lock(p);
mm = p->mm;
if (mm && (p->real_parent->pid != 2) ) {
/*
* Add only tasks that have an mm_struct, i.e. skip kernel threads.
*/
tasks_ptr[readPos++] = p;
printk(KERN_INFO
"(Origin) PID:[%-5d]| Name:%-20s| VM:%-8lu| RSS:%-8lu| OOM_adj: %-3d\n",
p->pid, p->comm, mm->total_vm, get_mm_rss(mm), p->signal->oom_adj);
}
task_unlock(p);
} while_each_thread(g, p);
/*
* Sort the tasks by RSS using selection sort.
*/
for (i = 0; i < readPos; ++i)
{
min_rss = get_mm_rss(tasks_ptr[i]->mm);
p = tasks_ptr[i];
min_pos = i;
for ( j = i+1; j < readPos; ++j )
{
tmp_rss = get_mm_rss(tasks_ptr[j]->mm);
if ( tmp_rss < min_rss )
{
min_rss = tmp_rss;
min_pos = j;
}
}
p = tasks_ptr[i];
tasks_ptr[i] = tasks_ptr[min_pos];
tasks_ptr[min_pos] = p;
}
for (i = 0; i < readPos; ++i)
{
printk(KERN_INFO
"(Sorted) PID:[%-5d]| Name:%-20s| VM:%-8lu| RSS:%-8lu| OOM_adj: %-3d\n",
tasks_ptr[i]->pid, tasks_ptr[i]->comm, tasks_ptr[i]->mm->total_vm,
get_mm_rss(tasks_ptr[i]->mm), tasks_ptr[i]->signal->oom_adj);
}
printk(KERN_INFO "Kill PID[%-5d], Name:%-20s, RSS:%-8lu",
tasks_ptr[readPos-1]->pid, tasks_ptr[readPos-1]->comm,
get_mm_rss(tasks_ptr[readPos-1]->mm));
p = tasks_ptr[readPos-1];
p->rt.time_slice = HZ;
set_tsk_thread_flag(p, TIF_MEMDIE);
force_sig(SIGKILL, p);
return 0;
}
Example 13: arch_ptrace
/*
* Note that this implementation of ptrace behaves differently from vanilla
* ptrace. Contrary to what the man page says, in the PTRACE_PEEKTEXT,
* PTRACE_PEEKDATA, and PTRACE_PEEKUSER requests the data variable is not
* ignored. Instead, the data variable is expected to point at a location
* (in user space) where the result of the ptrace call is written (instead of
* being returned).
*/
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
int ret;
unsigned long __user *datap = (unsigned long __user *)data;
switch (request) {
/* Read word at location address. */
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA: {
unsigned long tmp;
int copied;
copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
ret = -EIO;
if (copied != sizeof(tmp))
break;
ret = put_user(tmp,datap);
break;
}
/* Read the word at location address in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long tmp;
ret = -EIO;
if ((addr & 3) || addr < 0 || addr > PT_MAX << 2)
break;
tmp = get_reg(child, addr >> 2);
ret = put_user(tmp, datap);
break;
}
/* Write the word at location address. */
case PTRACE_POKETEXT:
case PTRACE_POKEDATA:
ret = 0;
if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
break;
ret = -EIO;
break;
/* Write the word at location address in the USER area. */
case PTRACE_POKEUSR:
ret = -EIO;
if ((addr & 3) || addr < 0 || addr > PT_MAX << 2)
break;
addr >>= 2;
if (addr == PT_DCCR) {
/* don't allow the tracing process to change stuff like
* interrupt enable, kernel/user bit, dma enables etc.
*/
data &= DCCR_MASK;
data |= get_reg(child, PT_DCCR) & ~DCCR_MASK;
}
if (put_reg(child, addr, data))
break;
ret = 0;
break;
case PTRACE_SYSCALL:
case PTRACE_CONT:
ret = -EIO;
if (!valid_signal(data))
break;
if (request == PTRACE_SYSCALL) {
set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
}
else {
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
}
child->exit_code = data;
/* TODO: make sure any pending breakpoint is killed */
wake_up_process(child);
ret = 0;
break;
/* Make the child exit by sending it a sigkill. */
case PTRACE_KILL:
ret = 0;
//......... part of the code omitted here .........
Example 14: lowmem_shrink
//......... part of the code omitted here .........
if (!selected[i]) {
is_exist_oom_task = 1;
max_selected_oom_idx = i;
break;
}
}
} else if (selected_oom_score_adj[max_selected_oom_idx] < oom_score_adj ||
(selected_oom_score_adj[max_selected_oom_idx] == oom_score_adj &&
selected_tasksize[max_selected_oom_idx] < tasksize)) {
is_exist_oom_task = 1;
}
if (is_exist_oom_task) {
selected[max_selected_oom_idx] = p;
selected_tasksize[max_selected_oom_idx] = tasksize;
selected_oom_score_adj[max_selected_oom_idx] = oom_score_adj;
if (all_selected_oom < LOWMEM_DEATHPENDING_DEPTH)
all_selected_oom++;
if (all_selected_oom == LOWMEM_DEATHPENDING_DEPTH) {
for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++) {
if (selected_oom_score_adj[i] < selected_oom_score_adj[max_selected_oom_idx])
max_selected_oom_idx = i;
else if (selected_oom_score_adj[i] == selected_oom_score_adj[max_selected_oom_idx] &&
selected_tasksize[i] < selected_tasksize[max_selected_oom_idx])
max_selected_oom_idx = i;
}
}
lowmem_print(2, "select %d (%s), adj %d, \
size %d, to kill\n",
p->pid, p->comm, oom_score_adj, tasksize);
}
#else
if (selected) {
if (oom_score_adj < selected_oom_score_adj)
continue;
if (oom_score_adj == selected_oom_score_adj &&
tasksize <= selected_tasksize)
continue;
}
selected = p;
selected_tasksize = tasksize;
selected_oom_score_adj = oom_score_adj;
lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
p->pid, p->comm, oom_score_adj, tasksize);
#endif
}
#ifdef ENHANCED_LMK_ROUTINE
for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++) {
if (selected[i]) {
lowmem_print(1, "send sigkill to %d (%s), adj %d,\
size %d, free memory = %d, reclaimable memory = %d\n",
selected[i]->pid, selected[i]->comm,
selected_oom_score_adj[i],
selected_tasksize[i],
other_free, other_file);
lowmem_deathpending_timeout = jiffies + HZ;
send_sig(SIGKILL, selected[i], 0);
set_tsk_thread_flag(selected[i], TIF_MEMDIE);
rem -= selected_tasksize[i];
#ifdef LMK_COUNT_READ
lmk_count++;
#endif
}
}
#else
if (selected) {
lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
selected->pid, selected->comm,
selected_oom_score_adj, selected_tasksize);
lowmem_deathpending_timeout = jiffies + HZ;
send_sig(SIGKILL, selected, 0);
set_tsk_thread_flag(selected, TIF_MEMDIE);
rem -= selected_tasksize;
#ifdef LMK_COUNT_READ
lmk_count++;
#endif
}
#endif
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
if (__ratelimit(&lmk_rs)) {
lowmem_print(1, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
nr_to_scan, sc->gfp_mask, other_free,
other_file, min_score_adj);
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO_VERBOSE
show_mem(SHOW_MEM_FILTER_NODES);
dump_tasks_info();
#endif
}
#endif
lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
sc->nr_to_scan, sc->gfp_mask, rem);
read_unlock(&tasklist_lock);
#ifdef CONFIG_ZRAM_FOR_ANDROID
atomic_set(&s_reclaim.lmk_running, 0);
#endif /* CONFIG_ZRAM_FOR_ANDROID */
return rem;
}
Example 15: android_oom_handler
//......... part of the code omitted here .........
p->pid, p->comm, oom_score_adj, tasksize);
#ifdef MULTIPLE_OOM_KILLER
if (all_selected_oom < OOM_DEPTH) {
for (i = 0; i < OOM_DEPTH; i++) {
if (!selected[i]) {
is_exist_oom_task = 1;
max_selected_oom_idx = i;
break;
}
}
} else if (selected_oom_score_adj[max_selected_oom_idx] < oom_score_adj ||
(selected_oom_score_adj[max_selected_oom_idx] == oom_score_adj &&
selected_tasksize[max_selected_oom_idx] < tasksize)) {
is_exist_oom_task = 1;
}
if (is_exist_oom_task) {
selected[max_selected_oom_idx] = p;
selected_tasksize[max_selected_oom_idx] = tasksize;
selected_oom_score_adj[max_selected_oom_idx] = oom_score_adj;
if (all_selected_oom < OOM_DEPTH)
all_selected_oom++;
if (all_selected_oom == OOM_DEPTH) {
for (i = 0; i < OOM_DEPTH; i++) {
if (selected_oom_score_adj[i] < selected_oom_score_adj[max_selected_oom_idx])
max_selected_oom_idx = i;
else if (selected_oom_score_adj[i] == selected_oom_score_adj[max_selected_oom_idx] &&
selected_tasksize[i] < selected_tasksize[max_selected_oom_idx])
max_selected_oom_idx = i;
}
}
lowmem_print(2, "oom: max_selected_oom_idx(%d) select %d (%s), adj %d, \
size %d, to kill\n",
max_selected_oom_idx, p->pid, p->comm, oom_score_adj, tasksize);
}
#else
if (selected) {
if (oom_score_adj < selected_oom_score_adj)
continue;
if (oom_score_adj == selected_oom_score_adj &&
tasksize <= selected_tasksize)
continue;
}
selected = p;
selected_tasksize = tasksize;
selected_oom_score_adj = oom_score_adj;
lowmem_print(2, "oom: select %d (%s), adj %d, size %d, to kill\n",
p->pid, p->comm, oom_score_adj, tasksize);
#endif
}
#ifdef MULTIPLE_OOM_KILLER
for (i = 0; i < OOM_DEPTH; i++) {
if (selected[i]) {
#if defined(CONFIG_CMA_PAGE_COUNTING)
lowmem_print(1, "oom: send sigkill to %d (%s), adj %d, "
"size %d ofree %d ofile %d "
"cma_free %lu cma_i_file %lu cma_a_file %lu\n",
selected[i]->pid, selected[i]->comm,
selected_oom_score_adj[i],
selected_tasksize[i],
other_free, other_file,
nr_cma_free, nr_cma_inactive_file, nr_cma_active_file);
#else
lowmem_print(1, "oom: send sigkill to %d (%s), adj %d,\
size %d\n",
selected[i]->pid, selected[i]->comm,
selected_oom_score_adj[i],
selected_tasksize[i]);
#endif
send_sig(SIGKILL, selected[i], 0);
rem -= selected_tasksize[i];
*freed += (unsigned long)selected_tasksize[i];
#ifdef OOM_COUNT_READ
oom_count++;
#endif
}
}
#else
if (selected) {
lowmem_print(1, "oom: send sigkill to %d (%s), adj %d, size %d\n",
selected->pid, selected->comm,
selected_oom_score_adj, selected_tasksize);
send_sig(SIGKILL, selected, 0);
set_tsk_thread_flag(selected, TIF_MEMDIE);
rem -= selected_tasksize;
*freed += (unsigned long)selected_tasksize;
#ifdef OOM_COUNT_READ
oom_count++;
#endif
}
#endif
read_unlock(&tasklist_lock);
lowmem_print(2, "oom: get memory %lu", *freed);
return rem;
}