This page collects and organizes typical usage examples of the pid_alive function in C/C++. If you are trying to work out what pid_alive does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help.
The following presents 15 code examples of pid_alive, sorted by popularity by default.
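Two different helpers share the name pid_alive in the examples below. In kernel code (examples 1-3, 7-9, 11-12 and 14) it is the inline helper from include/linux/sched.h, which tests whether a task_struct still has valid PID links; in userspace projects such as trinity, mod_gearman and finit (examples 4-6, 10, 13 and 15) it is typically a thin wrapper around kill(pid, 0). A simplified sketch of the kernel-side helper follows; the exact field tested varies by kernel version (older kernels check p->pids[PIDTYPE_PID].pid, kernels since 4.19 check p->thread_pid):
/*
 * Returns nonzero while the task's PID links are still valid, i.e. the
 * task has not been unhashed yet. If this fails, pointers inside the
 * task_struct (real_parent, thread-group links, ...) may be stale and
 * must not be dereferenced.
 */
static inline int pid_alive(const struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}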
Example 1: task_state
static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *p)
{
struct user_namespace *user_ns = seq_user_ns(m);
struct group_info *group_info;
int g;
struct fdtable *fdt = NULL;
const struct cred *cred;
pid_t ppid, tpid;
rcu_read_lock();
ppid = pid_alive(p) ?
task_tgid_nr_ns(rcu_dereference(p->real_parent), ns) : 0;
tpid = 0;
if (pid_alive(p)) {
struct task_struct *tracer = ptrace_parent(p);
if (tracer)
tpid = task_pid_nr_ns(tracer, ns);
}
cred = get_task_cred(p);
seq_printf(m,
"State:\t%s\n"
"Tgid:\t%d\n"
"Pid:\t%d\n"
"PPid:\t%d\n"
"TracerPid:\t%d\n"
"Uid:\t%d\t%d\t%d\t%d\n"
"Gid:\t%d\t%d\t%d\t%d\n",
get_task_state(p),
task_tgid_nr_ns(p, ns),
pid_nr_ns(pid, ns),
ppid, tpid,
from_kuid_munged(user_ns, cred->uid),
from_kuid_munged(user_ns, cred->euid),
from_kuid_munged(user_ns, cred->suid),
from_kuid_munged(user_ns, cred->fsuid),
from_kgid_munged(user_ns, cred->gid),
from_kgid_munged(user_ns, cred->egid),
from_kgid_munged(user_ns, cred->sgid),
from_kgid_munged(user_ns, cred->fsgid));
task_lock(p);
if (p->files)
fdt = files_fdtable(p->files);
seq_printf(m,
"FDSize:\t%d\n"
"Groups:\t",
fdt ? fdt->max_fds : 0);
rcu_read_unlock();
group_info = cred->group_info;
task_unlock(p);
for (g = 0; g < group_info->ngroups; g++)
seq_printf(m, "%d ",
from_kgid_munged(user_ns, GROUP_AT(group_info, g)));
put_cred(cred);
seq_putc(m, '\n');
}
Example 2: task_state
static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *p)
{
struct group_info *group_info;
int g;
struct fdtable *fdt = NULL;
const struct cred *cred;
pid_t ppid, tpid, vpid;
rcu_read_lock();
ppid = pid_alive(p) ? ve_task_ppid_nr_ns(p, ns) : 0;
tpid = 0;
if (pid_alive(p)) {
struct task_struct *tracer = tracehook_tracer_task(p);
if (tracer)
tpid = task_pid_nr_ns(tracer, ns);
}
vpid = task_virtual_pid(p);
cred = get_task_cred(p);
seq_printf(m,
"State:\t%s\n"
"Tgid:\t%d\n"
"Pid:\t%d\n"
"PPid:\t%d\n"
"TracerPid:\t%d\n"
"Uid:\t%d\t%d\t%d\t%d\n"
"Gid:\t%d\t%d\t%d\t%d\n",
get_task_state(p),
task_tgid_nr_ns(p, ns),
pid_nr_ns(pid, ns),
ppid, tpid,
cred->uid, cred->euid, cred->suid, cred->fsuid,
cred->gid, cred->egid, cred->sgid, cred->fsgid);
task_utrace_proc_status(m, p);
task_lock(p);
if (p->files)
fdt = files_fdtable(p->files);
seq_printf(m,
"FDSize:\t%d\n"
"Groups:\t",
fdt ? fdt->max_fds : 0);
rcu_read_unlock();
group_info = cred->group_info;
task_unlock(p);
for (g = 0; g < min(group_info->ngroups, NGROUPS_SMALL); g++)
seq_printf(m, "%d ", GROUP_AT(group_info, g));
put_cred(cred);
seq_printf(m, "\n");
seq_printf(m, "envID:\t%d\nVPid:\t%d\n",
p->ve_task_info.owner_env->veid, vpid);
seq_printf(m, "StopState:\t%u\n", p->stopped_state);
}
Example 3: task_state
static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *p)
{
struct group_info *group_info;
int g;
struct fdtable *fdt = NULL;
pid_t ppid, tpid;
rcu_read_lock();
ppid = pid_alive(p) ?
task_tgid_nr_ns(rcu_dereference(p->real_parent), ns) : 0;
tpid = 0;
if (pid_alive(p)) {
struct task_struct *tracer = tracehook_tracer_task(p);
if (tracer)
tpid = task_pid_nr_ns(tracer, ns);
}
seq_printf(m,
"State:\t%s\n"
"Tgid:\t%d\n"
"Pid:\t%d\n"
"PPid:\t%d\n"
"TracerPid:\t%d\n"
"Uid:\t%d\t%d\t%d\t%d\n"
"Gid:\t%d\t%d\t%d\t%d\n",
get_task_state(p),
task_tgid_nr_ns(p, ns),
pid_nr_ns(pid, ns),
ppid, tpid,
p->uid, p->euid, p->suid, p->fsuid,
p->gid, p->egid, p->sgid, p->fsgid);
task_lock(p);
if (p->files)
fdt = files_fdtable(p->files);
seq_printf(m,
"FDSize:\t%d\n"
"Groups:\t",
fdt ? fdt->max_fds : 0);
rcu_read_unlock();
group_info = p->group_info;
get_group_info(group_info);
task_unlock(p);
for (g = 0; g < min(group_info->ngroups, NGROUPS_SMALL); g++)
seq_printf(m, "%d ", GROUP_AT(group_info, g));
put_group_info(group_info);
seq_printf(m, "\n");
}
Example 4: check_lock
/*
* Check that the processes holding locks are still alive.
*/
static bool check_lock(lock_t *_lock)
{
pid_t pid;
/* We don't care about unlocked or locking-in-progress */
if (_lock->lock != LOCKED)
return FALSE;
/* First the easy case. If it's held by a dead pid, release it. */
pid = _lock->owner;
/* if we're in the process of unlocking, it can show up as LOCKED
* but with no owner. Just bail, we'll try again next time around.
*/
if (pid == 0)
return FALSE;
if (pid_alive(pid) == FALSE) {
if (errno != ESRCH)
return TRUE;
debugf("Found a lock held by dead pid %d. Freeing.\n", pid);
unlock(_lock);
return TRUE;
}
return FALSE;
}
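Examples 4 and 5 both come from the trinity fuzzer, where pid_alive is not the kernel helper but a userspace probe; the errno != ESRCH test above only makes sense against a wrapper of roughly this shape (a sketch of the idea, not necessarily trinity's exact code; note the return convention also differs between the two versions shown, boolean here and -1/0 in example 5):
#include <signal.h>
#include <sys/types.h>

/* Probe a pid with the null signal: nothing is delivered, but error
 * checking is performed. ESRCH means the process is gone; EPERM means
 * it exists but belongs to another user, so it still counts as alive. */
static int pid_alive(pid_t pid)
{
	if (kill(pid, 0) == -1)
		return -1;	/* caller inspects errno */
	return 0;
}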
Example 5: check_lock
static void check_lock(lock_t *_lock)
{
pid_t pid;
if (_lock->lock != LOCKED)
return;
/* First the easy case. If it's held by a dead pid, release it. */
pid = _lock->owner;
if (pid_alive(pid) == -1) {
if (errno != ESRCH)
return;
debugf("Found a lock held by dead pid %d. Freeing.\n", pid);
unlock(_lock);
return;
}
/* If a pid has had a lock a long time, something is up. */
if (_lock->contention > STEAL_THRESHOLD) {
debugf("pid %d has held lock for too long. Releasing, and killing.\n", pid);
kill_pid(pid);
unlock(_lock);
return;
}
return;
}
Example 6: kill_child_checks
/* send kill to all forked processes */
void kill_child_checks(void) {
int retval;
pid_t pid;
signal(SIGINT, SIG_IGN);
pid = getpid();
if(current_child_pid > 0 && current_child_pid != pid) {
gm_log( GM_LOG_TRACE, "kill_child_checks(): send SIGINT to %d\n", current_child_pid);
kill(-current_child_pid, SIGINT);
kill(current_child_pid, SIGINT);
sleep(1);
if(waitpid(current_child_pid,&retval,WNOHANG)!=0) {
signal(SIGINT, SIG_DFL);
return;
}
if(pid_alive(current_child_pid)) {
gm_log( GM_LOG_TRACE, "kill_child_checks(): send SIGKILL to %d\n", current_child_pid);
kill(current_child_pid, SIGKILL);
}
}
gm_log( GM_LOG_TRACE, "send SIGINT to %d\n", pid);
kill(0, SIGINT);
signal(SIGINT, SIG_DFL);
return;
}
Example 7: Moca_MonitorThread
/* Main function for TLB walkthrough
* Check accessed pages every Moca_wakeupInterval ms
*/
int Moca_MonitorThread(void * arg)
{
task_data data;
moca_task t;
struct task_struct * task;
//Init tlb walk data
int pos;
unsigned long long lastwake=0;
MOCA_DEBUG_PRINT("Moca monitor thread alive \n");
while(!kthread_should_stop())
{
pos=0;
while((t=Moca_NextTask(&pos)))
{
data=t->data;
task=(struct task_struct *)(t->key);
MOCA_DEBUG_PRINT("Moca monitor thread testing task %p\n", task);
if(pid_alive(task) && task->sched_info.last_arrival >= lastwake)
{
lastwake=task->sched_info.last_arrival;
MOCA_DEBUG_PRINT("Moca monitor thread found task %p\n",task);
Moca_MonitorPage(data);
}
}
Moca_UpdateClock();
MOCA_DEBUG_PRINT("Moca monitor thread going to sleep for %d\n",
Moca_wakeupInterval);
msleep(Moca_wakeupInterval);
}
MOCA_DEBUG_PRINT("Moca monitor thread finished\n");
return 0;
}
Example 8: Moca_AddTaskIfNeeded
// Add the task to the monitored set if it belongs to a process we should monitor
moca_task Moca_AddTaskIfNeeded(struct task_struct *t)
{
moca_task ret=NULL;
if(t && pid_alive(t) && t->real_parent && Moca_ShouldMonitorTask(t))
ret=Moca_AddTask(t);
return ret;
}
Example 9: thread_group_cputime
/*
* Accumulate raw cputime values of dead tasks (sig->[us]time) and live
* tasks (sum on group iteration) belonging to @tsk's group.
*/
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
struct signal_struct *sig = tsk->signal;
cputime_t utime, stime;
struct task_struct *t;
times->utime = sig->utime;
times->stime = sig->stime;
times->sum_exec_runtime = sig->sum_sched_runtime;
rcu_read_lock();
/* make sure we can trust tsk->thread_group list */
if (!likely(pid_alive(tsk)))
goto out;
t = tsk;
do {
task_cputime(t, &utime, &stime);
times->utime += utime;
times->stime += stime;
times->sum_exec_runtime += task_sched_runtime(t);
} while_each_thread(tsk, t);
out:
rcu_read_unlock();
}
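The pid_alive check is what makes this walk safe: under rcu_read_lock, a true result means the task has not been unhashed yet, so its thread-group links can still be trusted, exactly as the in-code comment says. The same guard applies wherever kernel code iterates another task's threads; schematically (a fragment illustrating the pattern, not tied to a particular call site):
rcu_read_lock();
if (pid_alive(tsk)) {
	struct task_struct *t = tsk;
	do {
		/* per-thread work; t is only safe to touch inside
		 * this RCU read-side critical section */
	} while_each_thread(tsk, t);
}
rcu_read_unlock();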
Example 10: init_child
void init_child(int childno)
{
cpu_set_t set;
pid_t pid = getpid();
char childname[17];
this_child = childno;
set_seed(childno);
shm->kill_count[childno] = 0;
shm->num_mappings[childno] = 0;
shm->mappings[childno] = zmalloc(sizeof(struct map));
INIT_LIST_HEAD(&shm->mappings[childno]->list);
setup_page_maps();
if (sched_getaffinity(pid, sizeof(set), &set) == 0) {
CPU_ZERO(&set);
CPU_SET(childno, &set);
sched_setaffinity(pid, sizeof(set), &set);
}
shm->child_syscall_count[childno] = 0;
memset(childname, 0, sizeof(childname));
sprintf(childname, "trinity-c%d", childno);
prctl(PR_SET_NAME, (unsigned long) &childname);
oom_score_adj(500);
/* Wait for parent to set our pidslot */
while (shm->pids[childno] != getpid()) {
int ret = 0;
/* Make sure parent is actually alive to wait for us. */
ret = pid_alive(shm->mainpid);
if (ret != 0) {
shm->exit_reason = EXIT_SHM_CORRUPTION;
outputerr(BUGTXT "parent (%d) went away!\n", shm->mainpid);
sleep(20000);
}
}
/* Wait for all the children to start up. */
while (shm->ready == FALSE)
sleep(1);
set_make_it_fail();
if (rand() % 100 < 50)
use_fpu();
mask_signals_child();
disable_coredumps();
}
Example 11: task_state
static inline char * task_state(struct task_struct *p, char *buffer)
{
struct group_info *group_info;
int g;
struct fdtable *fdt = NULL;
read_lock(&tasklist_lock);
buffer += sprintf(buffer,
"State:\t%s\n"
"SleepAVG:\t%lu%%\n"
"Tgid:\t%d\n"
"Pid:\t%d\n"
"PPid:\t%d\n"
"TracerPid:\t%d\n"
"Uid:\t%d\t%d\t%d\t%d\n"
"Gid:\t%d\t%d\t%d\t%d\n",
get_task_state(p),
(p->sleep_avg/1024)*100/(1020000000/1024),
p->tgid,
p->pid, pid_alive(p) ? p->group_leader->real_parent->tgid : 0,
pid_alive(p) && p->ptrace ? p->parent->pid : 0,
p->uid, p->euid, p->suid, p->fsuid,
p->gid, p->egid, p->sgid, p->fsgid);
read_unlock(&tasklist_lock);
task_lock(p);
rcu_read_lock();
if (p->files)
fdt = files_fdtable(p->files);
buffer += sprintf(buffer,
"FDSize:\t%d\n"
"Groups:\t",
fdt ? fdt->max_fds : 0);
rcu_read_unlock();
group_info = p->group_info;
get_group_info(group_info);
task_unlock(p);
for (g = 0; g < min(group_info->ngroups,NGROUPS_SMALL); g++)
buffer += sprintf(buffer, "%d ", GROUP_AT(group_info,g));
put_group_info(group_info);
buffer += sprintf(buffer, "\n");
return buffer;
}
Example 12: task_state
static inline void task_state(struct seq_file *m, struct pid *pid,
struct task_struct *p)
{
struct group_info *group_info;
int g;
struct fdtable *fdt = NULL;
rcu_read_lock();
seq_printf(m,
"State:\t%s\n"
"SleepAVG:\t%lu%%\n"
"Tgid:\t%d\n"
"Pid:\t%d\n"
"PPid:\t%d\n"
"TracerPid:\t%d\n"
"Uid:\t%d\t%d\t%d\t%d\n"
"Gid:\t%d\t%d\t%d\t%d\n",
get_task_state(p),
(p->sleep_avg/1024)*100/(1020000000/1024),
p->tgid, p->pid,
pid_alive(p) ? rcu_dereference(p->real_parent)->tgid : 0,
pid_alive(p) && p->ptrace ? rcu_dereference(p->parent)->pid : 0,
p->uid, p->euid, p->suid, p->fsuid,
p->gid, p->egid, p->sgid, p->fsgid);
task_lock(p);
if (p->files)
fdt = files_fdtable(p->files);
seq_printf(m,
"FDSize:\t%d\n"
"Groups:\t",
fdt ? fdt->max_fds : 0);
rcu_read_unlock();
group_info = p->group_info;
get_group_info(group_info);
task_unlock(p);
for (g = 0; g < min(group_info->ngroups, NGROUPS_SMALL); g++)
seq_printf(m, "%d ", GROUP_AT(group_info, g));
put_group_info(group_info);
seq_printf(m, "\n");
}
Example 13: do_extrafork
/* This is a special case for things like execve, which would replace our
* child process with something unknown to us. We use a 'throwaway' process
* to do the execve in, and let it run for a max of a second before we kill it
*/
static void do_extrafork(struct syscallrecord *rec)
{
pid_t pid = 0;
pid_t extrapid;
extrapid = fork();
if (extrapid == 0) {
/* grand-child */
char childname[]="trinity-subchild";
prctl(PR_SET_NAME, (unsigned long) &childname);
__do_syscall(rec, GOING_AWAY);
/* if this was for e.g. a successful execve, we should never get here.
* if it failed though... */
_exit(EXIT_SUCCESS);
}
/* misc failure. */
if (extrapid == -1) {
//debugf("Couldn't fork grandchild: %s\n", strerror(errno));
return;
}
/* small pause to let grandchild do some work. */
if (pid_alive(extrapid) == TRUE)
usleep(100);
/* We take the rec lock here even though we don't obviously use it.
* The reason is that the grandchild is using it. */
lock(&rec->lock);
while (pid == 0) {
int childstatus;
pid = waitpid(extrapid, &childstatus, WUNTRACED | WCONTINUED | WNOHANG);
if (pid_alive(extrapid) == TRUE)
kill(extrapid, SIGKILL);
usleep(1000);
}
unlock(&rec->lock);
}
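The throwaway-process pattern in do_extrafork also works outside trinity: fork, let the child exec, grant a short grace period, then escalate to SIGKILL and reap. A minimal standalone sketch (run_throwaway is a hypothetical name, and the kill(pid, 0) probe stands in for trinity's pid_alive):
#include <signal.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* Run a command in a disposable child so a successful execve cannot
 * replace the caller; kill the child if it outlives the grace period. */
static void run_throwaway(char *const argv[])
{
	pid_t pid = fork();

	if (pid == 0) {			/* child: exec or die */
		execvp(argv[0], argv);
		_exit(EXIT_FAILURE);	/* only reached if execvp failed */
	}
	if (pid == -1)
		return;			/* fork failed; nothing to clean up */

	usleep(100 * 1000);		/* grace period: 100 ms */

	if (kill(pid, 0) == 0)		/* still alive? escalate */
		kill(pid, SIGKILL);
	waitpid(pid, NULL, 0);		/* reap so no zombie is left */
}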
Example 14: timer_function
static void timer_function(unsigned long par)
{
ushort cpu_share;
if (unlikely(!pid_alive(check_task))) {
del_timer(&check_timer);
printk(KERN_INFO "sendsig: cannot find pid %i. Is the process still active? Timer removed\n", pid);
return;
}
cpu_share = thread_group_cpu_share(check_task);
if (cpu_share >= max_cpu_share) {
count_check++;
printk(KERN_INFO "sendsig: current cpu share over limit of %i (check #%i)\n",
max_cpu_share, count_check);
/* the rationale is: if the process has a cpu share higher than
max_cpu_share for more than max_checks * wait_timeout seconds, then
we'll send the signal sig_to_send to it
*/
if (count_check >= max_checks) {
/*
sending the signal to the process
*/
signal_send(check_task);
/*
remove the timer
*/
del_timer(&check_timer);
printk(KERN_INFO "sendsig: sent signal to process %i, timer removed\n", pid);
return;
}
} else {
/*
if the process is being good, let's reset its counter
*/
count_check = 0;
}
/*
update the timer
*/
mod_timer(&check_timer, jiffies + wait_timeout * HZ);
return;
}
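Only the callback is shown above; the timer it keeps re-arming would have been set up at module init along these lines (a sketch assuming the pre-4.15 timer API, which matches the unsigned long callback signature; check_timer, check_task and wait_timeout are the module's own globals):
static int __init sendsig_init(void)
{
	/* arm the periodic check; timer_function re-arms itself */
	setup_timer(&check_timer, timer_function, 0UL);
	mod_timer(&check_timer, jiffies + wait_timeout * HZ);
	return 0;
}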
Example 15: restart_lost_procs
static void restart_lost_procs(void)
{
svc_t *svc;
for (svc = svc_iterator(1); svc; svc = svc_iterator(0)) {
if (svc->pid > 0 && pid_alive(svc->pid))
continue;
/* Only restart lost daemons, not task/run/inetd services */
if (SVC_TYPE_SERVICE != svc->type) {
svc->pid = 0;
continue;
}
service_start(svc);
}
}