This article collects typical usage examples of the PROC_LOCK function found in C code (the call sites are from the FreeBSD kernel, where PROC_LOCK is a macro, so the examples are C rather than C++). If you have been wondering what PROC_LOCK does, how to call it, or what real-world call sites look like, the curated examples below should help.
Fifteen PROC_LOCK code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better code examples.
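Before the examples, a quick orientation may help. In FreeBSD, PROC_LOCK(p) and PROC_UNLOCK(p) are macros from <sys/proc.h> that lock and unlock the per-process mutex p->p_mtx, which protects fields such as p_flag, p_lock, and the credential pointer p_ucred. Below is a minimal sketch of the basic idiom; the function name is hypothetical, and the macro expansions shown in the comment are the standard definitions:
/*
 * Minimal sketch of the basic PROC_LOCK idiom. The standard definitions
 * in <sys/proc.h> are essentially:
 *
 *	#define PROC_LOCK(p)	mtx_lock(&(p)->p_mtx)
 *	#define PROC_UNLOCK(p)	mtx_unlock(&(p)->p_mtx)
 */
static void
example_set_flag(struct proc *p, int flag)
{
	PROC_LOCK(p);		/* acquire p->p_mtx */
	p->p_flag |= flag;	/* p_flag may only be modified under the lock */
	PROC_UNLOCK(p);		/* drop it before anything that may sleep */
}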
Example 1: ia32_osendsig
static void
ia32_osendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
struct ia32_sigframe3 sf, *fp;
struct proc *p;
struct thread *td;
struct sigacts *psp;
struct trapframe *regs;
int sig;
int oonstack;
td = curthread;
p = td->td_proc;
PROC_LOCK_ASSERT(p, MA_OWNED);
sig = ksi->ksi_signo;
psp = p->p_sigacts;
mtx_assert(&psp->ps_mtx, MA_OWNED);
regs = td->td_frame;
oonstack = sigonstack(regs->tf_rsp);
/* Allocate space for the signal handler context. */
if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
SIGISMEMBER(psp->ps_sigonstack, sig)) {
fp = (struct ia32_sigframe3 *)(td->td_sigstk.ss_sp +
td->td_sigstk.ss_size - sizeof(sf));
td->td_sigstk.ss_flags |= SS_ONSTACK;
} else
fp = (struct ia32_sigframe3 *)regs->tf_rsp - 1;
/* Translate the signal if appropriate. */
if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
/* Build the argument list for the signal handler. */
sf.sf_signum = sig;
sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
if (SIGISMEMBER(psp->ps_siginfo, sig)) {
/* Signal handler installed with SA_SIGINFO. */
sf.sf_arg2 = (register_t)&fp->sf_siginfo;
sf.sf_siginfo.si_signo = sig;
sf.sf_siginfo.si_code = ksi->ksi_code;
sf.sf_ah = (uintptr_t)catcher;
} else {
/* Old FreeBSD-style arguments. */
sf.sf_arg2 = ksi->ksi_code;
sf.sf_addr = (register_t)ksi->ksi_addr;
sf.sf_ah = (uintptr_t)catcher;
}
mtx_unlock(&psp->ps_mtx);
PROC_UNLOCK(p);
/* Save most if not all of trap frame. */
sf.sf_siginfo.si_sc.sc_eax = regs->tf_rax;
sf.sf_siginfo.si_sc.sc_ebx = regs->tf_rbx;
sf.sf_siginfo.si_sc.sc_ecx = regs->tf_rcx;
sf.sf_siginfo.si_sc.sc_edx = regs->tf_rdx;
sf.sf_siginfo.si_sc.sc_esi = regs->tf_rsi;
sf.sf_siginfo.si_sc.sc_edi = regs->tf_rdi;
sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
sf.sf_siginfo.si_sc.sc_gs = regs->tf_gs;
sf.sf_siginfo.si_sc.sc_isp = regs->tf_rsp;
/* Build the signal context to be used by osigreturn(). */
sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
sf.sf_siginfo.si_sc.sc_esp = regs->tf_rsp;
sf.sf_siginfo.si_sc.sc_ebp = regs->tf_rbp;
sf.sf_siginfo.si_sc.sc_eip = regs->tf_rip;
sf.sf_siginfo.si_sc.sc_eflags = regs->tf_rflags;
sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
sf.sf_siginfo.si_sc.sc_err = regs->tf_err;
/*
* Copy the sigframe out to the user's stack.
*/
if (copyout(&sf, fp, sizeof(*fp)) != 0) {
#ifdef DEBUG
printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
PROC_LOCK(p);
sigexit(td, SIGILL);
}
regs->tf_rsp = (uintptr_t)fp;
regs->tf_rip = p->p_sysent->sv_psstrings - sz_ia32_osigcode;
regs->tf_rflags &= ~(PSL_T | PSL_D);
regs->tf_cs = _ucode32sel;
regs->tf_ds = _udatasel;
regs->tf_es = _udatasel;
regs->tf_fs = _udatasel;
regs->tf_ss = _udatasel;
set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
PROC_LOCK(p);
mtx_lock(&psp->ps_mtx);
}
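A pattern worth noting in Example 1: the function is entered with both the process lock and the sigacts mutex held (see the asserts near the top), drops both before copyout() because a copy to user space can fault and sleep, and reacquires them before returning so the caller sees the locking state it expects. Here is a stripped-down sketch of that shape, where struct some_frame, build_frame(), and user_dst are hypothetical placeholders for the frame setup:
/*
 * Sketch of the lock juggling in sendsig-style functions: sleepable work
 * such as copyout() must not run while holding a mutex.
 */
static void
example_sendsig_shape(struct proc *p, struct sigacts *psp, void *user_dst)
{
	struct some_frame sf;			/* hypothetical frame type */

	PROC_LOCK_ASSERT(p, MA_OWNED);		/* caller holds the proc lock... */
	mtx_assert(&psp->ps_mtx, MA_OWNED);	/* ...and the sigacts mutex */
	build_frame(&sf);			/* hypothetical frame setup */
	mtx_unlock(&psp->ps_mtx);		/* drop both locks: copyout() */
	PROC_UNLOCK(p);				/* may fault and sleep */
	if (copyout(&sf, user_dst, sizeof(sf)) != 0) {
		PROC_LOCK(p);			/* sigexit() needs the proc lock */
		sigexit(curthread, SIGILL);
		/* NOTREACHED */
	}
	PROC_LOCK(p);				/* restore the locking state */
	mtx_lock(&psp->ps_mtx);			/* the caller expects */
}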
Example 2: linux_common_open
static int
linux_common_open(struct thread *td, int dirfd, char *path, int l_flags, int mode)
{
cap_rights_t rights;
struct proc *p = td->td_proc;
struct file *fp;
int fd;
int bsd_flags, error;
bsd_flags = 0;
switch (l_flags & LINUX_O_ACCMODE) {
case LINUX_O_WRONLY:
bsd_flags |= O_WRONLY;
break;
case LINUX_O_RDWR:
bsd_flags |= O_RDWR;
break;
default:
bsd_flags |= O_RDONLY;
}
if (l_flags & LINUX_O_NDELAY)
bsd_flags |= O_NONBLOCK;
if (l_flags & LINUX_O_APPEND)
bsd_flags |= O_APPEND;
if (l_flags & LINUX_O_SYNC)
bsd_flags |= O_FSYNC;
if (l_flags & LINUX_O_NONBLOCK)
bsd_flags |= O_NONBLOCK;
if (l_flags & LINUX_FASYNC)
bsd_flags |= O_ASYNC;
if (l_flags & LINUX_O_CREAT)
bsd_flags |= O_CREAT;
if (l_flags & LINUX_O_TRUNC)
bsd_flags |= O_TRUNC;
if (l_flags & LINUX_O_EXCL)
bsd_flags |= O_EXCL;
if (l_flags & LINUX_O_NOCTTY)
bsd_flags |= O_NOCTTY;
if (l_flags & LINUX_O_DIRECT)
bsd_flags |= O_DIRECT;
if (l_flags & LINUX_O_NOFOLLOW)
bsd_flags |= O_NOFOLLOW;
if (l_flags & LINUX_O_DIRECTORY)
bsd_flags |= O_DIRECTORY;
/* XXX LINUX_O_NOATIME: unable to be easily implemented. */
error = kern_openat(td, dirfd, path, UIO_SYSSPACE, bsd_flags, mode);
if (error != 0)
goto done;
if (bsd_flags & O_NOCTTY)
goto done;
/*
* XXX In between kern_open() and fget(), another process
* having the same filedesc could use that fd without
* checking below.
*/
fd = td->td_retval[0];
if (fget(td, fd, cap_rights_init(&rights, CAP_IOCTL), &fp) == 0) {
if (fp->f_type != DTYPE_VNODE) {
fdrop(fp, td);
goto done;
}
sx_slock(&proctree_lock);
PROC_LOCK(p);
if (SESS_LEADER(p) && !(p->p_flag & P_CONTROLT)) {
PROC_UNLOCK(p);
sx_sunlock(&proctree_lock);
/* XXXPJD: Verify if TIOCSCTTY is allowed. */
(void) fo_ioctl(fp, TIOCSCTTY, (caddr_t) 0,
td->td_ucred, td);
} else {
PROC_UNLOCK(p);
sx_sunlock(&proctree_lock);
}
fdrop(fp, td);
}
done:
#ifdef DEBUG
if (ldebug(open))
printf(LMSG("open returns error %d"), error);
#endif
LFREEPATH(path);
return (error);
}
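Example 2 also illustrates lock ordering: the process-tree sx lock is taken before the process mutex, and both are released before calling fo_ioctl(), which may sleep. A minimal sketch of that ordering, assuming the standard proctree_lock and SESS_LEADER() from <sys/proc.h>:
/*
 * Sketch of the proctree_lock -> PROC_LOCK ordering used when inspecting
 * session state, as in linux_common_open() above.
 */
static int
example_leader_without_ctty(struct proc *p)
{
	int result;

	sx_slock(&proctree_lock);	/* the tree lock comes first... */
	PROC_LOCK(p);			/* ...then the process lock */
	result = SESS_LEADER(p) && (p->p_flag & P_CONTROLT) == 0;
	PROC_UNLOCK(p);			/* release in reverse order */
	sx_sunlock(&proctree_lock);
	return (result);
}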
Example 3: data_abort_handler
//......... part of the code omitted here .........
* userland that actually runs in a privileged mode
* but uses USR mode permissions for its accesses.
*/
user = 1;
ksig.signb = SIGSEGV;
ksig.code = 0;
goto do_trapsignal;
}
} else {
map = &td->td_proc->p_vmspace->vm_map;
}
/*
* We need to know whether the page should be mapped
* as R or R/W. The MMU does not give us the info as
* to whether the fault was caused by a read or a write.
*
* However, we know that a permission fault can only be
* the result of a write to a read-only location, so
* we can deal with those quickly.
*
* Otherwise we need to disassemble the instruction
* responsible to determine if it was a write.
*/
if (IS_PERMISSION_FAULT(fsr))
ftype = VM_PROT_WRITE;
else {
u_int insn = ReadWord(tf->tf_pc);
if (((insn & 0x0c100000) == 0x04000000) || /* STR/STRB */
((insn & 0x0e1000b0) == 0x000000b0) || /* STRH/STRD */
((insn & 0x0a100000) == 0x08000000)) { /* STM/CDT */
ftype = VM_PROT_WRITE;
} else {
if ((insn & 0x0fb00ff0) == 0x01000090) /* SWP */
ftype = VM_PROT_READ | VM_PROT_WRITE;
else
ftype = VM_PROT_READ;
}
}
/*
* See if the fault is as a result of ref/mod emulation,
* or domain mismatch.
*/
#ifdef DEBUG
last_fault_code = fsr;
#endif
if (pmap_fault_fixup(vmspace_pmap(td->td_proc->p_vmspace), va, ftype,
user)) {
goto out;
}
onfault = pcb->pcb_onfault;
pcb->pcb_onfault = NULL;
if (map != kernel_map) {
PROC_LOCK(p);
p->p_lock++;
PROC_UNLOCK(p);
}
error = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
pcb->pcb_onfault = onfault;
if (map != kernel_map) {
PROC_LOCK(p);
p->p_lock--;
PROC_UNLOCK(p);
}
if (__predict_true(error == 0))
goto out;
if (user == 0) {
if (pcb->pcb_onfault) {
tf->tf_r0 = error;
tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault;
return;
}
printf("\nvm_fault(%p, %x, %x, 0) -> %x\n", map, va, ftype,
error);
dab_fatal(tf, fsr, far, td, &ksig);
}
if (error == ENOMEM) {
printf("VM: pid %d (%s), uid %d killed: "
"out of swap\n", td->td_proc->p_pid, td->td_name,
(td->td_proc->p_ucred) ?
td->td_proc->p_ucred->cr_uid : -1);
ksig.signb = SIGKILL;
} else {
ksig.signb = SIGSEGV;
}
ksig.code = 0;
do_trapsignal:
call_trapsignal(td, ksig.signb, ksig.code);
out:
/* If returning to user mode, make sure to invoke userret() */
if (user)
userret(td, tf);
}
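The p_lock++ / p_lock-- pair around vm_fault() in Example 3 keeps the process from being torn down while the fault is serviced; exit1() in Example 5 waits for this hold count to drain. FreeBSD wraps the idiom in the PHOLD()/PRELE() macros; the sketch below shows roughly what they expand to, with do_faulting_work() as a hypothetical placeholder:
/*
 * Sketch of the process-hold idiom from data_abort_handler(). This is,
 * in essence, what PHOLD()/PRELE() from <sys/proc.h> do.
 */
static void
example_hold_across_fault(struct proc *p)
{
	PROC_LOCK(p);
	p->p_lock++;			/* exit1() waits for this to drain */
	PROC_UNLOCK(p);
	do_faulting_work();		/* hypothetical: may sleep (vm_fault) */
	PROC_LOCK(p);
	p->p_lock--;
	if (p->p_lock == 0 && (p->p_flag & P_WEXIT) != 0)
		wakeup(&p->p_lock);	/* let a pending exit1() proceed */
	PROC_UNLOCK(p);
}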
Example 4: pmclog_loop
static void
pmclog_loop(void *arg)
{
int error;
struct pmc_owner *po;
struct pmclog_buffer *lb;
struct proc *p;
struct ucred *ownercred;
struct ucred *mycred;
struct thread *td;
struct uio auio;
struct iovec aiov;
size_t nbytes;
po = (struct pmc_owner *) arg;
p = po->po_owner;
td = curthread;
mycred = td->td_ucred;
PROC_LOCK(p);
ownercred = crhold(p->p_ucred);
PROC_UNLOCK(p);
PMCDBG(LOG,INI,1, "po=%p kt=%p", po, po->po_kthread);
KASSERT(po->po_kthread == curthread->td_proc,
("[pmclog,%d] proc mismatch po=%p po/kt=%p curproc=%p", __LINE__,
po, po->po_kthread, curthread->td_proc));
lb = NULL;
/*
* Loop waiting for I/O requests to be added to the owner
* struct's queue. The loop is exited when the log file
* is deconfigured.
*/
mtx_lock(&pmc_kthread_mtx);
for (;;) {
/* check if we've been asked to exit */
if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
break;
if (lb == NULL) { /* look for a fresh buffer to write */
mtx_lock_spin(&po->po_mtx);
if ((lb = TAILQ_FIRST(&po->po_logbuffers)) == NULL) {
mtx_unlock_spin(&po->po_mtx);
/* No more buffers and shutdown required. */
if (po->po_flags & PMC_PO_SHUTDOWN) {
mtx_unlock(&pmc_kthread_mtx);
/*
* Close the file to get PMCLOG_EOF
* error in pmclog(3).
*/
fo_close(po->po_file, curthread);
mtx_lock(&pmc_kthread_mtx);
break;
}
(void) msleep(po, &pmc_kthread_mtx, PWAIT,
"pmcloop", 0);
continue;
}
TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next);
mtx_unlock_spin(&po->po_mtx);
}
mtx_unlock(&pmc_kthread_mtx);
/* process the request */
PMCDBG(LOG,WRI,2, "po=%p base=%p ptr=%p", po,
lb->plb_base, lb->plb_ptr);
/* change our thread's credentials before issuing the I/O */
aiov.iov_base = lb->plb_base;
aiov.iov_len = nbytes = lb->plb_ptr - lb->plb_base;
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_offset = -1;
auio.uio_resid = nbytes;
auio.uio_rw = UIO_WRITE;
auio.uio_segflg = UIO_SYSSPACE;
auio.uio_td = td;
/* switch thread credentials -- see kern_ktrace.c */
td->td_ucred = ownercred;
error = fo_write(po->po_file, &auio, ownercred, 0, td);
td->td_ucred = mycred;
if (error) {
/* XXX some errors are recoverable */
/* send a SIGIO to the owner and exit */
PROC_LOCK(p);
kern_psignal(p, SIGIO);
PROC_UNLOCK(p);
//......... part of the code omitted here .........
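Example 4 shows another common reason to take PROC_LOCK: p->p_ucred can be replaced at any time, so a stable reference must be taken under the lock with crhold() and dropped later with crfree(). A sketch of that snapshot:
/*
 * Sketch of snapshotting a process's credentials, as pmclog_loop() does
 * before issuing I/O on the owner's behalf.
 */
static struct ucred *
example_cred_snapshot(struct proc *p)
{
	struct ucred *cred;

	PROC_LOCK(p);
	cred = crhold(p->p_ucred);	/* p_ucred is only stable under the lock */
	PROC_UNLOCK(p);
	return (cred);			/* caller releases it with crfree(cred) */
}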
Example 5: exit1
/*
* Exit: deallocate address space and other resources, change proc state to
* zombie, and unlink proc from allproc and parent's lists. Save exit status
* and rusage for wait(). Check for child processes and orphan them.
*/
void
exit1(struct thread *td, int rv)
{
struct proc *p, *nq, *q;
struct vnode *vtmp;
struct vnode *ttyvp = NULL;
struct plimit *plim;
mtx_assert(&Giant, MA_NOTOWNED);
p = td->td_proc;
/*
* XXX in case we're rebooting we just let init die in order to
* work around an unsolved stack overflow seen very late during
* shutdown on sparc64 when the gmirror worker process exits.
*/
if (p == initproc && rebooting == 0) {
printf("init died (signal %d, exit %d)\n",
WTERMSIG(rv), WEXITSTATUS(rv));
panic("Going nowhere without my init!");
}
/*
* MUST abort all other threads before proceeding past here.
*/
PROC_LOCK(p);
while (p->p_flag & P_HADTHREADS) {
/*
* First check if some other thread got here before us.
* If so, act appropriately: exit or suspend.
*/
thread_suspend_check(0);
/*
* Kill off the other threads. This requires
* some co-operation from other parts of the kernel
* so it may not be instantaneous. With this state set
* any thread entering the kernel from userspace will
* thread_exit() in trap(). Any thread attempting to
* sleep will return immediately with EINTR or EWOULDBLOCK
* which will hopefully force them to back out to userland
* freeing resources as they go. Any thread attempting
* to return to userland will thread_exit() from userret().
* thread_exit() will unsuspend us when the last of the
* other threads exits.
* If there is already a thread singler after resumption,
* calling thread_single() will fail; in that case, we just
* re-check all suspension requests; the thread should
* either be suspended there or exit.
*/
if (!thread_single(SINGLE_EXIT))
break;
/*
* All other activity in this process is now stopped.
* Threading support has been turned off.
*/
}
KASSERT(p->p_numthreads == 1,
("exit1: proc %p exiting with %d threads", p, p->p_numthreads));
racct_sub(p, RACCT_NTHR, 1);
/*
* Wakeup anyone in procfs' PIOCWAIT. They should have a hold
* on our vmspace, so we should block below until they have
* released their reference to us. Note that if they have
* requested S_EXIT stops we will block here until they ack
* via PIOCCONT.
*/
_STOPEVENT(p, S_EXIT, rv);
/*
* Ignore any pending request to stop due to a stop signal.
* Once P_WEXIT is set, future requests will be ignored as
* well.
*/
p->p_flag &= ~P_STOPPED_SIG;
KASSERT(!P_SHOULDSTOP(p), ("exiting process is stopped"));
/*
* Note that we are exiting and do another wakeup of anyone in
* PIOCWAIT in case they aren't listening for S_EXIT stops or
* decided to wait again after we told them we are exiting.
*/
p->p_flag |= P_WEXIT;
wakeup(&p->p_stype);
/*
* Wait for any processes that have a hold on our vmspace to
* release their reference.
*/
while (p->p_lock > 0)
msleep(&p->p_lock, &p->p_mtx, PWAIT, "exithold", 0);
p->p_xstat = rv; /* Let event handler change exit status */
PROC_UNLOCK(p);
//......... part of the code omitted here .........
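The "exithold" loop above is worth calling out: msleep() is handed &p->p_mtx, the very mutex PROC_LOCK acquires, so the process lock is atomically dropped while sleeping and retaken before msleep() returns, making the while-condition recheck safe. A minimal sketch of just that wait:
/*
 * Sketch of sleeping on a condition protected by the process lock, as in
 * the p_lock drain loop of exit1(). The wakeup side is the PRELE-style
 * code shown after Example 3.
 */
static void
example_wait_for_holds(struct proc *p)
{
	PROC_LOCK(p);
	while (p->p_lock > 0)
		msleep(&p->p_lock, &p->p_mtx, PWAIT, "exithold", 0);
	PROC_UNLOCK(p);
}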
Example 6: linux_rt_sigreturn
/*
* Copied from amd64/amd64/machdep.c
*
* XXX Is FPU state needed here? Don't think so.
*/
int
linux_rt_sigreturn(struct thread *td, struct linux_rt_sigreturn_args *args)
{
struct proc *p;
struct l_ucontext uc;
struct l_sigcontext *context;
struct trapframe *regs;
unsigned long rflags;
int error;
ksiginfo_t ksi;
regs = td->td_frame;
error = copyin((void *)regs->tf_rbx, &uc, sizeof(uc));
if (error != 0)
return (error);
p = td->td_proc;
context = &uc.uc_mcontext;
rflags = context->sc_rflags;
/*
* Don't allow users to change privileged or reserved flags.
*/
/*
* XXX do allow users to change the privileged flag PSL_RF.
* The cpu sets PSL_RF in tf_rflags for faults. Debuggers
* should sometimes set it there too. tf_rflags is kept in
* the signal context during signal handling and there is no
* other place to remember it, so the PSL_RF bit may be
* corrupted by the signal handler without us knowing.
* Corruption of the PSL_RF bit at worst causes one more or
* one less debugger trap, so allowing it is fairly harmless.
*/
#define RFLAG_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
if (!RFLAG_SECURE(rflags & ~PSL_RF, regs->tf_rflags & ~PSL_RF)) {
printf("linux_rt_sigreturn: rflags = 0x%lx\n", rflags);
return (EINVAL);
}
/*
* Don't allow users to load a valid privileged %cs. Let the
* hardware check for invalid selectors, excess privilege in
* other selectors, invalid %eip's and invalid %esp's.
*/
#define CS_SECURE(cs) (ISPL(cs) == SEL_UPL)
if (!CS_SECURE(context->sc_cs)) {
printf("linux_rt_sigreturn: cs = 0x%x\n", context->sc_cs);
ksiginfo_init_trap(&ksi);
ksi.ksi_signo = SIGBUS;
ksi.ksi_code = BUS_OBJERR;
ksi.ksi_trapno = T_PROTFLT;
ksi.ksi_addr = (void *)regs->tf_rip;
trapsignal(td, &ksi);
return (EINVAL);
}
PROC_LOCK(p);
linux_to_bsd_sigset(&uc.uc_sigmask, &td->td_sigmask);
SIG_CANTMASK(td->td_sigmask);
signotify(td);
PROC_UNLOCK(p);
regs->tf_rdi = context->sc_rdi;
regs->tf_rsi = context->sc_rsi;
regs->tf_rdx = context->sc_rdx;
regs->tf_rbp = context->sc_rbp;
regs->tf_rbx = context->sc_rbx;
regs->tf_rcx = context->sc_rcx;
regs->tf_rax = context->sc_rax;
regs->tf_rip = context->sc_rip;
regs->tf_rsp = context->sc_rsp;
regs->tf_r8 = context->sc_r8;
regs->tf_r9 = context->sc_r9;
regs->tf_r10 = context->sc_r10;
regs->tf_r11 = context->sc_r11;
regs->tf_r12 = context->sc_r12;
regs->tf_r13 = context->sc_r13;
regs->tf_r14 = context->sc_r14;
regs->tf_r15 = context->sc_r15;
regs->tf_cs = context->sc_cs;
regs->tf_err = context->sc_err;
regs->tf_rflags = rflags;
set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
return (EJUSTRETURN);
}
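The short PROC_LOCK window in Example 6 is the usual way to install a new signal mask: the write to td_sigmask and the signotify() recheck of newly unblocked signals both happen under the process lock. A sketch following that sequence:
/*
 * Sketch of installing a new signal mask, following the sequence in
 * linux_rt_sigreturn() above.
 */
static void
example_set_sigmask(struct thread *td, const sigset_t *mask)
{
	struct proc *p = td->td_proc;

	PROC_LOCK(p);
	td->td_sigmask = *mask;		/* protected by the process lock */
	SIG_CANTMASK(td->td_sigmask);	/* SIGKILL/SIGSTOP stay unblockable */
	signotify(td);			/* recheck pending, now-unblocked signals */
	PROC_UNLOCK(p);
}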
Example 7: cheriabi_sendsig
/*
* The CheriABI version of sendsig(9) largely borrows from the MIPS version,
* and it is important to keep them in sync. It differs primarily in that it
* must also be aware of user stack-handling ABIs, so is also sensitive to our
* (fluctuating) design choices in how $stc and $sp interact. The current
* design uses ($stc + $sp) for stack-relative references, so early on we have
* to calculate a 'relocated' version of $sp that we can then use for
* MIPS-style access.
*
* This code, as with the CHERI-aware MIPS code, makes a privilege
* determination in order to decide whether to trust the stack exposed by the
* user code for the purposes of signal handling. We must use the alternative
* stack if there is any indication that using the user thread's stack state
* might violate the userspace compartmentalisation model.
*/
static void
cheriabi_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
struct proc *p;
struct thread *td;
struct trapframe *regs;
struct sigacts *psp;
struct sigframe_c sf, *sfp;
uintptr_t stackbase;
vm_offset_t sp;
int cheri_is_sandboxed;
int sig;
int oonstack;
td = curthread;
p = td->td_proc;
PROC_LOCK_ASSERT(p, MA_OWNED);
sig = ksi->ksi_signo;
psp = p->p_sigacts;
mtx_assert(&psp->ps_mtx, MA_OWNED);
regs = td->td_frame;
/*
* In CheriABI, $sp is $stc relative, so calculate a relocation base
* that must be combined with regs->sp from this point onwards.
* Unfortunately, we won't retain bounds and permissions information
* (as is the case elsewhere in CheriABI). While 'stackbase'
* suggests that $stc's offset isn't included, in practice it will be,
* although we may reasonably assume that it will be zero.
*
* If it turns out we will be delivering to the alternative signal
* stack, we'll recalculate stackbase later.
*/
CHERI_CLC(CHERI_CR_CTEMP0, CHERI_CR_KDC, &td->td_pcb->pcb_regs.stc,
0);
CHERI_CTOPTR(stackbase, CHERI_CR_CTEMP0, CHERI_CR_KDC);
oonstack = sigonstack(stackbase + regs->sp);
/*
* CHERI affects signal delivery in the following ways:
*
* (1) Additional capability-coprocessor state is exposed via
* extensions to the context frame placed on the stack.
*
* (2) If the user $pcc doesn't include CHERI_PERM_SYSCALL, then we
* consider user state to be 'sandboxed' and therefore to require
* special delivery handling which includes a domain-switch to the
* thread's context-switch domain. (This is done by
* cheri_sendsig()).
*
* (3) If an alternative signal stack is not defined, and we are in a
* 'sandboxed' state, then we have two choices: (a) if the signal
* is of type SA_SANDBOX_UNWIND, we will automatically unwind the
* trusted stack by one frame; (b) otherwise, we will terminate
* the process unconditionally.
*/
cheri_is_sandboxed = cheri_signal_sandboxed(td);
/*
* We provide the ability to drop into the debugger in two different
* circumstances: (1) if the code running is sandboxed; and (2) if the
* fault is a CHERI protection fault. Handle both here for the
* non-unwind case. Do this before we rewrite any general-purpose or
* capability register state for the thread.
*/
#if DDB
if (cheri_is_sandboxed && security_cheri_debugger_on_sandbox_signal)
kdb_enter(KDB_WHY_CHERI, "Signal delivery to CHERI sandbox");
else if (sig == SIGPROT && security_cheri_debugger_on_sigprot)
kdb_enter(KDB_WHY_CHERI,
"SIGPROT delivered outside sandbox");
#endif
/*
* If a thread is running sandboxed, we can't rely on $sp which may
* not point at a valid stack in the ambient context, or even be
* maliciously manipulated. We must therefore always use the
* alternative stack. We are also therefore unable to tell whether we
* are on the alternative stack, so must clear 'oonstack' here.
*
* XXXRW: This requires significant further thinking; however, the net
* upshot is that it is not a good idea to do an object-capability
* invoke() from a signal handler, as with so many other things in
* life.
//......... part of the code omitted here .........
Example 8: ext2_bmaparray
int
ext2_bmaparray(struct vnode *vp, daddr_t bn, daddr_t *bnp, int *runp, int *runb)
{
struct inode *ip;
struct buf *bp;
struct ext2mount *ump;
struct mount *mp;
struct indir a[NIADDR+1], *ap;
daddr_t daddr;
e2fs_lbn_t metalbn;
int error, num, maxrun = 0, bsize;
int *nump;
ap = NULL;
ip = VTOI(vp);
mp = vp->v_mount;
ump = VFSTOEXT2(mp);
bsize = EXT2_BLOCK_SIZE(ump->um_e2fs);
if (runp) {
maxrun = mp->mnt_iosize_max / bsize - 1;
*runp = 0;
}
if (runb) {
*runb = 0;
}
ap = a;
nump = #
error = ext2_getlbns(vp, bn, ap, nump);
if (error)
return (error);
num = *nump;
if (num == 0) {
*bnp = blkptrtodb(ump, ip->i_db[bn]);
if (*bnp == 0) {
*bnp = -1;
} else if (runp) {
daddr_t bnb = bn;
for (++bn; bn < NDADDR && *runp < maxrun &&
is_sequential(ump, ip->i_db[bn - 1], ip->i_db[bn]);
++bn, ++*runp);
bn = bnb;
if (runb && (bn > 0)) {
for (--bn; (bn >= 0) && (*runb < maxrun) &&
is_sequential(ump, ip->i_db[bn],
ip->i_db[bn + 1]);
--bn, ++*runb);
}
}
return (0);
}
/* Get disk address out of indirect block array */
daddr = ip->i_ib[ap->in_off];
for (bp = NULL, ++ap; --num; ++ap) {
/*
* Exit the loop if there is no disk address assigned yet and
* the indirect block isn't in the cache, or if we were
* looking for an indirect block and we've found it.
*/
metalbn = ap->in_lbn;
if ((daddr == 0 && !incore(&vp->v_bufobj, metalbn)) || metalbn == bn)
break;
/*
* If we get here, we've either got the block in the cache
* or we have a disk address for it, go fetch it.
*/
if (bp)
bqrelse(bp);
bp = getblk(vp, metalbn, bsize, 0, 0, 0);
if ((bp->b_flags & B_CACHE) == 0) {
#ifdef INVARIANTS
if (!daddr)
panic("ext2_bmaparray: indirect block not in cache");
#endif
bp->b_blkno = blkptrtodb(ump, daddr);
bp->b_iocmd = BIO_READ;
bp->b_flags &= ~B_INVAL;
bp->b_ioflags &= ~BIO_ERROR;
vfs_busy_pages(bp, 0);
bp->b_iooffset = dbtob(bp->b_blkno);
bstrategy(bp);
#ifdef RACCT
if (racct_enable) {
PROC_LOCK(curproc);
racct_add_buf(curproc, bp, 0);
PROC_UNLOCK(curproc);
}
#endif
curthread->td_ru.ru_inblock++;
error = bufwait(bp);
//......... part of the code omitted here .........
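Even file-system code such as ext2_bmaparray() ends up taking PROC_LOCK: RACCT resource accounting charges the block read to curproc, and racct_add_buf() must be called with that process locked. A sketch of the accounting step alone:
/*
 * Sketch of charging block I/O to the current process, mirroring the
 * RACCT block in ext2_bmaparray() above.
 */
static void
example_charge_read(struct buf *bp)
{
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(curproc);
		racct_add_buf(curproc, bp, 0);	/* third arg 0 means a read */
		PROC_UNLOCK(curproc);
	}
#endif
	curthread->td_ru.ru_inblock++;		/* per-thread rusage counter */
}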
Example 9: linux_clone
int
linux_clone(struct thread *td, struct linux_clone_args *args)
{
int error, ff = RFPROC | RFSTOPPED;
struct proc *p2;
struct thread *td2;
int exit_signal;
struct linux_emuldata *em;
#ifdef DEBUG
if (ldebug(clone)) {
printf(ARGS(clone, "flags %x, stack %p, parent tid: %p, "
"child tid: %p"), (unsigned)args->flags,
args->stack, args->parent_tidptr, args->child_tidptr);
}
#endif
exit_signal = args->flags & 0x000000ff;
if (LINUX_SIG_VALID(exit_signal)) {
if (exit_signal <= LINUX_SIGTBLSZ)
exit_signal =
linux_to_bsd_signal[_SIG_IDX(exit_signal)];
} else if (exit_signal != 0)
return (EINVAL);
if (args->flags & LINUX_CLONE_VM)
ff |= RFMEM;
if (args->flags & LINUX_CLONE_SIGHAND)
ff |= RFSIGSHARE;
/*
* XXX: In Linux, sharing of fs info (chroot/cwd/umask)
* and open files is independent. In FreeBSD, it's in one
* structure but in reality it does not cause any problems
* because both of these flags are usually set together.
*/
if (!(args->flags & (LINUX_CLONE_FILES | LINUX_CLONE_FS)))
ff |= RFFDG;
/*
* Attempt to detect when linux_clone(2) is used for creating
* kernel threads. Unfortunately despite the existence of the
* CLONE_THREAD flag, the version of the linuxthreads package used in
* most popular distros as of the beginning of 2005 doesn't make
* any use of it. Therefore, this detection relies on
* empirical observation that linuxthreads sets certain
* combination of flags, so that we can make more or less
* precise detection and notify the FreeBSD kernel that several
* processes are in fact part of the same threading group, so
* that special treatment is necessary for signal delivery
* between those processes and fd locking.
*/
if ((args->flags & 0xffffff00) == LINUX_THREADING_FLAGS)
ff |= RFTHREAD;
if (args->flags & LINUX_CLONE_PARENT_SETTID)
if (args->parent_tidptr == NULL)
return (EINVAL);
error = fork1(td, ff, 0, &p2, NULL, 0);
if (error)
return (error);
if (args->flags & (LINUX_CLONE_PARENT | LINUX_CLONE_THREAD)) {
sx_xlock(&proctree_lock);
PROC_LOCK(p2);
proc_reparent(p2, td->td_proc->p_pptr);
PROC_UNLOCK(p2);
sx_xunlock(&proctree_lock);
}
/* create the emuldata */
error = linux_proc_init(td, p2->p_pid, args->flags);
/* reference it - no need to check this */
em = em_find(p2, EMUL_DOLOCK);
KASSERT(em != NULL, ("clone: emuldata not found."));
/* and adjust it */
if (args->flags & LINUX_CLONE_THREAD) {
#ifdef notyet
PROC_LOCK(p2);
p2->p_pgrp = td->td_proc->p_pgrp;
PROC_UNLOCK(p2);
#endif
exit_signal = 0;
}
if (args->flags & LINUX_CLONE_CHILD_SETTID)
em->child_set_tid = args->child_tidptr;
else
em->child_set_tid = NULL;
if (args->flags & LINUX_CLONE_CHILD_CLEARTID)
em->child_clear_tid = args->child_tidptr;
else
em->child_clear_tid = NULL;
EMUL_UNLOCK(&emul_lock);
if (args->flags & LINUX_CLONE_PARENT_SETTID) {
error = copyout(&p2->p_pid, args->parent_tidptr,
//......... part of the code omitted here .........
Example 10: ia32_sendsig
void
ia32_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
struct ia32_sigframe sf, *sfp;
struct siginfo32 siginfo;
struct proc *p;
struct thread *td;
struct sigacts *psp;
char *sp;
struct trapframe *regs;
char *xfpusave;
size_t xfpusave_len;
int oonstack;
int sig;
siginfo_to_siginfo32(&ksi->ksi_info, &siginfo);
td = curthread;
p = td->td_proc;
PROC_LOCK_ASSERT(p, MA_OWNED);
sig = siginfo.si_signo;
psp = p->p_sigacts;
#ifdef COMPAT_FREEBSD4
if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
freebsd4_ia32_sendsig(catcher, ksi, mask);
return;
}
#endif
#ifdef COMPAT_43
if (SIGISMEMBER(psp->ps_osigset, sig)) {
ia32_osendsig(catcher, ksi, mask);
return;
}
#endif
mtx_assert(&psp->ps_mtx, MA_OWNED);
regs = td->td_frame;
oonstack = sigonstack(regs->tf_rsp);
if (cpu_max_ext_state_size > sizeof(struct savefpu) && use_xsave) {
xfpusave_len = cpu_max_ext_state_size - sizeof(struct savefpu);
xfpusave = __builtin_alloca(xfpusave_len);
} else {
xfpusave_len = 0;
xfpusave = NULL;
}
/* Save user context. */
bzero(&sf, sizeof(sf));
sf.sf_uc.uc_sigmask = *mask;
sf.sf_uc.uc_stack.ss_sp = (uintptr_t)td->td_sigstk.ss_sp;
sf.sf_uc.uc_stack.ss_size = td->td_sigstk.ss_size;
sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
sf.sf_uc.uc_mcontext.mc_edi = regs->tf_rdi;
sf.sf_uc.uc_mcontext.mc_esi = regs->tf_rsi;
sf.sf_uc.uc_mcontext.mc_ebp = regs->tf_rbp;
sf.sf_uc.uc_mcontext.mc_isp = regs->tf_rsp; /* XXX */
sf.sf_uc.uc_mcontext.mc_ebx = regs->tf_rbx;
sf.sf_uc.uc_mcontext.mc_edx = regs->tf_rdx;
sf.sf_uc.uc_mcontext.mc_ecx = regs->tf_rcx;
sf.sf_uc.uc_mcontext.mc_eax = regs->tf_rax;
sf.sf_uc.uc_mcontext.mc_trapno = regs->tf_trapno;
sf.sf_uc.uc_mcontext.mc_err = regs->tf_err;
sf.sf_uc.uc_mcontext.mc_eip = regs->tf_rip;
sf.sf_uc.uc_mcontext.mc_cs = regs->tf_cs;
sf.sf_uc.uc_mcontext.mc_eflags = regs->tf_rflags;
sf.sf_uc.uc_mcontext.mc_esp = regs->tf_rsp;
sf.sf_uc.uc_mcontext.mc_ss = regs->tf_ss;
sf.sf_uc.uc_mcontext.mc_ds = regs->tf_ds;
sf.sf_uc.uc_mcontext.mc_es = regs->tf_es;
sf.sf_uc.uc_mcontext.mc_fs = regs->tf_fs;
sf.sf_uc.uc_mcontext.mc_gs = regs->tf_gs;
sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
ia32_get_fpcontext(td, &sf.sf_uc.uc_mcontext, xfpusave, xfpusave_len);
fpstate_drop(td);
sf.sf_uc.uc_mcontext.mc_fsbase = td->td_pcb->pcb_fsbase;
sf.sf_uc.uc_mcontext.mc_gsbase = td->td_pcb->pcb_gsbase;
bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));
/* Allocate space for the signal handler context. */
if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
SIGISMEMBER(psp->ps_sigonstack, sig))
sp = td->td_sigstk.ss_sp + td->td_sigstk.ss_size;
else
sp = (char *)regs->tf_rsp;
if (xfpusave != NULL) {
sp -= xfpusave_len;
sp = (char *)((unsigned long)sp & ~0x3Ful);
sf.sf_uc.uc_mcontext.mc_xfpustate = (register_t)sp;
}
sp -= sizeof(sf);
/* Align to 16 bytes. */
sfp = (struct ia32_sigframe *)((uintptr_t)sp & ~0xF);
PROC_UNLOCK(p);
/* Translate the signal if appropriate. */
if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
/* Build the argument list for the signal handler. */
//......... part of the code omitted here .........
Example 11: freebsd4_ia32_sendsig
static void
freebsd4_ia32_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
struct ia32_sigframe4 sf, *sfp;
struct siginfo32 siginfo;
struct proc *p;
struct thread *td;
struct sigacts *psp;
struct trapframe *regs;
int oonstack;
int sig;
td = curthread;
p = td->td_proc;
siginfo_to_siginfo32(&ksi->ksi_info, &siginfo);
PROC_LOCK_ASSERT(p, MA_OWNED);
sig = siginfo.si_signo;
psp = p->p_sigacts;
mtx_assert(&psp->ps_mtx, MA_OWNED);
regs = td->td_frame;
oonstack = sigonstack(regs->tf_rsp);
/* Save user context. */
bzero(&sf, sizeof(sf));
sf.sf_uc.uc_sigmask = *mask;
sf.sf_uc.uc_stack.ss_sp = (uintptr_t)td->td_sigstk.ss_sp;
sf.sf_uc.uc_stack.ss_size = td->td_sigstk.ss_size;
sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
sf.sf_uc.uc_mcontext.mc_edi = regs->tf_rdi;
sf.sf_uc.uc_mcontext.mc_esi = regs->tf_rsi;
sf.sf_uc.uc_mcontext.mc_ebp = regs->tf_rbp;
sf.sf_uc.uc_mcontext.mc_isp = regs->tf_rsp; /* XXX */
sf.sf_uc.uc_mcontext.mc_ebx = regs->tf_rbx;
sf.sf_uc.uc_mcontext.mc_edx = regs->tf_rdx;
sf.sf_uc.uc_mcontext.mc_ecx = regs->tf_rcx;
sf.sf_uc.uc_mcontext.mc_eax = regs->tf_rax;
sf.sf_uc.uc_mcontext.mc_trapno = regs->tf_trapno;
sf.sf_uc.uc_mcontext.mc_err = regs->tf_err;
sf.sf_uc.uc_mcontext.mc_eip = regs->tf_rip;
sf.sf_uc.uc_mcontext.mc_cs = regs->tf_cs;
sf.sf_uc.uc_mcontext.mc_eflags = regs->tf_rflags;
sf.sf_uc.uc_mcontext.mc_esp = regs->tf_rsp;
sf.sf_uc.uc_mcontext.mc_ss = regs->tf_ss;
sf.sf_uc.uc_mcontext.mc_ds = regs->tf_ds;
sf.sf_uc.uc_mcontext.mc_es = regs->tf_es;
sf.sf_uc.uc_mcontext.mc_fs = regs->tf_fs;
sf.sf_uc.uc_mcontext.mc_gs = regs->tf_gs;
bzero(sf.sf_uc.uc_mcontext.mc_fpregs,
sizeof(sf.sf_uc.uc_mcontext.mc_fpregs));
bzero(sf.sf_uc.uc_mcontext.__spare__,
sizeof(sf.sf_uc.uc_mcontext.__spare__));
bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));
/* Allocate space for the signal handler context. */
if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
SIGISMEMBER(psp->ps_sigonstack, sig)) {
sfp = (struct ia32_sigframe4 *)(td->td_sigstk.ss_sp +
td->td_sigstk.ss_size - sizeof(sf));
} else
sfp = (struct ia32_sigframe4 *)regs->tf_rsp - 1;
PROC_UNLOCK(p);
/* Translate the signal if appropriate. */
if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
/* Build the argument list for the signal handler. */
sf.sf_signum = sig;
sf.sf_ucontext = (register_t)&sfp->sf_uc;
bzero(&sf.sf_si, sizeof(sf.sf_si));
if (SIGISMEMBER(psp->ps_siginfo, sig)) {
/* Signal handler installed with SA_SIGINFO. */
sf.sf_siginfo = (u_int32_t)(uintptr_t)&sfp->sf_si;
sf.sf_ah = (u_int32_t)(uintptr_t)catcher;
/* Fill in POSIX parts */
sf.sf_si = siginfo;
sf.sf_si.si_signo = sig;
} else {
/* Old FreeBSD-style arguments. */
sf.sf_siginfo = siginfo.si_code;
sf.sf_addr = (u_int32_t)siginfo.si_addr;
sf.sf_ah = (u_int32_t)(uintptr_t)catcher;
}
mtx_unlock(&psp->ps_mtx);
/*
* Copy the sigframe out to the user's stack.
*/
if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
#ifdef DEBUG
printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
PROC_LOCK(p);
sigexit(td, SIGILL);
}
//......... part of the code omitted here .........
Example 12: thread_create
int
thread_create(struct thread *td, struct rtprio *rtp,
int (*initialize_thread)(struct thread *, void *), void *thunk)
{
struct thread *newtd;
struct proc *p;
int error;
p = td->td_proc;
if (rtp != NULL) {
switch(rtp->type) {
case RTP_PRIO_REALTIME:
case RTP_PRIO_FIFO:
/* Only root can set scheduler policy */
if (priv_check(td, PRIV_SCHED_SETPOLICY) != 0)
return (EPERM);
if (rtp->prio > RTP_PRIO_MAX)
return (EINVAL);
break;
case RTP_PRIO_NORMAL:
rtp->prio = 0;
break;
default:
return (EINVAL);
}
}
#ifdef RACCT
if (racct_enable) {
PROC_LOCK(p);
error = racct_add(p, RACCT_NTHR, 1);
PROC_UNLOCK(p);
if (error != 0)
return (EPROCLIM);
}
#endif
/* Initialize our td */
error = kern_thr_alloc(p, 0, &newtd);
if (error)
goto fail;
cpu_set_upcall(newtd, td);
bzero(&newtd->td_startzero,
__rangeof(struct thread, td_startzero, td_endzero));
bcopy(&td->td_startcopy, &newtd->td_startcopy,
__rangeof(struct thread, td_startcopy, td_endcopy));
newtd->td_proc = td->td_proc;
thread_cow_get(newtd, td);
error = initialize_thread(newtd, thunk);
if (error != 0) {
thread_cow_free(newtd);
thread_free(newtd);
goto fail;
}
PROC_LOCK(p);
p->p_flag |= P_HADTHREADS;
thread_link(newtd, p);
bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name));
newtd->td_pax = p->p_pax;
thread_lock(td);
/* let the scheduler know about these things. */
sched_fork_thread(td, newtd);
thread_unlock(td);
if (P_SHOULDSTOP(p))
newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
if (p->p_flag2 & P2_LWP_EVENTS)
newtd->td_dbgflags |= TDB_BORN;
/*
* Copy the existing thread VM policy into the new thread.
*/
vm_domain_policy_localcopy(&newtd->td_vm_dom_policy,
&td->td_vm_dom_policy);
PROC_UNLOCK(p);
tidhash_add(newtd);
thread_lock(newtd);
if (rtp != NULL) {
if (!(td->td_pri_class == PRI_TIMESHARE &&
rtp->type == RTP_PRIO_NORMAL)) {
rtp_to_pri(rtp, newtd);
sched_prio(newtd, newtd->td_user_pri);
} /* ignore timesharing class */
}
TD_SET_CAN_RUN(newtd);
sched_add(newtd, SRQ_BORING);
thread_unlock(newtd);
return (0);
fail:
#ifdef RACCT
if (racct_enable) {
//......... part of the code omitted here .........
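Example 12 brackets thread creation with RACCT accounting: the thread count is charged under PROC_LOCK up front and, in the elided fail path, rolled back with racct_sub() if anything later goes wrong. A sketch of both halves, with do_create_work() as a hypothetical placeholder for the real body:
/*
 * Sketch of the charge/rollback accounting pattern from thread_create().
 */
static int
example_charge_thread(struct proc *p)
{
	int error;

#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(p);
		error = racct_add(p, RACCT_NTHR, 1);	/* charge one thread */
		PROC_UNLOCK(p);
		if (error != 0)
			return (EPROCLIM);
	}
#endif
	error = do_create_work();	/* hypothetical: the actual creation */
	if (error != 0) {
#ifdef RACCT
		if (racct_enable) {
			PROC_LOCK(p);
			racct_sub(p, RACCT_NTHR, 1);	/* undo the charge */
			PROC_UNLOCK(p);
		}
#endif
	}
	return (error);
}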
Example 13: cheriabi_sysarch
int
cheriabi_sysarch(struct thread *td, struct cheriabi_sysarch_args *uap)
{
struct trapframe *regs = &td->td_pcb->pcb_regs;
int error;
int parms_from_cap = 1;
size_t reqsize;
register_t reqperms;
/*
* The sysarch() fill_uap function is machine-independent, so it cannot
* check the validity of the capability which becomes uap->parms. As
* such, it makes no attempt to convert the result. We need to
* perform those checks here.
*/
switch (uap->op) {
case MIPS_SET_TLS:
reqsize = 0;
reqperms = 0;
break;
case MIPS_GET_TLS:
case CHERI_GET_STACK:
case CHERI_GET_TYPECAP:
reqsize = sizeof(struct chericap);
reqperms = CHERI_PERM_STORE|CHERI_PERM_STORE_CAP;
break;
case CHERI_SET_STACK:
reqsize = sizeof(struct chericap);
reqperms = CHERI_PERM_LOAD|CHERI_PERM_LOAD_CAP;
break;
case CHERI_MMAP_GETBASE:
case CHERI_MMAP_GETLEN:
case CHERI_MMAP_GETOFFSET:
case CHERI_MMAP_GETPERM:
case CHERI_MMAP_SETOFFSET:
case CHERI_MMAP_SETBOUNDS:
reqsize = sizeof(uint64_t);
reqperms = CHERI_PERM_STORE;
break;
case CHERI_MMAP_ANDPERM:
reqsize = sizeof(uint64_t);
reqperms = CHERI_PERM_LOAD|CHERI_PERM_STORE;
break;
case MIPS_GET_COUNT:
parms_from_cap = 0;
break;
#ifdef CPU_QEMU_MALTA
case QEMU_GET_QTRACE:
reqsize = sizeof(int);
reqperms = CHERI_PERM_STORE;
break;
case QEMU_SET_QTRACE:
reqsize = sizeof(int);
reqperms = CHERI_PERM_LOAD;
break;
#endif
default:
return (EINVAL);
}
if (parms_from_cap) {
error = cheriabi_cap_to_ptr(&uap->parms, &regs->c3,
reqsize, reqperms, 0);
if (error != 0)
return (error);
}
switch (uap->op) {
case MIPS_SET_TLS:
return (cheriabi_set_user_tls(td, &regs->c3));
case MIPS_GET_TLS:
error = copyoutcap(&td->td_md.md_tls_cap, uap->parms,
sizeof(struct chericap));
return (error);
case CHERI_MMAP_GETBASE: {
size_t base;
PROC_LOCK(td->td_proc);
CHERI_CLC(CHERI_CR_CTEMP0, CHERI_CR_KDC,
&td->td_proc->p_md.md_cheri_mmap_cap, 0);
CHERI_CGETBASE(base, CHERI_CR_CTEMP0);
PROC_UNLOCK(td->td_proc);
if (suword64(uap->parms, base) != 0)
return (EFAULT);
return (0);
}
case CHERI_MMAP_GETLEN: {
size_t len;
PROC_LOCK(td->td_proc);
//......... part of the code omitted here .........
Example 14: sendsig
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
struct thread *td;
struct proc *p;
struct trapframe *tf;
struct sigframe *fp, frame;
struct sigacts *psp;
int code, onstack, sig;
td = curthread;
p = td->td_proc;
PROC_LOCK_ASSERT(p, MA_OWNED);
sig = ksi->ksi_signo;
code = ksi->ksi_code;
psp = p->p_sigacts;
mtx_assert(&psp->ps_mtx, MA_OWNED);
tf = td->td_frame;
onstack = sigonstack(tf->tf_sp);
CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
catcher, sig);
/* Allocate and validate space for the signal handler context. */
if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
SIGISMEMBER(psp->ps_sigonstack, sig)) {
fp = (struct sigframe *)(td->td_sigstk.ss_sp +
td->td_sigstk.ss_size);
#if defined(COMPAT_43)
td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
} else {
fp = (struct sigframe *)td->td_frame->tf_sp;
}
/* Make room, keeping the stack aligned */
fp--;
fp = (struct sigframe *)STACKALIGN(fp);
/* Fill in the frame to copy out */
get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
get_fpcontext(td, &frame.sf_uc.uc_mcontext);
frame.sf_si = ksi->ksi_info;
frame.sf_uc.uc_sigmask = *mask;
frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
frame.sf_uc.uc_stack = td->td_sigstk;
mtx_unlock(&psp->ps_mtx);
PROC_UNLOCK(td->td_proc);
/* Copy the sigframe out to the user's stack. */
if (copyout(&frame, fp, sizeof(*fp)) != 0) {
/* Process has trashed its stack. Kill it. */
CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
PROC_LOCK(p);
sigexit(td, SIGILL);
}
tf->tf_x[0] = sig;
tf->tf_x[1] = (register_t)&fp->sf_si;
tf->tf_x[2] = (register_t)&fp->sf_uc;
tf->tf_elr = (register_t)catcher;
tf->tf_sp = (register_t)fp;
tf->tf_lr = (register_t)(PS_STRINGS - *(p->p_sysent->sv_szsigcode));
CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
tf->tf_sp);
PROC_LOCK(p);
mtx_lock(&psp->ps_mtx);
}
Example 15: linux_clone_proc
static int
linux_clone_proc(struct thread *td, struct linux_clone_args *args)
{
struct fork_req fr;
int error, ff = RFPROC | RFSTOPPED;
struct proc *p2;
struct thread *td2;
int exit_signal;
struct linux_emuldata *em;
#ifdef DEBUG
if (ldebug(clone)) {
printf(ARGS(clone, "flags %x, stack %p, parent tid: %p, "
"child tid: %p"), (unsigned)args->flags,
args->stack, args->parent_tidptr, args->child_tidptr);
}
#endif
exit_signal = args->flags & 0x000000ff;
if (LINUX_SIG_VALID(exit_signal)) {
exit_signal = linux_to_bsd_signal(exit_signal);
} else if (exit_signal != 0)
return (EINVAL);
if (args->flags & LINUX_CLONE_VM)
ff |= RFMEM;
if (args->flags & LINUX_CLONE_SIGHAND)
ff |= RFSIGSHARE;
/*
* XXX: In Linux, sharing of fs info (chroot/cwd/umask)
* and open files is independent. In FreeBSD, it's in one
* structure but in reality it does not cause any problems
* because both of these flags are usually set together.
*/
if (!(args->flags & (LINUX_CLONE_FILES | LINUX_CLONE_FS)))
ff |= RFFDG;
if (args->flags & LINUX_CLONE_PARENT_SETTID)
if (args->parent_tidptr == NULL)
return (EINVAL);
if (args->flags & LINUX_CLONE_VFORK)
ff |= RFPPWAIT;
bzero(&fr, sizeof(fr));
fr.fr_flags = ff;
fr.fr_procp = &p2;
error = fork1(td, &fr);
if (error)
return (error);
td2 = FIRST_THREAD_IN_PROC(p2);
/* create the emuldata */
linux_proc_init(td, td2, args->flags);
em = em_find(td2);
KASSERT(em != NULL, ("clone_proc: emuldata not found.\n"));
if (args->flags & LINUX_CLONE_CHILD_SETTID)
em->child_set_tid = args->child_tidptr;
else
em->child_set_tid = NULL;
if (args->flags & LINUX_CLONE_CHILD_CLEARTID)
em->child_clear_tid = args->child_tidptr;
else
em->child_clear_tid = NULL;
if (args->flags & LINUX_CLONE_PARENT_SETTID) {
error = copyout(&p2->p_pid, args->parent_tidptr,
sizeof(p2->p_pid));
if (error)
printf(LMSG("copyout failed!"));
}
PROC_LOCK(p2);
p2->p_sigparent = exit_signal;
PROC_UNLOCK(p2);
/*
* In the case of stack == NULL, we are supposed to COW the calling
* process's stack. This is what normal fork() does, so we just keep
* the tf_rsp arg intact.
*/
linux_set_upcall_kse(td2, PTROUT(args->stack));
if (args->flags & LINUX_CLONE_SETTLS)
linux_set_cloned_tls(td2, args->tls);
/*
* If CLONE_PARENT is set, then the parent of the new process will be
* the same as that of the calling process.
*/
if (args->flags & LINUX_CLONE_PARENT) {
sx_xlock(&proctree_lock);
PROC_LOCK(p2);
proc_reparent(p2, td->td_proc->p_pptr);
PROC_UNLOCK(p2);
sx_xunlock(&proctree_lock);
}
//......... part of the code omitted here .........