This article collects typical usage examples of the C++ write_unlock_irq function. If you are unsure what write_unlock_irq does, how to call it, or what real-world usage looks like, the curated code examples below may help.
A total of 15 write_unlock_irq code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
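All of the examples below come from the Linux kernel, where write_unlock_irq() releases a reader/writer lock taken with write_lock_irq() and re-enables local interrupts. As a minimal orientation before the real examples, here is a hedged sketch of that canonical pairing; the lock name counters_lock, the counter shared_count and the function bump_shared_count are hypothetical and appear in none of the examples:
#include <linux/spinlock.h>  /* rwlock_t, write_lock_irq(), write_unlock_irq() */

/* Hypothetical shared state protected by a reader/writer lock. */
static DEFINE_RWLOCK(counters_lock);
static unsigned long shared_count;

/* write_lock_irq() takes the lock for writing and unconditionally disables
 * local interrupts, so it should only be called where interrupts are known
 * to be enabled; write_unlock_irq() drops the lock and re-enables them. */
static void bump_shared_count(void)
{
	write_lock_irq(&counters_lock);
	shared_count++;
	write_unlock_irq(&counters_lock);
}
When the caller may already be running with interrupts disabled, the save/restore variant is used instead, as several of the examples do with write_unlock_irqrestore(&tasklist_lock, flags) (see Example 10).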
Example 1: masq_wait
/*-------------------------------------------------------------------------
* bproc_masq_wait
*/
int masq_wait(pid_t pid, int options, struct siginfo *infop,
unsigned int * stat_addr, struct rusage * ru) {
int result, lpid, status;
struct bproc_krequest_t *req;
struct bproc_rsyscall_msg_t *msg;
struct bproc_wait_resp_t *resp_msg;
struct task_struct *child;
/* XXX to be 100% semantically correct, we need to verify_area
* here on stat_addr and ru here... */
req = bpr_rsyscall1(BPROC_SYS_WAIT);
if (!req){
printk("bproc: masq: sys_wait: out of memory.\n");
return -ENOMEM;
}
msg = (struct bproc_rsyscall_msg_t *) bproc_msg(req);
msg->arg[0] = pid;
msg->arg[1] = options;
if (bpr_rsyscall2(BPROC_MASQ_MASTER(current), req, 1) != 0) {
bproc_put_req(req);
return -EIO;
}
resp_msg = bproc_msg(req->response);
result = resp_msg->hdr.result;
status = resp_msg->status;
if (stat_addr) put_user(status, stat_addr);
if (ru) {
/* Only a few elements of the rusage structure are provided by
* Linux. Also, convert the times back to the HZ
* representation. */
struct rusage ru_tmp;
memset(&ru_tmp, 0, sizeof(ru_tmp));
ru_tmp.ru_utime.tv_sec = resp_msg->utime/HZ;
ru_tmp.ru_utime.tv_usec = (resp_msg->utime*(1000000/HZ))%1000000;
ru_tmp.ru_stime.tv_sec = resp_msg->stime/HZ;
ru_tmp.ru_stime.tv_usec = (resp_msg->stime*(1000000/HZ))%1000000;
ru_tmp.ru_minflt = resp_msg->minflt;
ru_tmp.ru_majflt = resp_msg->majflt;
ru_tmp.ru_nswap = resp_msg->nswap;
copy_to_user(ru, &ru_tmp, sizeof(ru_tmp));
}
bproc_put_req(req);
if (result > 0 && (status & 0xff) != 0x7f) {
/* It's possible that the process we waited on was actually
* local. We need to make sure and get it out of this process
* tree too. If it's not, we need to down the non local child
* count by one... */
write_lock_irq(&tasklist_lock);
child = masq_find_task_by_pid(BPROC_MASQ_MASTER(current), result);
if (child)
lpid = child->pid;
else {
lpid = 0;
current->bproc.nlchild--;
}
write_unlock_irq(&tasklist_lock);
if (lpid) {
/* Do all of this as a kernel call to avoid re-entering
* this whole mess... */
set_bit(BPROC_FLAG_KCALL, &current->bproc.flag);
if (k_wait4(lpid, options & ~WNOHANG, NULL, NULL, NULL) == -1) {
printk(KERN_ERR "bproc: masq: local wait failed on %d (%d)\n",
lpid, result);
#if 0
/* This probably isn't correct. If we fail to wait,
* that probably means that somebody else picked it
* up. */
write_lock_irq(&tasklist_lock);
current->bproc.nlchild--;
write_unlock_irq(&tasklist_lock);
#endif
}
}
}
return result;
}
Example 2: hp_sdc_put
//......... part of the code is omitted here .........
while (i < 4 && w7[i] == hp_sdc.r7[i])
i++;
if (i < 4) {
hp_sdc_status_out8(HP_SDC_CMD_SET_D0 + i);
hp_sdc.wi = 0x70 + i;
goto finish;
}
idx++;
if ((act & HP_SDC_ACT_DURING) == HP_SDC_ACT_DATAREG)
goto actdone;
curr->idx = idx;
act &= ~HP_SDC_ACT_DATAREG;
break;
}
hp_sdc_data_out8(w7[hp_sdc.wi - 0x70]);
hp_sdc.r7[hp_sdc.wi - 0x70] = w7[hp_sdc.wi - 0x70];
hp_sdc.wi++; /* write index register autoincrements */
{
int i = 0;
while ((i < 4) && w7[i] == hp_sdc.r7[i])
i++;
if (i >= 4) {
curr->idx = idx + 1;
if ((act & HP_SDC_ACT_DURING) ==
HP_SDC_ACT_DATAREG)
goto actdone;
}
}
goto finish;
}
/* We don't go any further in the command if there is a pending read,
because we don't want interleaved results. */
read_lock_irq(&hp_sdc.rtq_lock);
if (hp_sdc.rcurr >= 0) {
read_unlock_irq(&hp_sdc.rtq_lock);
goto finish;
}
read_unlock_irq(&hp_sdc.rtq_lock);
if (act & HP_SDC_ACT_POSTCMD) {
uint8_t postcmd;
/* curr->idx should == idx at this point. */
postcmd = curr->seq[idx];
curr->idx++;
if (act & HP_SDC_ACT_DATAIN) {
/* Start a new read */
hp_sdc.rqty = curr->seq[curr->idx];
do_gettimeofday(&hp_sdc.rtv);
curr->idx++;
/* Still need to lock here in case of spurious irq. */
write_lock_irq(&hp_sdc.rtq_lock);
hp_sdc.rcurr = curridx;
write_unlock_irq(&hp_sdc.rtq_lock);
hp_sdc_status_out8(postcmd);
goto finish;
}
hp_sdc_status_out8(postcmd);
goto actdone;
}
actdone:
if (act & HP_SDC_ACT_SEMAPHORE)
up(curr->act.semaphore);
else if (act & HP_SDC_ACT_CALLBACK)
curr->act.irqhook(0,NULL,0,0);
if (curr->idx >= curr->endidx) { /* This transaction is over. */
if (act & HP_SDC_ACT_DEALLOC)
kfree(curr);
hp_sdc.tq[curridx] = NULL;
} else {
curr->actidx = idx + 1;
curr->idx = idx + 2;
}
/* Interleave outbound data between the transactions. */
hp_sdc.wcurr++;
if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN)
hp_sdc.wcurr = 0;
finish:
/* If by some quirk IBF has cleared and our ISR has run to
see that that has happened, do it all again. */
if (!hp_sdc.ibf && limit++ < 20)
goto anew;
done:
if (hp_sdc.wcurr >= 0)
tasklet_schedule(&hp_sdc.task);
write_unlock(&hp_sdc.lock);
return 0;
}
Example 3: vcc_remove_socket
static void vcc_remove_socket(struct sock *sk)
{
write_lock_irq(&vcc_sklist_lock);
sk_del_node_init(sk);
write_unlock_irq(&vcc_sklist_lock);
}
Example 4: zfcp_adapter_enqueue
/**
* zfcp_adapter_enqueue - enqueue a new adapter to the list
* @ccw_device: pointer to the struct ccw_device
*
* Returns: 0 if a new adapter was successfully enqueued
* -ENOMEM if alloc failed
* Enqueues an adapter at the end of the adapter list in the driver data.
* All adapter internal structures are set up.
* Proc-fs entries are also created.
* locks: config_sema must be held to serialise changes to the adapter list
*/
int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
{
struct zfcp_adapter *adapter;
/*
* Note: It is safe to release the list_lock, as any list changes
* are protected by the config_sema, which must be held to get here
*/
adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL);
if (!adapter)
return -ENOMEM;
ccw_device->handler = NULL;
adapter->ccw_device = ccw_device;
atomic_set(&adapter->refcount, 0);
if (zfcp_qdio_allocate(adapter))
goto qdio_allocate_failed;
if (zfcp_allocate_low_mem_buffers(adapter))
goto failed_low_mem_buffers;
if (zfcp_reqlist_alloc(adapter))
goto failed_low_mem_buffers;
if (zfcp_adapter_debug_register(adapter))
goto debug_register_failed;
init_waitqueue_head(&adapter->remove_wq);
init_waitqueue_head(&adapter->erp_thread_wqh);
init_waitqueue_head(&adapter->erp_done_wqh);
INIT_LIST_HEAD(&adapter->port_list_head);
INIT_LIST_HEAD(&adapter->port_remove_lh);
INIT_LIST_HEAD(&adapter->erp_ready_head);
INIT_LIST_HEAD(&adapter->erp_running_head);
spin_lock_init(&adapter->req_list_lock);
spin_lock_init(&adapter->hba_dbf_lock);
spin_lock_init(&adapter->san_dbf_lock);
spin_lock_init(&adapter->scsi_dbf_lock);
spin_lock_init(&adapter->rec_dbf_lock);
spin_lock_init(&adapter->req_q.lock);
rwlock_init(&adapter->erp_lock);
rwlock_init(&adapter->abort_lock);
sema_init(&adapter->erp_ready_sem, 0);
INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
INIT_WORK(&adapter->scan_work, _zfcp_scan_ports_later);
/* mark adapter unusable as long as sysfs registration is not complete */
atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
dev_set_drvdata(&ccw_device->dev, adapter);
if (sysfs_create_group(&ccw_device->dev.kobj,
&zfcp_sysfs_adapter_attrs))
goto sysfs_failed;
adapter->generic_services.parent = &adapter->ccw_device->dev;
adapter->generic_services.release = zfcp_dummy_release;
snprintf(adapter->generic_services.bus_id, BUS_ID_SIZE,
"generic_services");
if (device_register(&adapter->generic_services))
goto generic_services_failed;
write_lock_irq(&zfcp_data.config_lock);
atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
list_add_tail(&adapter->list, &zfcp_data.adapter_list_head);
write_unlock_irq(&zfcp_data.config_lock);
zfcp_data.adapters++;
zfcp_nameserver_enqueue(adapter);
return 0;
generic_services_failed:
sysfs_remove_group(&ccw_device->dev.kobj,
&zfcp_sysfs_adapter_attrs);
sysfs_failed:
zfcp_adapter_debug_unregister(adapter);
debug_register_failed:
dev_set_drvdata(&ccw_device->dev, NULL);
//......... part of the code is omitted here .........
Example 5: ib_cache_update
static void ib_cache_update(struct ib_device *device,
u8 port)
{
struct ib_port_attr *tprops = NULL;
struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
struct ib_gid_cache *gid_cache = NULL, *old_gid_cache;
int i;
int ret;
tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
if (!tprops)
return;
ret = ib_query_port(device, port, tprops);
if (ret) {
printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
ret, device->name);
goto err;
}
pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
sizeof *pkey_cache->table, GFP_KERNEL);
if (!pkey_cache)
goto err;
pkey_cache->table_len = tprops->pkey_tbl_len;
gid_cache = kmalloc(sizeof *gid_cache + tprops->gid_tbl_len *
sizeof *gid_cache->table, GFP_KERNEL);
if (!gid_cache)
goto err;
gid_cache->table_len = tprops->gid_tbl_len;
for (i = 0; i < pkey_cache->table_len; ++i) {
ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
if (ret) {
printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
ret, device->name, i);
goto err;
}
}
for (i = 0; i < gid_cache->table_len; ++i) {
ret = ib_query_gid(device, port, i, gid_cache->table + i);
if (ret) {
printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
ret, device->name, i);
goto err;
}
}
write_lock_irq(&device->cache.lock);
old_pkey_cache = device->cache.pkey_cache[port - start_port(device)];
old_gid_cache = device->cache.gid_cache [port - start_port(device)];
device->cache.pkey_cache[port - start_port(device)] = pkey_cache;
device->cache.gid_cache [port - start_port(device)] = gid_cache;
device->cache.lmc_cache[port - start_port(device)] = tprops->lmc;
write_unlock_irq(&device->cache.lock);
kfree(old_pkey_cache);
kfree(old_gid_cache);
kfree(tprops);
return;
err:
kfree(pkey_cache);
kfree(gid_cache);
kfree(tprops);
}
Example 6: sys_setpgid
asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
struct task_struct *p;
int err = -EINVAL;
if (!pid)
pid = current->pid;
if (!pgid)
pgid = pid;
if (pgid < 0)
return -EINVAL;
/* From this point forward we keep holding onto the tasklist lock
* so that our parent does not change from under us. -DaveM
*/
write_lock_irq(&tasklist_lock);
err = -ESRCH;
p = find_task_by_pid(pid);
if (!p)
goto out;
err = -EINVAL;
if (!thread_group_leader(p))
goto out;
if (p->parent == current || p->real_parent == current) {
err = -EPERM;
if (p->signal->session != current->signal->session)
goto out;
err = -EACCES;
if (p->did_exec)
goto out;
} else {
err = -ESRCH;
if (p != current)
goto out;
}
err = -EPERM;
if (p->signal->leader)
goto out;
if (pgid != pid) {
struct task_struct *p;
do_each_task_pid(pgid, PIDTYPE_PGID, p) {
if (p->signal->session == current->signal->session)
goto ok_pgid;
} while_each_task_pid(pgid, PIDTYPE_PGID, p);
goto out;
}
ok_pgid:
err = security_task_setpgid(p, pgid);
if (err)
goto out;
if (process_group(p) != pgid) {
detach_pid(p, PIDTYPE_PGID);
p->signal->pgrp = pgid;
attach_pid(p, PIDTYPE_PGID, pgid);
}
err = 0;
out:
/* All paths lead to here, thus we are safe. -DaveM */
write_unlock_irq(&tasklist_lock);
return err;
}
Example 7: ib_cache_update
//......... part of the code is omitted here .........
}
pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
sizeof *pkey_cache->entry, GFP_KERNEL);
if (!pkey_cache)
goto err;
pkey_cache->table_len = 0;
gid_cache = kmalloc(sizeof *gid_cache + tprops->gid_tbl_len *
sizeof *gid_cache->entry, GFP_KERNEL);
if (!gid_cache)
goto err;
gid_cache->table_len = 0;
for (i = 0, j = 0; i < tprops->pkey_tbl_len; ++i) {
ret = ib_query_pkey(device, port, i, &pkey);
if (ret) {
printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
ret, device->name, i);
goto err;
}
/* pkey 0xffff must be the default pkey and 0x0000 must be the invalid
* pkey per IBTA spec */
if (pkey) {
pkey_cache->entry[j].index = i;
pkey_cache->entry[j++].pkey = pkey;
}
}
pkey_cache->table_len = j;
memset(&empty_gid, 0, sizeof empty_gid);
for (i = 0, j = 0; i < tprops->gid_tbl_len; ++i) {
ret = ib_query_gid(device, port, i, &gid);
if (ret) {
printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
ret, device->name, i);
goto err;
}
/* if the lower 8 bytes of the device GID entry are all 0,
* our entry is a blank, invalid entry...
* depending on device, the upper 8 bytes might or might
* not be prefilled with a valid subnet prefix, so
* don't rely on them for determining a valid gid
* entry
*/
if (memcmp(&gid + 8, &empty_gid + 8, sizeof gid - 8)) {
gid_cache->entry[j].index = i;
gid_cache->entry[j++].gid = gid;
}
}
gid_cache->table_len = j;
old_pkey_cache = pkey_cache;
pkey_cache = kmalloc(sizeof *pkey_cache + old_pkey_cache->table_len *
sizeof *pkey_cache->entry, GFP_KERNEL);
if (!pkey_cache)
pkey_cache = old_pkey_cache;
else {
pkey_cache->table_len = old_pkey_cache->table_len;
memcpy(&pkey_cache->entry[0], &old_pkey_cache->entry[0],
pkey_cache->table_len * sizeof *pkey_cache->entry);
kfree(old_pkey_cache);
}
old_gid_cache = gid_cache;
gid_cache = kmalloc(sizeof *gid_cache + old_gid_cache->table_len *
sizeof *gid_cache->entry, GFP_KERNEL);
if (!gid_cache)
gid_cache = old_gid_cache;
else {
gid_cache->table_len = old_gid_cache->table_len;
memcpy(&gid_cache->entry[0], &old_gid_cache->entry[0],
gid_cache->table_len * sizeof *gid_cache->entry);
kfree(old_gid_cache);
}
write_lock_irq(&device->cache.lock);
old_pkey_cache = device->cache.pkey_cache[port - start_port(device)];
old_gid_cache = device->cache.gid_cache [port - start_port(device)];
device->cache.pkey_cache[port - start_port(device)] = pkey_cache;
device->cache.gid_cache [port - start_port(device)] = gid_cache;
device->cache.lmc_cache[port - start_port(device)] = tprops->lmc;
write_unlock_irq(&device->cache.lock);
kfree(old_pkey_cache);
kfree(old_gid_cache);
kfree(tprops);
return;
err:
kfree(pkey_cache);
kfree(gid_cache);
kfree(tprops);
}
Example 8: ERR_PTR
//......... part of the code is omitted here .........
else
p->exit_signal = (clone_flags & CSIGNAL);
p->pdeath_signal = 0;
p->exit_state = 0;
p->nr_dirtied = 0;
p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
p->dirty_paused_when = 0;
p->group_leader = p;
INIT_LIST_HEAD(&p->thread_group);
cgroup_fork_callbacks(p);
cgroup_callbacks_done = 1;
write_lock_irq(&tasklist_lock);
if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
p->real_parent = current->real_parent;
p->parent_exec_id = current->parent_exec_id;
} else {
p->real_parent = current;
p->parent_exec_id = current->self_exec_id;
}
spin_lock(&current->sighand->siglock);
recalc_sigpending();
if (signal_pending(current)) {
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
retval = -ERESTARTNOINTR;
goto bad_fork_free_pid;
}
if (clone_flags & CLONE_THREAD) {
current->signal->nr_threads++;
atomic_inc(&current->signal->live);
atomic_inc(&current->signal->sigcnt);
p->group_leader = current->group_leader;
list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
}
if (likely(p->pid)) {
ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
if (thread_group_leader(p)) {
if (is_child_reaper(pid))
p->nsproxy->pid_ns->child_reaper = p;
p->signal->leader_pid = pid;
p->signal->tty = tty_kref_get(current->signal->tty);
attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
attach_pid(p, PIDTYPE_SID, task_session(current));
list_add_tail(&p->sibling, &p->real_parent->children);
list_add_tail_rcu(&p->tasks, &init_task.tasks);
__this_cpu_inc(process_counts);
}
attach_pid(p, PIDTYPE_PID, pid);
nr_threads++;
}
total_forks++;
Example 9: read_lock_irq
/**
* zfcp_unit_enqueue - enqueue unit to unit list of a port.
* @port: pointer to port where unit is added
* @fcp_lun: FCP LUN of unit to be enqueued
* Returns: pointer to enqueued unit on success, ERR_PTR on error
* Locks: config_mutex must be held to serialize changes to the unit list
*
* Sets up some unit internal structures and creates sysfs entry.
*/
struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
{
struct zfcp_unit *unit;
read_lock_irq(&zfcp_data.config_lock);
if (zfcp_get_unit_by_lun(port, fcp_lun)) {
read_unlock_irq(&zfcp_data.config_lock);
return ERR_PTR(-EINVAL);
}
read_unlock_irq(&zfcp_data.config_lock);
unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
if (!unit)
return ERR_PTR(-ENOMEM);
atomic_set(&unit->refcount, 0);
init_waitqueue_head(&unit->remove_wq);
INIT_WORK(&unit->scsi_work, zfcp_scsi_scan_work);
unit->port = port;
unit->fcp_lun = fcp_lun;
if (dev_set_name(&unit->sysfs_device, "0x%016llx",
(unsigned long long) fcp_lun)) {
kfree(unit);
return ERR_PTR(-ENOMEM);
}
unit->sysfs_device.parent = &port->sysfs_device;
unit->sysfs_device.release = zfcp_sysfs_unit_release;
dev_set_drvdata(&unit->sysfs_device, unit);
/* mark unit unusable as long as sysfs registration is not complete */
atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
spin_lock_init(&unit->latencies.lock);
unit->latencies.write.channel.min = 0xFFFFFFFF;
unit->latencies.write.fabric.min = 0xFFFFFFFF;
unit->latencies.read.channel.min = 0xFFFFFFFF;
unit->latencies.read.fabric.min = 0xFFFFFFFF;
unit->latencies.cmd.channel.min = 0xFFFFFFFF;
unit->latencies.cmd.fabric.min = 0xFFFFFFFF;
if (device_register(&unit->sysfs_device)) {
put_device(&unit->sysfs_device);
return ERR_PTR(-EINVAL);
}
if (sysfs_create_group(&unit->sysfs_device.kobj,
&zfcp_sysfs_unit_attrs)) {
device_unregister(&unit->sysfs_device);
return ERR_PTR(-EINVAL);
}
zfcp_unit_get(unit);
write_lock_irq(&zfcp_data.config_lock);
list_add_tail(&unit->list, &port->unit_list_head);
atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);
write_unlock_irq(&zfcp_data.config_lock);
zfcp_port_get(port);
return unit;
}
Example 10: sys_ptrace
long sys_ptrace(long request, pid_t pid, long addr, long data)
{
struct task_struct *child;
long ret;
lock_kernel();
ret = -EPERM;
if (request == PTRACE_TRACEME) {
/* are we already being traced? */
if (current->ptrace & PT_PTRACED)
goto out;
/* set the ptrace bit in the process flags. */
current->ptrace |= PT_PTRACED;
ret = 0;
goto out;
}
ret = -ESRCH;
read_lock(&tasklist_lock);
child = find_task_by_pid(pid);
if (child)
get_task_struct(child);
read_unlock(&tasklist_lock);
if (!child)
goto out;
ret = -EPERM;
if (pid == 1) /* no messing around with init! */
goto out_tsk;
if (request == PTRACE_ATTACH) {
if (child == current)
goto out_tsk;
if ((!child->dumpable ||
(current->uid != child->euid) ||
(current->uid != child->suid) ||
(current->uid != child->uid) ||
(current->gid != child->egid) ||
(current->gid != child->sgid) ||
(!cap_issubset(child->cap_permitted, current->cap_permitted)) ||
(current->gid != child->gid)) && !capable(CAP_SYS_PTRACE))
goto out_tsk;
/* the same process cannot be attached many times */
if (child->ptrace & PT_PTRACED)
goto out_tsk;
child->ptrace |= PT_PTRACED;
if (child->p_pptr != current) {
unsigned long flags;
write_lock_irqsave(&tasklist_lock, flags);
REMOVE_LINKS(child);
child->p_pptr = current;
SET_LINKS(child);
write_unlock_irqrestore(&tasklist_lock, flags);
}
send_sig(SIGSTOP, child, 1);
ret = 0;
goto out_tsk;
}
ret = -ESRCH;
if (!(child->ptrace & PT_PTRACED))
goto out_tsk;
if (child->state != TASK_STOPPED) {
if (request != PTRACE_KILL)
goto out_tsk;
}
if (child->p_pptr != current)
goto out_tsk;
switch (request) {
case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA: {
unsigned long tmp;
int copied;
copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
ret = -EIO;
if (copied != sizeof(tmp))
goto out_tsk;
ret = put_user(tmp,(unsigned long *) data);
goto out_tsk;
}
/* when I and D space are separate, this will have to be fixed. */
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
ret = 0;
if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
goto out_tsk;
ret = -EIO;
goto out_tsk;
/* Read the word at location addr in the USER area. This will need
to change when the kernel no longer saves all regs on a syscall. */
case PTRACE_PEEKUSR: {
unsigned long tmp;
ret = -EIO;
if ((addr & 3) || (unsigned long) addr >= sizeof(struct pt_regs))
goto out_tsk;
//......... part of the code is omitted here .........
Example 11: ptrace_attach
static int ptrace_attach(struct task_struct *task)
{
bool wait_trap = false;
int retval;
audit_ptrace(task);
retval = -EPERM;
if (unlikely(task->flags & PF_KTHREAD))
goto out;
if (same_thread_group(task, current))
goto out;
/*
* Protect exec's credential calculations against our interference;
* SUID, SGID and LSM creds get determined differently
* under ptrace.
*/
retval = -ERESTARTNOINTR;
if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
goto out;
task_lock(task);
retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
task_unlock(task);
if (retval)
goto unlock_creds;
write_lock_irq(&tasklist_lock);
retval = -EPERM;
if (unlikely(task->exit_state))
goto unlock_tasklist;
if (task->ptrace)
goto unlock_tasklist;
task->ptrace = PT_PTRACED;
if (task_ns_capable(task, CAP_SYS_PTRACE))
task->ptrace |= PT_PTRACE_CAP;
__ptrace_link(task, current);
send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
spin_lock(&task->sighand->siglock);
/*
* If the task is already STOPPED, set GROUP_STOP_PENDING and
* TRAPPING, and kick it so that it transits to TRACED. TRAPPING
* will be cleared if the child completes the transition or any
* event which clears the group stop states happens. We'll wait
* for the transition to complete before returning from this
* function.
*
* This hides STOPPED -> RUNNING -> TRACED transition from the
* attaching thread but a different thread in the same group can
* still observe the transient RUNNING state. IOW, if another
* thread's WNOHANG wait(2) on the stopped tracee races against
* ATTACH, the wait(2) may fail due to the transient RUNNING.
*
* The following task_is_stopped() test is safe as both transitions
* in and out of STOPPED are protected by siglock.
*/
if (task_is_stopped(task)) {
task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING;
signal_wake_up(task, 1);
wait_trap = true;
}
spin_unlock(&task->sighand->siglock);
retval = 0;
unlock_tasklist:
write_unlock_irq(&tasklist_lock);
unlock_creds:
mutex_unlock(&task->signal->cred_guard_mutex);
out:
if (wait_trap)
wait_event(current->signal->wait_chldexit,
!(task->group_stop & GROUP_STOP_TRAPPING));
return retval;
}
Example 12: get_device
/**
* zfcp_unit_enqueue - enqueue unit to unit list of a port.
* @port: pointer to port where unit is added
* @fcp_lun: FCP LUN of unit to be enqueued
* Returns: pointer to enqueued unit on success, ERR_PTR on error
*
* Sets up some unit internal structures and creates sysfs entry.
*/
struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
{
struct zfcp_unit *unit;
int retval = -ENOMEM;
get_device(&port->dev);
unit = zfcp_get_unit_by_lun(port, fcp_lun);
if (unit) {
put_device(&unit->dev);
retval = -EEXIST;
goto err_out;
}
unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
if (!unit)
goto err_out;
unit->port = port;
unit->fcp_lun = fcp_lun;
unit->dev.parent = &port->dev;
unit->dev.release = zfcp_unit_release;
if (dev_set_name(&unit->dev, "0x%016llx",
(unsigned long long) fcp_lun)) {
kfree(unit);
goto err_out;
}
retval = -EINVAL;
INIT_WORK(&unit->scsi_work, zfcp_scsi_scan);
spin_lock_init(&unit->latencies.lock);
unit->latencies.write.channel.min = 0xFFFFFFFF;
unit->latencies.write.fabric.min = 0xFFFFFFFF;
unit->latencies.read.channel.min = 0xFFFFFFFF;
unit->latencies.read.fabric.min = 0xFFFFFFFF;
unit->latencies.cmd.channel.min = 0xFFFFFFFF;
unit->latencies.cmd.fabric.min = 0xFFFFFFFF;
if (device_register(&unit->dev)) {
put_device(&unit->dev);
goto err_out;
}
if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs))
goto err_out_put;
write_lock_irq(&port->unit_list_lock);
list_add_tail(&unit->list, &port->unit_list);
write_unlock_irq(&port->unit_list_lock);
atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);
return unit;
err_out_put:
device_unregister(&unit->dev);
err_out:
put_device(&port->dev);
return ERR_PTR(retval);
}
Example 13: kref_get
/**
* zfcp_port_enqueue - enqueue port to port list of adapter
* @adapter: adapter where remote port is added
* @wwpn: WWPN of the remote port to be enqueued
* @status: initial status for the port
* @d_id: destination id of the remote port to be enqueued
* Returns: pointer to enqueued port on success, ERR_PTR on error
*
* All port internal structures are set up and the sysfs entry is generated.
* d_id is used to enqueue ports with a well known address like the Directory
* Service for nameserver lookup.
*/
struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
u32 status, u32 d_id)
{
struct zfcp_port *port;
int retval = -ENOMEM;
kref_get(&adapter->ref);
port = zfcp_get_port_by_wwpn(adapter, wwpn);
if (port) {
put_device(&port->dev);
retval = -EEXIST;
goto err_out;
}
port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
if (!port)
goto err_out;
rwlock_init(&port->unit_list_lock);
INIT_LIST_HEAD(&port->unit_list);
INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
INIT_WORK(&port->rport_work, zfcp_scsi_rport_work);
port->adapter = adapter;
port->d_id = d_id;
port->wwpn = wwpn;
port->rport_task = RPORT_NONE;
port->dev.parent = &adapter->ccw_device->dev;
port->dev.release = zfcp_port_release;
if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
kfree(port);
goto err_out;
}
retval = -EINVAL;
if (device_register(&port->dev)) {
put_device(&port->dev);
goto err_out;
}
if (sysfs_create_group(&port->dev.kobj,
&zfcp_sysfs_port_attrs))
goto err_out_put;
write_lock_irq(&adapter->port_list_lock);
list_add_tail(&port->list, &adapter->port_list);
write_unlock_irq(&adapter->port_list_lock);
atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
return port;
err_out_put:
device_unregister(&port->dev);
err_out:
zfcp_ccw_adapter_put(adapter);
return ERR_PTR(retval);
}
Example 14: ERR_PTR
//......... part of the code is omitted here .........
* The task hasn't been attached yet, so its cpus_allowed mask will
* not be changed, nor will its assigned CPU.
*
* The cpus_allowed mask of the parent may have changed after it was
* copied first time - so re-copy it here, then check the child's CPU
* to ensure it is on a valid CPU (and if not, just force it back to
* parent's CPU). This avoids a lot of nasty races.
*/
p->cpus_allowed = current->cpus_allowed;
if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
!cpu_online(task_cpu(p))))
set_task_cpu(p, smp_processor_id());
/* CLONE_PARENT re-uses the old parent */
if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
p->real_parent = current->real_parent;
else
p->real_parent = current;
p->parent = p->real_parent;
spin_lock(&current->sighand->siglock);
/*
* Process group and session signals need to be delivered to just the
* parent before the fork or both the parent and the child after the
* fork. Restart if a signal comes in before we add the new process to
* its process group.
* A fatal signal pending means that current will exit, so the new
* thread can't slip out of an OOM kill (or normal SIGKILL).
*/
recalc_sigpending();
if (signal_pending(current)) {
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
retval = -ERESTARTNOINTR;
goto bad_fork_cleanup_namespaces;
}
if (clone_flags & CLONE_THREAD) {
p->group_leader = current->group_leader;
list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
if (!cputime_eq(current->signal->it_virt_expires,
cputime_zero) ||
!cputime_eq(current->signal->it_prof_expires,
cputime_zero) ||
current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
!list_empty(&current->signal->cpu_timers[0]) ||
!list_empty(&current->signal->cpu_timers[1]) ||
!list_empty(&current->signal->cpu_timers[2])) {
/*
* Have child wake up on its first tick to check
* for process CPU timers.
*/
p->it_prof_expires = jiffies_to_cputime(1);
}
}
if (likely(p->pid)) {
add_parent(p);
if (unlikely(p->ptrace & PT_PTRACED))
__ptrace_link(p, current->parent);
if (thread_group_leader(p)) {
p->signal->tty = current->signal->tty;
p->signal->pgrp = process_group(current);
Example 15: write_one_page
/**
* write_one_page - write out a single page and optionally wait on I/O
* @page: the page to write
* @wait: if true, wait on writeout
*
* The page must be locked by the caller and will be unlocked upon return.
*
* write_one_page() returns a negative error code if I/O failed.
*/
int write_one_page(struct page *page, int wait)
{
struct address_space *mapping = page->mapping;
int ret = 0;
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = 1,
};
BUG_ON(!PageLocked(page));
if (wait)
wait_on_page_writeback(page);
if (clear_page_dirty_for_io(page)) {
page_cache_get(page);
ret = mapping->a_ops->writepage(page, &wbc);
if (ret == 0 && wait) {
wait_on_page_writeback(page);
if (PageError(page))
ret = -EIO;
}
page_cache_release(page);
} else {
unlock_page(page);
}
return ret;
}
EXPORT_SYMBOL(write_one_page);
/*
* For address_spaces which do not use buffers nor write back.
*/
int __set_page_dirty_no_writeback(struct page *page)
{
if (!PageDirty(page))
SetPageDirty(page);
return 0;
}
/*
* For address_spaces which do not use buffers. Just tag the page as dirty in
* its radix tree.
*
* This is also used when a single buffer is being dirtied: we want to set the
* page dirty in that case, but not all the buffers. This is a "bottom-up"
* dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
*
* Most callers have locked the page, which pins the address_space in memory.
* But zap_pte_range() does not lock the page, however in that case the
* mapping is pinned by the vma's ->vm_file reference.
*
* We take care to handle the case where the page was truncated from the
* mapping by re-checking page_mapping() inside tree_lock.
*/
int __set_page_dirty_nobuffers(struct page *page)
{
if (!TestSetPageDirty(page)) {
struct address_space *mapping = page_mapping(page);
struct address_space *mapping2;
if (!mapping)
return 1;
write_lock_irq(&mapping->tree_lock);
mapping2 = page_mapping(page);
if (mapping2) { /* Race with truncate? */
BUG_ON(mapping2 != mapping);
WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
if (mapping_cap_account_dirty(mapping)) {
__inc_zone_page_state(page, NR_FILE_DIRTY);
task_io_account_write(PAGE_CACHE_SIZE);
}
radix_tree_tag_set(&mapping->page_tree,
page_index(page), PAGECACHE_TAG_DIRTY);
}
write_unlock_irq(&mapping->tree_lock);
if (mapping->host) {
/* !PageAnon && !swapper_space */
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}
return 1;
}
return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);
/*
* When a writepage implementation decides that it doesn't want to write this
* page for some reason, it should redirty the locked page via
* redirty_page_for_writepage() and it should then unlock the page and return 0
//......... part of the code is omitted here .........