This article collects typical usage examples of the write_unlock function in C++. If you have been wondering exactly how write_unlock is used, or what it is for, the hand-picked code examples here may help.
Below are 15 code examples of write_unlock, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
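Before the individual examples, here is a minimal sketch of the pattern they all share: write_lock() and write_unlock() bracket a short critical section that modifies shared data, while readers use read_lock()/read_unlock(). The sketch assumes a Linux-kernel context; the names demo_lock, demo_interval, demo_set_interval and demo_get_interval are hypothetical and used only for illustration.

#include <linux/spinlock.h>	/* DEFINE_RWLOCK(), write_lock(), write_unlock() */

/* Hypothetical shared state guarded by a reader/writer lock. */
static DEFINE_RWLOCK(demo_lock);
static unsigned long demo_interval;

/* Writer side: exclusive access while the shared field is updated. */
static void demo_set_interval(unsigned long value)
{
	write_lock(&demo_lock);
	demo_interval = value;
	write_unlock(&demo_lock);
}

/* Reader side: any number of readers may hold the lock concurrently. */
static unsigned long demo_get_interval(void)
{
	unsigned long value;

	read_lock(&demo_lock);
	value = demo_interval;
	read_unlock(&demo_lock);
	return value;
}

Variants such as write_lock_irq() and read_lock_irqsave() appear in some of the examples below when the lock may also be taken from interrupt context.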
Example 1: led_interval_store
static ssize_t led_interval_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct led_netdev_data *trigger_data = led_cdev->trigger_data;
int ret = -EINVAL;
char *after;
unsigned long value = simple_strtoul(buf, &after, 10);
size_t count = after - buf;
if (*after && isspace(*after))
count++;
/* impose some basic bounds on the timer interval */
if (count == size && value >= 5 && value <= 10000) {
write_lock(&trigger_data->lock);
trigger_data->interval = msecs_to_jiffies(value);
set_baseline_state(trigger_data); // resets timer
write_unlock(&trigger_data->lock);
ret = count;
}
return ret;
}
Example 2: integrity_iint_find
/**
* integrity_inode_get - find or allocate an iint associated with an inode
* @inode: pointer to the inode
* @return: allocated iint
*
* Caller must lock i_mutex
*/
struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
{
struct rb_node **p;
struct rb_node *node, *parent = NULL;
struct integrity_iint_cache *iint, *test_iint;
iint = integrity_iint_find(inode);
if (iint)
return iint;
iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
if (!iint)
return NULL;
write_lock(&integrity_iint_lock);
p = &integrity_iint_tree.rb_node;
while (*p) {
parent = *p;
test_iint = rb_entry(parent, struct integrity_iint_cache,
rb_node);
if (inode < test_iint->inode)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
iint->inode = inode;
node = &iint->rb_node;
inode->i_flags |= S_IMA;
rb_link_node(node, parent, p);
rb_insert_color(node, &integrity_iint_tree);
write_unlock(&integrity_iint_lock);
return iint;
}
Example 3: __iounmap
void __iounmap(volatile void __iomem *addr)
{
#ifndef CONFIG_SMP
struct vm_struct **p, *tmp;
#endif
unsigned int section_mapping = 0;
addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long)addr);
#ifndef CONFIG_SMP
/*
* If this is a section based mapping we need to handle it
* specially as the VM subsystem does not know how to handle
* such a beast. We need the lock here b/c we need to clear
* all the mappings before the area can be reclaimed
* by someone else.
*/
write_lock(&vmlist_lock);
for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
if (tmp->flags & VM_ARM_SECTION_MAPPING) {
*p = tmp->next;
unmap_area_sections((unsigned long)tmp->addr,
tmp->size);
kfree(tmp);
section_mapping = 1;
}
break;
}
}
write_unlock(&vmlist_lock);
#endif
if (!section_mapping)
vunmap((void __force *)addr);
}
Example 4: ip6_fl_gc
static void ip6_fl_gc(unsigned long dummy)
{
int i;
unsigned long now = jiffies;
unsigned long sched = 0;
write_lock(&ip6_fl_lock);
for (i=0; i<=FL_HASH_MASK; i++) {
struct ip6_flowlabel *fl, **flp;
flp = &fl_ht[i];
while ((fl=*flp) != NULL) {
if (atomic_read(&fl->users) == 0) {
unsigned long ttd = fl->lastuse + fl->linger;
if (time_after(ttd, fl->expires))
fl->expires = ttd;
ttd = fl->expires;
if (time_after_eq(now, ttd)) {
*flp = fl->next;
fl_free(fl);
atomic_dec(&fl_size);
continue;
}
if (!sched || time_before(ttd, sched))
sched = ttd;
}
flp = &fl->next;
}
}
if (!sched && atomic_read(&fl_size))
sched = now + FL_MAX_LINGER;
if (sched) {
mod_timer(&ip6_fl_gc_timer, sched);
}
write_unlock(&ip6_fl_lock);
}
Example 5: cbk_cache_invalidate
/* Remove references, if any, to @node from coord cache */
void cbk_cache_invalidate(const znode * node /* node to remove from cache */ ,
reiser4_tree * tree/* tree to remove node from */)
{
cbk_cache_slot *slot;
cbk_cache *cache;
int i;
assert("nikita-350", node != NULL);
assert("nikita-1479", LOCK_CNT_GTZ(rw_locked_tree));
cache = &tree->cbk_cache;
assert("nikita-2470", cbk_cache_invariant(cache));
write_lock(&(cache->guard));
for (i = 0, slot = cache->slot; i < cache->nr_slots; ++i, ++slot) {
if (slot->node == node) {
list_move_tail(&slot->lru, &cache->lru);
slot->node = NULL;
break;
}
}
write_unlock(&(cache->guard));
assert("nikita-2471", cbk_cache_invariant(cache));
}
Example 6: ami304_ioctl
static int ami304_ioctl(struct inode *inode, struct file *file, unsigned int cmd,unsigned long arg)
{
char strbuf[AMI304_BUFSIZE];
int controlbuf[10];
void __user *data;
int retval=0;
int mode=0;
#if DEBUG_AMI304
printk(KERN_ERR "ami304 - %s \n",__FUNCTION__);
#endif
// check whether the caller has root privileges (CAP_SYS_ADMIN)
if (!capable(CAP_SYS_ADMIN)) {
retval = -EPERM;
goto err_out;
}
switch (cmd) {
case AMI304_IOCTL_INIT:
read_lock(&ami304_data.lock);
mode = ami304_data.mode;
read_unlock(&ami304_data.lock);
AMI304_Init(mode);
break;
case AMI304_IOCTL_READ_CHIPINFO:
data = (void __user *) arg;
if (data == NULL)
break;
AMI304_ReadChipInfo(strbuf, AMI304_BUFSIZE);
if (copy_to_user(data, strbuf, strlen(strbuf) + 1)) {
retval = -EFAULT;
goto err_out;
}
break;
case AMI304_IOCTL_READ_SENSORDATA:
data = (void __user *) arg;
if (data == NULL)
break;
AMI304_ReadSensorData(strbuf, AMI304_BUFSIZE);
if (copy_to_user(data, strbuf, strlen(strbuf) + 1)) {
retval = -EFAULT;
goto err_out;
}
break;
case AMI304_IOCTL_READ_POSTUREDATA:
data = (void __user *) arg;
if (data == NULL)
break;
AMI304_ReadPostureData(strbuf, AMI304_BUFSIZE);
if (copy_to_user(data, strbuf, strlen(strbuf) + 1)) {
retval = -EFAULT;
goto err_out;
}
break;
case AMI304_IOCTL_READ_CALIDATA:
data = (void __user *) arg;
if (data == NULL)
break;
AMI304_ReadCaliData(strbuf, AMI304_BUFSIZE);
if (copy_to_user(data, strbuf, strlen(strbuf) + 1)) {
retval = -EFAULT;
goto err_out;
}
break;
case AMI304_IOCTL_READ_CONTROL:
read_lock(&ami304mid_data.ctrllock);
memcpy(controlbuf, &ami304mid_data.controldata[0], sizeof(controlbuf));
read_unlock(&ami304mid_data.ctrllock);
data = (void __user *) arg;
if (data == NULL)
break;
if (copy_to_user(data, controlbuf, sizeof(controlbuf))) {
retval = -EFAULT;
goto err_out;
}
break;
case AMI304_IOCTL_SET_CONTROL:
data = (void __user *) arg;
if (data == NULL)
break;
if (copy_from_user(controlbuf, data, sizeof(controlbuf))) {
retval = -EFAULT;
goto err_out;
}
write_lock(&ami304mid_data.ctrllock);
memcpy(&ami304mid_data.controldata[0], controlbuf, sizeof(controlbuf));
write_unlock(&ami304mid_data.ctrllock);
break;
case AMI304_IOCTL_SET_MODE:
data = (void __user *) arg;
if (data == NULL)
break;
if (copy_from_user(&mode, data, sizeof(mode))) {
//......... part of the code is omitted here .........
Example 7: SYSCALL_DEFINE1
/*
* unshare allows a process to 'unshare' part of the process
* context which was originally shared using clone. copy_*
* functions used by do_fork() cannot be used here directly
* because they modify an inactive task_struct that is being
* constructed. Here we are modifying the current, active,
* task_struct.
*/
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
int err = 0;
struct fs_struct *fs, *new_fs = NULL;
struct sighand_struct *new_sigh = NULL;
struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
struct files_struct *fd, *new_fd = NULL;
struct nsproxy *new_nsproxy = NULL;
int do_sysvsem = 0;
check_unshare_flags(&unshare_flags);
/* Return -EINVAL for all unsupported flags */
err = -EINVAL;
if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
goto bad_unshare_out;
/*
* CLONE_NEWIPC must also detach from the undolist: after switching
* to a new ipc namespace, the semaphore arrays from the old
* namespace are unreachable.
*/
if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
do_sysvsem = 1;
if ((err = unshare_thread(unshare_flags)))
goto bad_unshare_out;
if ((err = unshare_fs(unshare_flags, &new_fs)))
goto bad_unshare_cleanup_thread;
if ((err = unshare_sighand(unshare_flags, &new_sigh)))
goto bad_unshare_cleanup_fs;
if ((err = unshare_vm(unshare_flags, &new_mm)))
goto bad_unshare_cleanup_sigh;
if ((err = unshare_fd(unshare_flags, &new_fd)))
goto bad_unshare_cleanup_vm;
if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
new_fs)))
goto bad_unshare_cleanup_fd;
if (new_fs || new_mm || new_fd || do_sysvsem || new_nsproxy) {
if (do_sysvsem) {
/*
* CLONE_SYSVSEM is equivalent to sys_exit().
*/
exit_sem(current);
}
if (new_nsproxy) {
switch_task_namespaces(current, new_nsproxy);
new_nsproxy = NULL;
}
task_lock(current);
if (new_fs) {
fs = current->fs;
write_lock(&fs->lock);
current->fs = new_fs;
if (--fs->users)
new_fs = NULL;
else
new_fs = fs;
write_unlock(&fs->lock);
}
if (new_mm) {
mm = current->mm;
active_mm = current->active_mm;
current->mm = new_mm;
current->active_mm = new_mm;
activate_mm(active_mm, new_mm);
new_mm = mm;
}
if (new_fd) {
fd = current->files;
current->files = new_fd;
new_fd = fd;
}
task_unlock(current);
}
if (new_nsproxy)
put_nsproxy(new_nsproxy);
bad_unshare_cleanup_fd:
if (new_fd)
put_files_struct(new_fd);
bad_unshare_cleanup_vm:
//......... part of the code is omitted here .........
Example 8: zfcp_erp_strategy
static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
{
int retval;
struct zfcp_adapter *adapter = erp_action->adapter;
unsigned long flags;
read_lock_irqsave(&zfcp_data.config_lock, flags);
write_lock(&adapter->erp_lock);
zfcp_erp_strategy_check_fsfreq(erp_action);
if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
zfcp_erp_action_dequeue(erp_action);
retval = ZFCP_ERP_DISMISSED;
goto unlock;
}
if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
retval = ZFCP_ERP_FAILED;
goto check_target;
}
zfcp_erp_action_to_running(erp_action);
/* no lock to allow for blocking operations */
write_unlock(&adapter->erp_lock);
read_unlock_irqrestore(&zfcp_data.config_lock, flags);
retval = zfcp_erp_strategy_do_action(erp_action);
read_lock_irqsave(&zfcp_data.config_lock, flags);
write_lock(&adapter->erp_lock);
if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED)
retval = ZFCP_ERP_CONTINUES;
switch (retval) {
case ZFCP_ERP_NOMEM:
if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) {
++adapter->erp_low_mem_count;
erp_action->status |= ZFCP_STATUS_ERP_LOWMEM;
}
if (adapter->erp_total_count == adapter->erp_low_mem_count)
_zfcp_erp_adapter_reopen(adapter, 0, "erstgy1", NULL);
else {
zfcp_erp_strategy_memwait(erp_action);
retval = ZFCP_ERP_CONTINUES;
}
goto unlock;
case ZFCP_ERP_CONTINUES:
if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
--adapter->erp_low_mem_count;
erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
}
goto unlock;
}
check_target:
retval = zfcp_erp_strategy_check_target(erp_action, retval);
zfcp_erp_action_dequeue(erp_action);
retval = zfcp_erp_strategy_statechange(erp_action, retval);
if (retval == ZFCP_ERP_EXIT)
goto unlock;
if (retval == ZFCP_ERP_SUCCEEDED)
zfcp_erp_strategy_followup_success(erp_action);
if (retval == ZFCP_ERP_FAILED)
zfcp_erp_strategy_followup_failed(erp_action);
unlock:
write_unlock(&adapter->erp_lock);
read_unlock_irqrestore(&zfcp_data.config_lock, flags);
if (retval != ZFCP_ERP_CONTINUES)
zfcp_erp_action_cleanup(erp_action, retval);
return retval;
}
Example 9: qsd_fini
/*
* Release a qsd_instance. Companion of qsd_init(). This releases all data
* structures associated with the quota slave (on-disk objects, lquota entry
* tables, ...).
* This function should be called when the OSD is shutting down.
*
* \param env - is the environment passed by the caller
* \param qsd - is the qsd instance to shutdown
*/
void qsd_fini(const struct lu_env *env, struct qsd_instance *qsd)
{
int qtype;
ENTRY;
if (unlikely(qsd == NULL))
RETURN_EXIT;
CDEBUG(D_QUOTA, "%s: initiating QSD shutdown\n", qsd->qsd_svname);
write_lock(&qsd->qsd_lock);
qsd->qsd_stopping = true;
write_unlock(&qsd->qsd_lock);
/* remove qsd proc entry */
if (qsd->qsd_proc != NULL) {
lprocfs_remove(&qsd->qsd_proc);
qsd->qsd_proc = NULL;
}
/* stop the writeback thread */
qsd_stop_upd_thread(qsd);
/* shutdown the reintegration threads */
for (qtype = USRQUOTA; qtype < MAXQUOTAS; qtype++) {
if (qsd->qsd_type_array[qtype] == NULL)
continue;
qsd_stop_reint_thread(qsd->qsd_type_array[qtype]);
}
if (qsd->qsd_ns != NULL) {
qsd->qsd_ns = NULL;
}
/* free per-quota type data */
for (qtype = USRQUOTA; qtype < MAXQUOTAS; qtype++)
qsd_qtype_fini(env, qsd, qtype);
/* deregister connection to the quota master */
qsd->qsd_exp_valid = false;
lustre_deregister_lwp_item(&qsd->qsd_exp);
/* release per-filesystem information */
if (qsd->qsd_fsinfo != NULL) {
mutex_lock(&qsd->qsd_fsinfo->qfs_mutex);
/* remove from the list of fsinfo */
cfs_list_del_init(&qsd->qsd_link);
mutex_unlock(&qsd->qsd_fsinfo->qfs_mutex);
qsd_put_fsinfo(qsd->qsd_fsinfo);
qsd->qsd_fsinfo = NULL;
}
/* release quota root directory */
if (qsd->qsd_root != NULL) {
lu_object_put(env, &qsd->qsd_root->do_lu);
qsd->qsd_root = NULL;
}
/* release reference on dt_device */
if (qsd->qsd_dev != NULL) {
lu_ref_del(&qsd->qsd_dev->dd_lu_dev.ld_reference, "qsd", qsd);
lu_device_put(&qsd->qsd_dev->dd_lu_dev);
qsd->qsd_dev = NULL;
}
CDEBUG(D_QUOTA, "%s: QSD shutdown completed\n", qsd->qsd_svname);
OBD_FREE_PTR(qsd);
EXIT;
}
Example 10: cache_get_and_pin
/*
* REQUIRES:
* a) cache list read(write) lock
*
* PROCESSES:
* a) lock hash chain
* b) find pair via key
* c) if found, lock the value and return
* d) if not found:
* d0) list lock
* d1) add to cache/evict/clean list
* d2) value lock
* d3) list unlock
* d4) disk-mtx lock
* d5) fetch from disk
* d6) disk-mtx unlock
*/
int cache_get_and_pin(struct cache_file *cf,
NID k,
struct node **n,
enum lock_type locktype)
{
struct cpair *p;
struct cache *c = cf->cache;
TRY_PIN:
/* make room for me, please */
_make_room(c);
cpair_locked_by_key(c->table, k);
p = cpair_htable_find(c->table, k);
cpair_unlocked_by_key(c->table, k);
if (p) {
if (locktype != L_READ) {
if (!try_write_lock(&p->value_lock))
goto TRY_PIN;
} else {
if (!try_read_lock(&p->value_lock))
goto TRY_PIN;
}
*n = p->v;
return NESS_OK;
}
cpair_locked_by_key(c->table, k);
p = cpair_htable_find(c->table, k);
if (p) {
/*
* if we go here, means that someone got before us
* try pin again
*/
cpair_unlocked_by_key(c->table, k);
goto TRY_PIN;
}
struct tree_callback *tcb = cf->tcb;
struct tree *tree = (struct tree*)cf->args;
int r = tcb->fetch_node(tree, k, n);
if (r != NESS_OK) {
__PANIC("fetch node from disk error, nid [%" PRIu64 "], errno %d",
k,
r);
goto ERR;
}
p = cpair_new();
cpair_init(p, *n, cf);
/* add to cache list */
write_lock(&c->clock_lock);
_cache_insert(c, p);
write_unlock(&c->clock_lock);
if (locktype != L_READ) {
write_lock(&p->value_lock);
} else {
read_lock(&p->value_lock);
}
cpair_unlocked_by_key(c->table, k);
return NESS_OK;
ERR:
return NESS_ERR;
}
Example 11: ip6_frag_queue
//......... part of the code is omitted here .........
prev = NULL;
for(next = fq->q.fragments; next != NULL; next = next->next) {
if (FRAG6_CB(next)->offset >= offset)
break; /* bingo! */
prev = next;
}
/* We found where to put this one. Check for overlap with
* preceding fragment, and, if needed, align things so that
* any overlaps are eliminated.
*/
if (prev) {
int i = (FRAG6_CB(prev)->offset + prev->len) - offset;
if (i > 0) {
offset += i;
if (end <= offset)
goto err;
if (!pskb_pull(skb, i))
goto err;
if (skb->ip_summed != CHECKSUM_UNNECESSARY)
skb->ip_summed = CHECKSUM_NONE;
}
}
/* Look for overlap with succeeding segments.
* If we can merge fragments, do it.
*/
while (next && FRAG6_CB(next)->offset < end) {
int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */
if (i < next->len) {
/* Eat head of the next overlapped fragment
* and leave the loop. The next ones cannot overlap.
*/
if (!pskb_pull(next, i))
goto err;
FRAG6_CB(next)->offset += i; /* next fragment */
fq->q.meat -= i;
if (next->ip_summed != CHECKSUM_UNNECESSARY)
next->ip_summed = CHECKSUM_NONE;
break;
} else {
struct sk_buff *free_it = next;
/* Old fragment is completely overridden with
* new one drop it.
*/
next = next->next;
if (prev)
prev->next = next;
else
fq->q.fragments = next;
fq->q.meat -= free_it->len;
frag_kfree_skb(fq->q.net, free_it, NULL);
}
}
FRAG6_CB(skb)->offset = offset;
/* Insert this fragment in the chain of fragments. */
skb->next = next;
if (prev)
prev->next = skb;
else
fq->q.fragments = skb;
dev = skb->dev;
if (dev) {
fq->iif = dev->ifindex;
skb->dev = NULL;
}
fq->q.stamp = skb->tstamp;
fq->q.meat += skb->len;
atomic_add(skb->truesize, &fq->q.net->mem);
/* The first fragment.
* nhoffset is obtained from the first fragment, of course.
*/
if (offset == 0) {
fq->nhoffset = nhoff;
fq->q.last_in |= INET_FRAG_FIRST_IN;
}
if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
fq->q.meat == fq->q.len)
return ip6_frag_reasm(fq, prev, dev);
write_lock(&ip6_frags.lock);
list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
write_unlock(&ip6_frags.lock);
return -1;
err:
IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
kfree_skb(skb);
return -1;
}
Example 12: __jbd2_log_wait_for_space
/*
* __jbd2_log_wait_for_space: wait until there is space in the journal.
*
* Called under j_state_lock *only*. It will be unlocked if we have to wait
* for a checkpoint to free up some space in the log.
*/
void __jbd2_log_wait_for_space(journal_t *journal)
{
int nblocks, space_left;
/* assert_spin_locked(&journal->j_state_lock); */
nblocks = jbd2_space_needed(journal);
while (jbd2_log_space_left(journal) < nblocks) {
write_unlock(&journal->j_state_lock);
mutex_lock(&journal->j_checkpoint_mutex);
/*
* Test again, another process may have checkpointed while we
* were waiting for the checkpoint lock. If there are no
* transactions ready to be checkpointed, try to recover
* journal space by calling cleanup_journal_tail(), and if
* that doesn't work, by waiting for the currently committing
* transaction to complete. If there is absolutely no way
* to make progress, this is either a BUG or corrupted
* filesystem, so abort the journal and leave a stack
* trace for forensic evidence.
*/
write_lock(&journal->j_state_lock);
if (journal->j_flags & JBD2_ABORT) {
mutex_unlock(&journal->j_checkpoint_mutex);
return;
}
spin_lock(&journal->j_list_lock);
nblocks = jbd2_space_needed(journal);
space_left = jbd2_log_space_left(journal);
if (space_left < nblocks) {
int chkpt = journal->j_checkpoint_transactions != NULL;
tid_t tid = 0;
if (journal->j_committing_transaction)
tid = journal->j_committing_transaction->t_tid;
spin_unlock(&journal->j_list_lock);
write_unlock(&journal->j_state_lock);
if (chkpt) {
jbd2_log_do_checkpoint(journal);
} else if (jbd2_cleanup_journal_tail(journal) == 0) {
/* We were able to recover space; yay! */
;
} else if (tid) {
/*
* jbd2_journal_commit_transaction() may want
* to take the checkpoint_mutex if JBD2_FLUSHED
* is set. So we need to temporarily drop it.
*/
mutex_unlock(&journal->j_checkpoint_mutex);
jbd2_log_wait_commit(journal, tid);
write_lock(&journal->j_state_lock);
continue;
} else {
printk(KERN_ERR "%s: needed %d blocks and "
"only had %d space available\n",
__func__, nblocks, space_left);
printk(KERN_ERR "%s: no way to get more "
"journal space in %s\n", __func__,
journal->j_devname);
WARN_ON(1);
jbd2_journal_abort(journal, 0);
}
write_lock(&journal->j_state_lock);
} else {
spin_unlock(&journal->j_list_lock);
}
mutex_unlock(&journal->j_checkpoint_mutex);
}
}
Example 13: ext2_ioctl
long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = filp->f_dentry->d_inode;
struct ext2_inode_info *ei = EXT2_I(inode);
unsigned int flags;
unsigned short rsv_window_size;
int ret;
ext2_debug ("cmd = %u, arg = %lu\n", cmd, arg);
switch (cmd) {
case EXT2_FAKE_B_ALLOC:
/* Fake allocation for ext2 filesystem.
* */
{
struct ext2_fake_b_alloc_arg config;
struct buffer_head bh_result;
sector_t iblock, off;
int ret = 0;
ret = copy_from_user(&config, (struct ext2_fake_b_alloc_arg __user *)arg,
sizeof(struct ext2_fake_b_alloc_arg));
if (ret != 0) {
printk (KERN_DEBUG "can't copy from user");
return -EIO;
} else ret = 0;
/* Allocate blocks. */
off = config.efba_off;
iblock = config.efba_off >> inode->i_blkbits;
while ((iblock << inode->i_blkbits) <
(config.efba_off + config.efba_size)) {
memset(&bh_result, 0, sizeof(struct buffer_head));
ret = ext2_get_block(inode, iblock, &bh_result, 1);
if (ret < 0) {
printk (KERN_DEBUG "get_block_error %d, escaping", ret);
break;
}
iblock++;
}
/* Set metadata */
write_lock(&EXT2_I(inode)->i_meta_lock);
if (ret == 0) {
printk (KERN_DEBUG "ok, set size");
inode->i_size = max_t(loff_t, inode->i_size,
config.efba_off + config.efba_size);
} else if(iblock != config.efba_off >> inode->i_blkbits) {
/* Partially allocated, size must be fixed. *
* But `i_blocks` should contain actual information. */
inode->i_size = inode->i_blocks << inode->i_blkbits;
}
inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
inode->i_version++;
write_unlock(&EXT2_I(inode)->i_meta_lock);
printk(KERN_DEBUG "returning %d\n", ret);
return ret;
}
case EXT2_IOC_GETFLAGS:
ext2_get_inode_flags(ei);
flags = ei->i_flags & EXT2_FL_USER_VISIBLE;
return put_user(flags, (int __user *) arg);
case EXT2_IOC_SETFLAGS: {
unsigned int oldflags;
ret = mnt_want_write(filp->f_path.mnt);
if (ret)
return ret;
if (!is_owner_or_cap(inode)) {
ret = -EACCES;
goto setflags_out;
}
if (get_user(flags, (int __user *) arg)) {
ret = -EFAULT;
goto setflags_out;
}
flags = ext2_mask_flags(inode->i_mode, flags);
mutex_lock(&inode->i_mutex);
/* Is it quota file? Do not allow user to mess with it */
if (IS_NOQUOTA(inode)) {
mutex_unlock(&inode->i_mutex);
ret = -EPERM;
goto setflags_out;
}
oldflags = ei->i_flags;
/*
* The IMMUTABLE and APPEND_ONLY flags can only be changed by
* the relevant capability.
*
* This test looks nicer. Thanks to Pauline Middelink
*/
if ((flags ^ oldflags) & (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL)) {
if (!capable(CAP_LINUX_IMMUTABLE)) {
//......... part of the code is omitted here .........
Example 14: entry
/* We start counting in the buffer with entry 2 and increment for every
entry (do not increment for . or .. entry) */
static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
struct file *file, char **ppCurrentEntry, int *num_to_ret)
{
int rc = 0;
int pos_in_buf = 0;
loff_t first_entry_in_buffer;
loff_t index_to_find = file->f_pos;
struct cifsFileInfo *cifsFile = file->private_data;
/* check if index in the buffer */
if ((cifsFile == NULL) || (ppCurrentEntry == NULL) ||
(num_to_ret == NULL))
return -ENOENT;
*ppCurrentEntry = NULL;
first_entry_in_buffer =
cifsFile->srch_inf.index_of_last_entry -
cifsFile->srch_inf.entries_in_buffer;
/* if first entry in buf is zero then is first buffer
in search response data which means it is likely . and ..
will be in this buffer, although some servers do not return
. and .. for the root of a drive and for those we need
to start two entries earlier */
dump_cifs_file_struct(file, "In fce ");
if (((index_to_find < cifsFile->srch_inf.index_of_last_entry) &&
is_dir_changed(file)) ||
(index_to_find < first_entry_in_buffer)) {
/* close and restart search */
cFYI(1, ("search backing up - close and restart search"));
write_lock(&GlobalSMBSeslock);
if (!cifsFile->srch_inf.endOfSearch &&
!cifsFile->invalidHandle) {
cifsFile->invalidHandle = true;
write_unlock(&GlobalSMBSeslock);
CIFSFindClose(xid, pTcon, cifsFile->netfid);
} else
write_unlock(&GlobalSMBSeslock);
if (cifsFile->srch_inf.ntwrk_buf_start) {
cFYI(1, ("freeing SMB ff cache buf on search rewind"));
if (cifsFile->srch_inf.smallBuf)
cifs_small_buf_release(cifsFile->srch_inf.
ntwrk_buf_start);
else
cifs_buf_release(cifsFile->srch_inf.
ntwrk_buf_start);
cifsFile->srch_inf.ntwrk_buf_start = NULL;
}
rc = initiate_cifs_search(xid, file);
if (rc) {
cFYI(1, ("error %d reinitiating a search on rewind",
rc));
return rc;
}
cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile);
}
while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) &&
(rc == 0) && !cifsFile->srch_inf.endOfSearch) {
cFYI(1, ("calling findnext2"));
rc = CIFSFindNext(xid, pTcon, cifsFile->netfid,
&cifsFile->srch_inf);
cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile);
if (rc)
return -ENOENT;
}
if (index_to_find < cifsFile->srch_inf.index_of_last_entry) {
/* we found the buffer that contains the entry */
/* scan and find it */
int i;
char *current_entry;
char *end_of_smb = cifsFile->srch_inf.ntwrk_buf_start +
smbCalcSize((struct smb_hdr *)
cifsFile->srch_inf.ntwrk_buf_start);
current_entry = cifsFile->srch_inf.srch_entries_start;
first_entry_in_buffer = cifsFile->srch_inf.index_of_last_entry
- cifsFile->srch_inf.entries_in_buffer;
pos_in_buf = index_to_find - first_entry_in_buffer;
cFYI(1, ("found entry - pos_in_buf %d", pos_in_buf));
for (i = 0; (i < (pos_in_buf)) && (current_entry != NULL); i++) {
/* go entry by entry figuring out which is first */
current_entry = nxt_dir_entry(current_entry, end_of_smb,
cifsFile->srch_inf.info_level);
}
if ((current_entry == NULL) && (i < pos_in_buf)) {
/* BB fixme - check if we should flag this error */
cERROR(1, ("reached end of buf searching for pos in buf"
" %d index to find %lld rc %d",
pos_in_buf, index_to_find, rc));
}
rc = 0;
*ppCurrentEntry = current_entry;
} else {
cFYI(1, ("index not in buffer - could not findnext into it"));
return 0;
//......... part of the code is omitted here .........
Example 15: hp_sdc_put
//......... part of the code is omitted here .........
while (i < 4 && w7[i] == hp_sdc.r7[i])
i++;
if (i < 4) {
hp_sdc_status_out8(HP_SDC_CMD_SET_D0 + i);
hp_sdc.wi = 0x70 + i;
goto finish;
}
idx++;
if ((act & HP_SDC_ACT_DURING) == HP_SDC_ACT_DATAREG)
goto actdone;
curr->idx = idx;
act &= ~HP_SDC_ACT_DATAREG;
break;
}
hp_sdc_data_out8(w7[hp_sdc.wi - 0x70]);
hp_sdc.r7[hp_sdc.wi - 0x70] = w7[hp_sdc.wi - 0x70];
hp_sdc.wi++; /* write index register autoincrements */
{
int i = 0;
while ((i < 4) && w7[i] == hp_sdc.r7[i])
i++;
if (i >= 4) {
curr->idx = idx + 1;
if ((act & HP_SDC_ACT_DURING) ==
HP_SDC_ACT_DATAREG)
goto actdone;
}
}
goto finish;
}
/* We don't go any further in the command if there is a pending read,
because we don't want interleaved results. */
read_lock_irq(&hp_sdc.rtq_lock);
if (hp_sdc.rcurr >= 0) {
read_unlock_irq(&hp_sdc.rtq_lock);
goto finish;
}
read_unlock_irq(&hp_sdc.rtq_lock);
if (act & HP_SDC_ACT_POSTCMD) {
uint8_t postcmd;
/* curr->idx should == idx at this point. */
postcmd = curr->seq[idx];
curr->idx++;
if (act & HP_SDC_ACT_DATAIN) {
/* Start a new read */
hp_sdc.rqty = curr->seq[curr->idx];
do_gettimeofday(&hp_sdc.rtv);
curr->idx++;
/* Still need to lock here in case of spurious irq. */
write_lock_irq(&hp_sdc.rtq_lock);
hp_sdc.rcurr = curridx;
write_unlock_irq(&hp_sdc.rtq_lock);
hp_sdc_status_out8(postcmd);
goto finish;
}
hp_sdc_status_out8(postcmd);
goto actdone;
}
actdone:
if (act & HP_SDC_ACT_SEMAPHORE)
up(curr->act.semaphore);
else if (act & HP_SDC_ACT_CALLBACK)
curr->act.irqhook(0,NULL,0,0);
if (curr->idx >= curr->endidx) { /* This transaction is over. */
if (act & HP_SDC_ACT_DEALLOC)
kfree(curr);
hp_sdc.tq[curridx] = NULL;
} else {
curr->actidx = idx + 1;
curr->idx = idx + 2;
}
/* Interleave outbound data between the transactions. */
hp_sdc.wcurr++;
if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN)
hp_sdc.wcurr = 0;
finish:
/* If by some quirk IBF has cleared and our ISR has run to
see that that has happened, do it all again. */
if (!hp_sdc.ibf && limit++ < 20)
goto anew;
done:
if (hp_sdc.wcurr >= 0)
tasklet_schedule(&hp_sdc.task);
write_unlock(&hp_sdc.lock);
return 0;
}