This article collects typical usage examples of the C++ spin_lock function. If you are wondering what spin_lock does, how to call it, or what real-world uses look like, the hand-picked examples below (all drawn from Linux kernel C code) should help.
The following shows 15 code examples of spin_lock, ordered by popularity.
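Before the examples, a minimal sketch of the pattern they all share may help: spin_lock()/spin_unlock() bracket a short critical section that must not sleep, and the plain variants are only safe when the lock is never taken from interrupt context (several examples below use the _irqsave form for exactly that reason). Everything in this sketch (the struct and function names) is hypothetical, for illustration only:

#include <linux/spinlock.h>
#include <linux/list.h>

/* hypothetical state: a counter and a list guarded by one spinlock */
struct demo_state {
	spinlock_t lock;	/* protects count and items */
	unsigned long count;
	struct list_head items;
};

static void demo_init(struct demo_state *s)
{
	spin_lock_init(&s->lock);
	INIT_LIST_HEAD(&s->items);
	s->count = 0;
}

static void demo_add(struct demo_state *s, struct list_head *node)
{
	spin_lock(&s->lock);	/* begin critical section; no sleeping here */
	list_add_tail(node, &s->items);
	s->count++;
	spin_unlock(&s->lock);	/* keep the held region as short as possible */
}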
Example 1: lfsck_namespace_double_scan_main
//......... code omitted here .........
lfsck_object_put(env, target);
checkpoint:
com->lc_new_checked++;
com->lc_new_scanned++;
ns->ln_fid_latest_scanned_phase2 = fid;
if (rc > 0)
ns->ln_objs_repaired_phase2++;
else if (rc < 0)
ns->ln_objs_failed_phase2++;
up_write(&com->lc_sem);
if ((rc == 0) || ((rc > 0) && !(bk->lb_param & LPF_DRYRUN))) {
lfsck_namespace_delete(env, com, &fid);
} else if (rc < 0) {
flags |= LLF_REPAIR_FAILED;
lfsck_namespace_update(env, com, &fid, flags, true);
}
if (rc < 0 && bk->lb_param & LPF_FAILOUT)
GOTO(put, rc);
if (unlikely(cfs_time_beforeq(com->lc_time_next_checkpoint,
cfs_time_current())) &&
com->lc_new_checked != 0) {
down_write(&com->lc_sem);
ns->ln_run_time_phase2 +=
cfs_duration_sec(cfs_time_current() +
HALF_SEC - com->lc_time_last_checkpoint);
ns->ln_time_last_checkpoint = cfs_time_current_sec();
ns->ln_objs_checked_phase2 += com->lc_new_checked;
com->lc_new_checked = 0;
rc = lfsck_namespace_store(env, com, false);
up_write(&com->lc_sem);
if (rc != 0)
GOTO(put, rc);
com->lc_time_last_checkpoint = cfs_time_current();
com->lc_time_next_checkpoint =
com->lc_time_last_checkpoint +
cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
}
lfsck_control_speed_by_self(com);
if (unlikely(!thread_is_running(thread)))
GOTO(put, rc = 0);
rc = iops->next(env, di);
} while (rc == 0);
GOTO(put, rc);
put:
iops->put(env, di);
fini:
iops->fini(env, di);
out:
down_write(&com->lc_sem);
ns->ln_run_time_phase2 += cfs_duration_sec(cfs_time_current() +
HALF_SEC - lfsck->li_time_last_checkpoint);
ns->ln_time_last_checkpoint = cfs_time_current_sec();
ns->ln_objs_checked_phase2 += com->lc_new_checked;
com->lc_new_checked = 0;
if (rc > 0) {
com->lc_journal = 0;
ns->ln_status = LS_COMPLETED;
if (!(bk->lb_param & LPF_DRYRUN))
ns->ln_flags &= ~(LF_SCANNED_ONCE | LF_INCONSISTENT);
ns->ln_time_last_complete = ns->ln_time_last_checkpoint;
ns->ln_success_count++;
} else if (rc == 0) {
ns->ln_status = lfsck->li_status;
if (ns->ln_status == 0)
ns->ln_status = LS_STOPPED;
} else {
ns->ln_status = LS_FAILED;
}
if (ns->ln_status != LS_PAUSED) {
spin_lock(&lfsck->li_lock);
cfs_list_del_init(&com->lc_link);
cfs_list_add_tail(&com->lc_link, &lfsck->li_list_idle);
spin_unlock(&lfsck->li_lock);
}
rc = lfsck_namespace_store(env, com, false);
up_write(&com->lc_sem);
if (atomic_dec_and_test(&lfsck->li_double_scan_count))
wake_up_all(&thread->t_ctl_waitq);
lfsck_thread_args_fini(lta);
return rc;
}
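The spin_lock() call near the end of this example guards a list move: the component is unhooked from whichever list currently holds it and appended to the idle list in one step, so no other CPU can ever observe it on neither or both lists. The cfs_list_* calls are Lustre wrappers around the kernel list API; in plain kernel style the same idiom looks like this (a sketch, reusing the example's field names):

spin_lock(&lfsck->li_lock);
list_del_init(&com->lc_link);		/* leaves the node self-linked */
list_add_tail(&com->lc_link, &lfsck->li_list_idle);
spin_unlock(&lfsck->li_lock);

list_del_init() is used rather than list_del() so the node stays in a valid (empty) state and a later unlink attempt is harmless.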
Example 2: lfsck_namespace_prep
static int lfsck_namespace_prep(const struct lu_env *env,
struct lfsck_component *com,
struct lfsck_start_param *lsp)
{
struct lfsck_instance *lfsck = com->lc_lfsck;
struct lfsck_namespace *ns = com->lc_file_ram;
struct lfsck_position *pos = &com->lc_pos_start;
if (ns->ln_status == LS_COMPLETED) {
int rc;
rc = lfsck_namespace_reset(env, com, false);
if (rc != 0)
return rc;
}
down_write(&com->lc_sem);
ns->ln_time_latest_start = cfs_time_current_sec();
spin_lock(&lfsck->li_lock);
if (ns->ln_flags & LF_SCANNED_ONCE) {
if (!lfsck->li_drop_dryrun ||
lfsck_pos_is_zero(&ns->ln_pos_first_inconsistent)) {
ns->ln_status = LS_SCANNING_PHASE2;
cfs_list_del_init(&com->lc_link);
cfs_list_add_tail(&com->lc_link,
&lfsck->li_list_double_scan);
if (!cfs_list_empty(&com->lc_link_dir))
cfs_list_del_init(&com->lc_link_dir);
lfsck_pos_set_zero(pos);
} else {
ns->ln_status = LS_SCANNING_PHASE1;
ns->ln_run_time_phase1 = 0;
ns->ln_run_time_phase2 = 0;
ns->ln_items_checked = 0;
ns->ln_items_repaired = 0;
ns->ln_items_failed = 0;
ns->ln_dirs_checked = 0;
ns->ln_mlinked_checked = 0;
ns->ln_objs_checked_phase2 = 0;
ns->ln_objs_repaired_phase2 = 0;
ns->ln_objs_failed_phase2 = 0;
ns->ln_objs_nlink_repaired = 0;
ns->ln_objs_lost_found = 0;
fid_zero(&ns->ln_fid_latest_scanned_phase2);
if (cfs_list_empty(&com->lc_link_dir))
cfs_list_add_tail(&com->lc_link_dir,
&lfsck->li_list_dir);
*pos = ns->ln_pos_first_inconsistent;
}
} else {
ns->ln_status = LS_SCANNING_PHASE1;
if (cfs_list_empty(&com->lc_link_dir))
cfs_list_add_tail(&com->lc_link_dir,
&lfsck->li_list_dir);
if (!lfsck->li_drop_dryrun ||
lfsck_pos_is_zero(&ns->ln_pos_first_inconsistent)) {
*pos = ns->ln_pos_last_checkpoint;
pos->lp_oit_cookie++;
} else {
*pos = ns->ln_pos_first_inconsistent;
}
}
spin_unlock(&lfsck->li_lock);
up_write(&com->lc_sem);
return 0;
}
Example 3: scfs_readpages
/**
* scfs_readpages
*
* Parameters:
* @file: upper file
* @*mapping: address_space struct for the file
* @*pages: list of pages to read in
* @nr_pages: number of pages to read in
*
* Return:
* SCFS_SUCCESS on success, or an error code on failure
*
* Description:
* - Asynchronously read pages for readahead. A scaling number of background threads
* will read & decompress them in a slightly deferred but parallelized manner.
*/
static int
scfs_readpages(struct file *file, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
struct scfs_inode_info *sii = SCFS_I(file->f_mapping->host);
struct scfs_sb_info *sbi = SCFS_S(file->f_mapping->host->i_sb);
struct file *lower_file = NULL;
struct page *page;
struct scfs_cinfo cinfo;
loff_t i_size;
pgoff_t start, end;
int page_idx, page_idx_readahead = 1024, ret = 0;
int readahead_page = 0;
int prev_cbi = 0;
int prev_cluster = -1, cur_cluster = -1;
int cluster_idx = 0;
i_size = i_size_read(&sii->vfs_inode);
if (!i_size) {
SCFS_PRINT("file %s: i_size is zero, "
"flags 0x%x sii->clust_info_size %d\n",
file->f_path.dentry->d_name.name, sii->flags,
sii->cinfo_array_size);
return 0;
}
#ifdef SCFS_ASYNC_READ_PROFILE
atomic_add(nr_pages, &sbi->scfs_standby_readpage_count);
#endif
#ifdef SCFS_NOTIFY_RANDOM_READ
lower_file = scfs_lower_file(file);
if (!lower_file) {
SCFS_PRINT_ERROR("file %s: lower file is null!\n",
file->f_path.dentry->d_name.name);
return -EINVAL;
}
/* if the read request was random (enough), hint it to the lower file.
* scfs_sequential_page_number is the tunable threshold.
* filemap.c will later consult this FMODE_RANDOM flag.
*/
spin_lock(&lower_file->f_lock);
if (nr_pages > sbi->scfs_sequential_page_number)
lower_file->f_mode &= ~FMODE_RANDOM;
else
lower_file->f_mode |= FMODE_RANDOM;
spin_unlock(&lower_file->f_lock);
#endif
lower_file = scfs_lower_file(file);
page = list_entry(pages->prev, struct page, lru);
cluster_idx = page->index / (sii->cluster_size / PAGE_SIZE);
if (sii->compressed) {
mutex_lock(&sii->cinfo_mutex);
ret = get_cluster_info(file, cluster_idx, &cinfo);
mutex_unlock(&sii->cinfo_mutex);
if (ret) {
SCFS_PRINT_ERROR("err in get_cluster_info, ret : %d,"
"i_size %lld\n", ret, i_size);
return ret;
}
if (!cinfo.size || cinfo.size > sii->cluster_size) {
SCFS_PRINT_ERROR("file %s: cinfo is invalid, "
"clust %u cinfo.size %u\n",
file->f_path.dentry->d_name.name,
cluster_idx, cinfo.size);
return -EINVAL;
}
start = (pgoff_t)(cinfo.offset / PAGE_SIZE);
} else {
start = (pgoff_t)(cluster_idx * sii->cluster_size / PAGE_SIZE);
}
cluster_idx = (page->index + nr_pages - 1) / (sii->cluster_size / PAGE_SIZE);
if (sii->compressed) {
mutex_lock(&sii->cinfo_mutex);
ret = get_cluster_info(file, cluster_idx, &cinfo);
mutex_unlock(&sii->cinfo_mutex);
if (ret) {
SCFS_PRINT_ERROR("err in get_cluster_info, ret : %d,"
"i_size %lld\n", ret, i_size);
return ret;
//......... code omitted here .........
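One detail of this example worth isolating is the spin_lock(&lower_file->f_lock) section: f_mode is a plain word shared with the VFS, so even flipping one flag is a read-modify-write that needs the file's spinlock. A reduced sketch of the hinting idiom (the predicate name is hypothetical):

spin_lock(&filp->f_lock);
if (looks_sequential)			/* hypothetical heuristic */
	filp->f_mode &= ~FMODE_RANDOM;	/* let readahead ramp up */
else
	filp->f_mode |= FMODE_RANDOM;	/* filemap.c will shrink readahead */
spin_unlock(&filp->f_lock);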
Example 4: afs_d_revalidate
/*
* check that a dentry lookup hit has found a valid entry
* - NOTE! the hit can be a negative hit too, so we can't assume we have an inode
* (derived from nfs_lookup_revalidate)
*/
static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
struct afs_dir_lookup_cookie cookie;
struct dentry *parent;
struct inode *inode, *dir;
unsigned fpos;
int ret;
_enter("{sb=%p n=%s},",dentry->d_sb,dentry->d_name.name);
/* lock down the parent dentry so we can peer at it */
parent = dget_parent(dentry->d_parent);
dir = parent->d_inode;
inode = dentry->d_inode;
/* handle a negative inode */
if (!inode)
goto out_bad;
/* handle a bad inode */
if (is_bad_inode(inode)) {
printk("kAFS: afs_d_revalidate: %s/%s has bad inode\n",
dentry->d_parent->d_name.name,dentry->d_name.name);
goto out_bad;
}
/* force a full look-up if the parent directory has changed since the server was last consulted
* - otherwise this inode must still exist, even if the inode details themselves have
* changed
*/
if (AFS_FS_I(dir)->flags & AFS_VNODE_CHANGED)
afs_vnode_fetch_status(AFS_FS_I(dir));
if (AFS_FS_I(dir)->flags & AFS_VNODE_DELETED) {
_debug("%s: parent dir deleted",dentry->d_name.name);
goto out_bad;
}
if (AFS_FS_I(inode)->flags & AFS_VNODE_DELETED) {
_debug("%s: file already deleted",dentry->d_name.name);
goto out_bad;
}
if ((unsigned long)dentry->d_fsdata != (unsigned long)AFS_FS_I(dir)->status.version) {
_debug("%s: parent changed %lu -> %u",
dentry->d_name.name,
(unsigned long)dentry->d_fsdata,
(unsigned)AFS_FS_I(dir)->status.version);
/* search the directory for this vnode */
cookie.name = dentry->d_name.name;
cookie.nlen = dentry->d_name.len;
cookie.fid.vid = AFS_FS_I(inode)->volume->vid;
cookie.found = 0;
fpos = 0;
ret = afs_dir_iterate(dir,&fpos,&cookie,afs_dir_lookup_filldir);
if (ret<0) {
_debug("failed to iterate dir %s: %d",parent->d_name.name,ret);
goto out_bad;
}
if (!cookie.found) {
_debug("%s: dirent not found",dentry->d_name.name);
goto not_found;
}
/* if the vnode ID has changed, then the dirent points to a different file */
if (cookie.fid.vnode!=AFS_FS_I(inode)->fid.vnode) {
_debug("%s: dirent changed",dentry->d_name.name);
goto not_found;
}
/* if the vnode ID uniquifier has changed, then the file has been deleted */
if (cookie.fid.unique!=AFS_FS_I(inode)->fid.unique) {
_debug("%s: file deleted (uq %u -> %u I:%lu)",
dentry->d_name.name,
cookie.fid.unique,
AFS_FS_I(inode)->fid.unique,
inode->i_version);
spin_lock(&AFS_FS_I(inode)->lock);
AFS_FS_I(inode)->flags |= AFS_VNODE_DELETED;
spin_unlock(&AFS_FS_I(inode)->lock);
invalidate_remote_inode(inode);
goto out_bad;
}
dentry->d_fsdata = (void*) (unsigned long) AFS_FS_I(dir)->status.version;
}
out_valid:
dput(parent);
_leave(" = 1 [valid]");
return 1;
//......... code omitted here .........
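The spin_lock section in this example is about as small as a critical section gets, and it exists because |= on a plain integer is a read-modify-write, not an atomic operation. A sketch of the locked form used above, plus the lockless alternative the kernel offers when the flag word is an unsigned long bitmap (the bit name below is hypothetical):

/* locked form, as in the example: safe for a plain integer flag word */
spin_lock(&AFS_FS_I(inode)->lock);
AFS_FS_I(inode)->flags |= AFS_VNODE_DELETED;
spin_unlock(&AFS_FS_I(inode)->lock);

/* lockless alternative if flags were an unsigned long bitmap */
set_bit(AFS_VNODE_DELETED_BIT, &AFS_FS_I(inode)->flags);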
Example 5: ipath_error_qp
int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
{
struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
struct ib_wc wc;
int ret = 0;
if (qp->state == IB_QPS_ERR)
goto bail;
qp->state = IB_QPS_ERR;
spin_lock(&dev->pending_lock);
if (!list_empty(&qp->timerwait))
list_del_init(&qp->timerwait);
if (!list_empty(&qp->piowait))
list_del_init(&qp->piowait);
spin_unlock(&dev->pending_lock);
/* Schedule the sending tasklet to drain the send work queue. */
if (qp->s_last != qp->s_head)
ipath_schedule_send(qp);
memset(&wc, 0, sizeof(wc));
wc.qp = &qp->ibqp;
wc.opcode = IB_WC_RECV;
if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) {
wc.wr_id = qp->r_wr_id;
wc.status = err;
ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
}
wc.status = IB_WC_WR_FLUSH_ERR;
if (qp->r_rq.wq) {
struct ipath_rwq *wq;
u32 head;
u32 tail;
spin_lock(&qp->r_rq.lock);
/* sanity check pointers before trusting them */
wq = qp->r_rq.wq;
head = wq->head;
if (head >= qp->r_rq.size)
head = 0;
tail = wq->tail;
if (tail >= qp->r_rq.size)
tail = 0;
while (tail != head) {
wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
if (++tail >= qp->r_rq.size)
tail = 0;
ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
}
wq->tail = tail;
spin_unlock(&qp->r_rq.lock);
} else if (qp->ibqp.event_handler)
ret = 1;
bail:
return ret;
}
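The receive-queue drain in this example shows a defensive pattern: head and tail live in memory that may be shared (for instance, mapped to user space), so they are clamped against the queue size after being read under the lock, before being trusted. Condensed sketch (consume_entry() is a hypothetical helper):

spin_lock(&qp->r_rq.lock);
head = wq->head;
if (head >= qp->r_rq.size)	/* clamp before trusting shared indices */
	head = 0;
tail = wq->tail;
if (tail >= qp->r_rq.size)
	tail = 0;
while (tail != head) {
	consume_entry(&qp->r_rq, tail);
	if (++tail >= qp->r_rq.size)
		tail = 0;
}
wq->tail = tail;
spin_unlock(&qp->r_rq.lock);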
Example 6: ERR_PTR
//......... code omitted here .........
init_attr->qp_type == IB_QPT_SMI ||
init_attr->qp_type == IB_QPT_GSI)) {
qp->r_ud_sg_list = kmalloc(sg_list_sz, GFP_KERNEL);
if (!qp->r_ud_sg_list) {
ret = ERR_PTR(-ENOMEM);
goto bail_qp;
}
} else
qp->r_ud_sg_list = NULL;
if (init_attr->srq) {
sz = 0;
qp->r_rq.size = 0;
qp->r_rq.max_sge = 0;
qp->r_rq.wq = NULL;
init_attr->cap.max_recv_wr = 0;
init_attr->cap.max_recv_sge = 0;
} else {
qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
sizeof(struct ipath_rwqe);
qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
qp->r_rq.size * sz);
if (!qp->r_rq.wq) {
ret = ERR_PTR(-ENOMEM);
goto bail_sg_list;
}
}
/*
* ib_create_qp() will initialize qp->ibqp
* except for qp->ibqp.qp_num.
*/
spin_lock_init(&qp->s_lock);
spin_lock_init(&qp->r_rq.lock);
atomic_set(&qp->refcount, 0);
init_waitqueue_head(&qp->wait);
init_waitqueue_head(&qp->wait_dma);
tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
INIT_LIST_HEAD(&qp->piowait);
INIT_LIST_HEAD(&qp->timerwait);
qp->state = IB_QPS_RESET;
qp->s_wq = swq;
qp->s_size = init_attr->cap.max_send_wr + 1;
qp->s_max_sge = init_attr->cap.max_send_sge;
if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
else
qp->s_flags = 0;
dev = to_idev(ibpd->device);
err = ipath_alloc_qpn(&dev->qp_table, qp,
init_attr->qp_type);
if (err) {
ret = ERR_PTR(err);
vfree(qp->r_rq.wq);
goto bail_sg_list;
}
qp->ip = NULL;
qp->s_tx = NULL;
ipath_reset_qp(qp, init_attr->qp_type);
break;
default:
/* Don't support raw QPs */
ret = ERR_PTR(-ENOSYS);
goto bail;
Example 7: ERR_PTR
//......... code omitted here .........
/* we lost it */
journal_release_buffer(handle, bitmap_bh);
if (++ino < EXT3_INODES_PER_GROUP(sb))
goto repeat_in_this_group;
}
/*
* This case is possible in a concurrent environment. It is very
* rare. We cannot repeat the find_group_xxx() call because
* that will simply return the same blockgroup, because the
* group descriptor metadata has not yet been updated.
* So we just go onto the next blockgroup.
*/
if (++group == sbi->s_groups_count)
group = 0;
}
err = -ENOSPC;
goto out;
got:
ino += group * EXT3_INODES_PER_GROUP(sb) + 1;
if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
ext3_error (sb, "ext3_new_inode",
"reserved inode or inode > inodes count - "
"block_group = %d, inode=%lu", group, ino);
err = -EIO;
goto fail;
}
BUFFER_TRACE(bh2, "get_write_access");
err = ext3_journal_get_write_access(handle, bh2);
if (err) goto fail;
spin_lock(sb_bgl_lock(sbi, group));
le16_add_cpu(&gdp->bg_free_inodes_count, -1);
if (S_ISDIR(mode)) {
le16_add_cpu(&gdp->bg_used_dirs_count, 1);
}
spin_unlock(sb_bgl_lock(sbi, group));
BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
err = ext3_journal_dirty_metadata(handle, bh2);
if (err) goto fail;
percpu_counter_dec(&sbi->s_freeinodes_counter);
if (S_ISDIR(mode))
percpu_counter_inc(&sbi->s_dirs_counter);
if (test_opt(sb, GRPID)) {
inode->i_mode = mode;
inode->i_uid = current_fsuid();
inode->i_gid = dir->i_gid;
} else
inode_init_owner(inode, dir, mode);
inode->i_ino = ino;
/* This is the optimal IO size (for stat), not the fs block size */
inode->i_blocks = 0;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
memset(ei->i_data, 0, sizeof(ei->i_data));
ei->i_dir_start_lookup = 0;
ei->i_disksize = 0;
ei->i_flags =
ext3_mask_flags(mode, EXT3_I(dir)->i_flags & EXT3_FL_INHERITED);
Example 8: ext3_free_inode
/*
* NOTE! When we get the inode, we're the only people
* that have access to it, and as such there are no
* race conditions we have to worry about. The inode
* is not on the hash-lists, and it cannot be reached
* through the filesystem because the directory entry
* has been deleted earlier.
*
* HOWEVER: we must make sure that we get no aliases,
* which means that we have to call "clear_inode()"
* _before_ we mark the inode not in use in the inode
* bitmaps. Otherwise a newly created file might use
* the same inode number (not actually the same pointer
* though), and then we'd have two inodes sharing the
* same inode number and space on the hard disk.
*/
void ext3_free_inode (handle_t *handle, struct inode * inode)
{
struct super_block * sb = inode->i_sb;
int is_directory;
unsigned long ino;
struct buffer_head *bitmap_bh = NULL;
struct buffer_head *bh2;
unsigned long block_group;
unsigned long bit;
struct ext3_group_desc * gdp;
struct ext3_super_block * es;
struct ext3_sb_info *sbi;
int fatal = 0, err;
if (atomic_read(&inode->i_count) > 1) {
printk ("ext3_free_inode: inode has count=%d\n",
atomic_read(&inode->i_count));
return;
}
if (inode->i_nlink) {
printk ("ext3_free_inode: inode has nlink=%d\n",
inode->i_nlink);
return;
}
if (!sb) {
;
return;
}
sbi = EXT3_SB(sb);
ino = inode->i_ino;
ext3_debug ("freeing inode %lu\n", ino);
is_directory = S_ISDIR(inode->i_mode);
es = EXT3_SB(sb)->s_es;
if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
ext3_error (sb, "ext3_free_inode",
"reserved or nonexistent inode %lu", ino);
goto error_return;
}
block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
bit = (ino - 1) % EXT3_INODES_PER_GROUP(sb);
bitmap_bh = read_inode_bitmap(sb, block_group);
if (!bitmap_bh)
goto error_return;
BUFFER_TRACE(bitmap_bh, "get_write_access");
fatal = ext3_journal_get_write_access(handle, bitmap_bh);
if (fatal)
goto error_return;
/* Ok, now we can actually update the inode bitmaps.. */
if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
bit, bitmap_bh->b_data))
ext3_error (sb, "ext3_free_inode",
"bit already cleared for inode %lu", ino);
else {
gdp = ext3_get_group_desc (sb, block_group, &bh2);
BUFFER_TRACE(bh2, "get_write_access");
fatal = ext3_journal_get_write_access(handle, bh2);
if (fatal) goto error_return;
if (gdp) {
spin_lock(sb_bgl_lock(sbi, block_group));
le16_add_cpu(&gdp->bg_free_inodes_count, 1);
if (is_directory)
le16_add_cpu(&gdp->bg_used_dirs_count, -1);
spin_unlock(sb_bgl_lock(sbi, block_group));
percpu_counter_inc(&sbi->s_freeinodes_counter);
if (is_directory)
percpu_counter_dec(&sbi->s_dirs_counter);
}
BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
err = ext3_journal_dirty_metadata(handle, bh2);
if (!fatal) fatal = err;
}
BUFFER_TRACE(bitmap_bh, "call ext3_journal_dirty_metadata");
err = ext3_journal_dirty_metadata(handle, bitmap_bh);
if (!fatal)
fatal = err;
//......... code omitted here .........
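Examples 7 and 8 both take sb_bgl_lock(sbi, group) rather than one filesystem-wide lock: each block group hashes to its own spinlock, so inode allocation and freeing in different groups do not contend. Roughly how that helper is defined (simplified from include/linux/blockgroup_lock.h; treat this as a sketch, not the exact source):

static inline spinlock_t *sb_bgl_lock(struct ext3_sb_info *sbi,
				      unsigned int block_group)
{
	/* pick one lock out of a small per-sb array, hashed by group */
	return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
}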
Example 9: interrupt_handler
static irqreturn_t interrupt_handler(int irq, void *dev_id)
{
struct nozomi *dc = dev_id;
unsigned int a;
u16 read_iir;
if (!dc)
return IRQ_NONE;
spin_lock(&dc->spin_mutex);
read_iir = readw(dc->reg_iir);
/* Card removed */
if (read_iir == (u16)-1)
goto none;
/*
* Just handle interrupt enabled in IER
* (by masking with dc->last_ier)
*/
read_iir &= dc->last_ier;
if (read_iir == 0)
goto none;
DBG4("%s irq:0x%04X, prev:0x%04X", interrupt2str(read_iir), read_iir,
dc->last_ier);
if (read_iir & RESET) {
if (unlikely(!nozomi_read_config_table(dc))) {
dc->last_ier = 0x0;
writew(dc->last_ier, dc->reg_ier);
dev_err(&dc->pdev->dev, "Could not read status from "
"card, we should disable interface\n");
} else {
writew(RESET, dc->reg_fcr);
}
/* No more useful info if this was the reset interrupt. */
goto exit_handler;
}
if (read_iir & CTRL_UL) {
DBG1("CTRL_UL");
dc->last_ier &= ~CTRL_UL;
writew(dc->last_ier, dc->reg_ier);
if (send_flow_control(dc)) {
writew(CTRL_UL, dc->reg_fcr);
dc->last_ier = dc->last_ier | CTRL_UL;
writew(dc->last_ier, dc->reg_ier);
}
}
if (read_iir & CTRL_DL) {
receive_flow_control(dc);
writew(CTRL_DL, dc->reg_fcr);
}
if (read_iir & MDM_DL) {
if (!handle_data_dl(dc, PORT_MDM,
&(dc->port[PORT_MDM].toggle_dl), read_iir,
MDM_DL1, MDM_DL2)) {
dev_err(&dc->pdev->dev, "MDM_DL out of sync!\n");
goto exit_handler;
}
}
if (read_iir & MDM_UL) {
if (!handle_data_ul(dc, PORT_MDM, read_iir)) {
dev_err(&dc->pdev->dev, "MDM_UL out of sync!\n");
goto exit_handler;
}
}
if (read_iir & DIAG_DL) {
if (!handle_data_dl(dc, PORT_DIAG,
&(dc->port[PORT_DIAG].toggle_dl), read_iir,
DIAG_DL1, DIAG_DL2)) {
dev_err(&dc->pdev->dev, "DIAG_DL out of sync!\n");
goto exit_handler;
}
}
if (read_iir & DIAG_UL) {
dc->last_ier &= ~DIAG_UL;
writew(dc->last_ier, dc->reg_ier);
if (send_data(PORT_DIAG, dc)) {
writew(DIAG_UL, dc->reg_fcr);
dc->last_ier = dc->last_ier | DIAG_UL;
writew(dc->last_ier, dc->reg_ier);
}
}
if (read_iir & APP1_DL) {
if (receive_data(PORT_APP1, dc))
writew(APP1_DL, dc->reg_fcr);
}
if (read_iir & APP1_UL) {
dc->last_ier &= ~APP1_UL;
writew(dc->last_ier, dc->reg_ier);
if (send_data(PORT_APP1, dc)) {
writew(APP1_UL, dc->reg_fcr);
dc->last_ier = dc->last_ier | APP1_UL;
writew(dc->last_ier, dc->reg_ier);
}
}
if (read_iir & APP2_DL) {
if (receive_data(PORT_APP2, dc))
//......... code omitted here .........
Example 10: fd_link_ioctl
static long
fd_link_ioctl (struct file *f, unsigned int ioctl, unsigned long arg)
{
void __user *argp = (void __user *) arg;
struct task_struct *task_target = NULL;
struct file *file;
struct files_struct *files;
struct fdtable *fdt;
struct fd_copy fd_copy;
switch (ioctl)
{
case FD_COPY:
if (copy_from_user (&fd_copy, argp, sizeof (struct fd_copy)))
return -EFAULT;
/*
* Find the task struct for the target pid
*/
task_target =
pid_task (find_vpid (fd_copy.target_pid), PIDTYPE_PID);
if (task_target == NULL)
{
printk (KERN_DEBUG "Failed to get mem ctx for target pid\n");
return -EFAULT;
}
files = get_files_struct (current);
if (files == NULL)
{
printk (KERN_DEBUG "Failed to get files struct\n");
return -EFAULT;
}
rcu_read_lock ();
file = fcheck_files (files, fd_copy.source_fd);
if (file)
{
if (file->f_mode & FMODE_PATH
|| !atomic_long_inc_not_zero (&file->f_count))
file = NULL;
}
rcu_read_unlock ();
put_files_struct (files);
if (file == NULL)
{
printk (KERN_DEBUG "Failed to get file from source pid\n");
return 0;
}
/*
* Release the existing fd in the source process
*/
spin_lock (&files->file_lock);
filp_close (file, files);
fdt = files_fdtable (files);
fdt->fd[fd_copy.source_fd] = NULL;
spin_unlock (&files->file_lock);
/*
* Find the file struct associated with the target fd.
*/
files = get_files_struct (task_target);
if (files == NULL)
{
printk (KERN_DEBUG "Failed to get files struct\n");
return -EFAULT;
}
rcu_read_lock ();
file = fcheck_files (files, fd_copy.target_fd);
if (file)
{
if (file->f_mode & FMODE_PATH
|| !atomic_long_inc_not_zero (&file->f_count))
file = NULL;
}
rcu_read_unlock ();
put_files_struct (files);
if (file == NULL)
{
printk (KERN_DEBUG "Failed to get file from target pid\n");
return 0;
}
/*
* Install the file struct from the target process into the
* file descriptor of the source process.
*/
fd_install (fd_copy.source_fd, file);
return 0;
default:
return -ENOIOCTLCMD;
//......... code omitted here .........
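The fd lookup in this example is the standard RCU pattern for pinning a struct file: find it under rcu_read_lock(), then try to take a reference with atomic_long_inc_not_zero(), which fails if the file is concurrently going away. Reduced sketch:

rcu_read_lock();
file = fcheck_files(files, fd);
if (file) {
	if (file->f_mode & FMODE_PATH ||
	    !atomic_long_inc_not_zero(&file->f_count))
		file = NULL;	/* lost the race with close(); treat as absent */
}
rcu_read_unlock();

The spin_lock(&files->file_lock) later in the example is the writer side of the same scheme: the fdtable itself may only be modified while file_lock is held.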
Example 11: mdss_dsi_isr
irqreturn_t mdss_dsi_isr(int irq, void *ptr)
{
u32 isr;
struct mdss_dsi_ctrl_pdata *ctrl =
(struct mdss_dsi_ctrl_pdata *)ptr;
if (!ctrl->ctrl_base)
pr_err("%s:%d DSI base adr no Initialized",
__func__, __LINE__);
isr = MIPI_INP(ctrl->ctrl_base + 0x0110);/* DSI_INTR_CTRL */
MIPI_OUTP(ctrl->ctrl_base + 0x0110, isr);
if (ctrl->shared_pdata.broadcast_enable)
if ((ctrl->panel_data.panel_info.pdest == DISPLAY_2)
&& (left_ctrl_pdata != NULL)) {
u32 isr0;
isr0 = MIPI_INP(left_ctrl_pdata->ctrl_base
+ 0x0110);/* DSI_INTR_CTRL */
MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0110, isr0);
}
pr_debug("%s: ndx=%d isr=%x\n", __func__, ctrl->ndx, isr);
if (isr & DSI_INTR_ERROR) {
#ifdef F_WA_WATCHDOG_DURING_BOOTUP
if(ctrl->octa_blck_set)
#endif
pr_err("%s: ndx=%d isr=%x\n", __func__, ctrl->ndx, isr);
mdss_dsi_error(ctrl);
}
if (isr & DSI_INTR_VIDEO_DONE) {
spin_lock(&ctrl->mdp_lock);
mdss_dsi_disable_irq_nosync(ctrl, DSI_VIDEO_TERM);
complete(&ctrl->video_comp);
spin_unlock(&ctrl->mdp_lock);
}
if (isr & DSI_INTR_CMD_DMA_DONE) {
spin_lock(&ctrl->mdp_lock);
mdss_dsi_disable_irq_nosync(ctrl, DSI_CMD_TERM);
complete(&ctrl->dma_comp);
spin_unlock(&ctrl->mdp_lock);
}
if (isr & DSI_INTR_CMD_MDP_DONE) {
spin_lock(&ctrl->mdp_lock);
ctrl->mdp_busy = false;
mdss_dsi_disable_irq_nosync(ctrl, DSI_MDP_TERM);
complete(&ctrl->mdp_comp);
spin_unlock(&ctrl->mdp_lock);
}
if (isr & DSI_INTR_BTA_DONE) {
spin_lock(&ctrl->mdp_lock);
mdss_dsi_disable_irq_nosync(ctrl, DSI_BTA_TERM);
complete(&ctrl->bta_comp);
spin_unlock(&ctrl->mdp_lock);
}
return IRQ_HANDLED;
}
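This ISR takes mdp_lock with plain spin_lock(), which is correct here because hard-irq context already runs with interrupts disabled; the thread-side waiter must use the _irqsave form. A sketch of the waiter such an ISR typically pairs with (all names here are hypothetical; the real driver's wait path differs):

static int demo_wait_dma_done(struct demo_ctrl *ctrl)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl->mdp_lock, flags);
	reinit_completion(&ctrl->dma_comp);	/* arm before enabling irq */
	demo_enable_irq(ctrl, DEMO_DMA_TERM);	/* hypothetical helper */
	spin_unlock_irqrestore(&ctrl->mdp_lock, flags);

	if (!wait_for_completion_timeout(&ctrl->dma_comp, HZ))
		return -ETIMEDOUT;
	return 0;
}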
Example 12: sid_to_id
static int
sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
struct cifs_fattr *fattr, uint sidtype)
{
int rc;
unsigned long cid;
struct key *idkey;
const struct cred *saved_cred;
struct cifs_sid_id *psidid, *npsidid;
struct rb_root *cidtree;
spinlock_t *cidlock;
if (sidtype == SIDOWNER) {
cid = cifs_sb->mnt_uid; /* default uid, in case upcall fails */
cidlock = &siduidlock;
cidtree = &uidtree;
} else if (sidtype == SIDGROUP) {
cid = cifs_sb->mnt_gid; /* default gid, in case upcall fails */
cidlock = &sidgidlock;
cidtree = &gidtree;
} else
return -ENOENT;
spin_lock(cidlock);
psidid = id_rb_search(cidtree, psid);
if (!psidid) { /* node does not exist, allocate one & attempt adding */
spin_unlock(cidlock);
npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
if (!npsidid)
return -ENOMEM;
npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
if (!npsidid->sidstr) {
kfree(npsidid);
return -ENOMEM;
}
spin_lock(cidlock);
psidid = id_rb_search(cidtree, psid);
if (psidid) { /* node happened to get inserted meanwhile */
++psidid->refcount;
spin_unlock(cidlock);
kfree(npsidid->sidstr);
kfree(npsidid);
} else {
psidid = npsidid;
id_rb_insert(cidtree, psid, &psidid,
sidtype == SIDOWNER ? "os:" : "gs:");
++psidid->refcount;
spin_unlock(cidlock);
}
} else {
++psidid->refcount;
spin_unlock(cidlock);
}
/*
* If we are here, it is safe to access psidid and its fields
* since a reference was taken earlier while holding the spinlock.
* A reference on the node is put without holding the spinlock
* and it is OK to do so in this case, shrinker will not erase
* this node until all references are put and we do not access
* any fields of the node after a reference is put.
*/
if (test_bit(SID_ID_MAPPED, &psidid->state)) {
cid = psidid->id;
psidid->time = jiffies; /* update ts for accessing */
goto sid_to_id_out;
}
if (time_after(psidid->time + SID_MAP_RETRY, jiffies))
goto sid_to_id_out;
if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
saved_cred = override_creds(root_cred);
idkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
if (IS_ERR(idkey))
cFYI(1, "%s: Can't map SID to an id", __func__);
else {
cid = *(unsigned long *)idkey->payload.value;
psidid->id = cid;
set_bit(SID_ID_MAPPED, &psidid->state);
key_put(idkey);
kfree(psidid->sidstr);
}
revert_creds(saved_cred);
psidid->time = jiffies; /* update ts for accessing */
clear_bit(SID_ID_PENDING, &psidid->state);
wake_up_bit(&psidid->state, SID_ID_PENDING);
} else {
rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
sidid_pending_wait, TASK_INTERRUPTIBLE);
if (rc) {
cFYI(1, "%s: sidid_pending_wait interrupted %d",
__func__, rc);
--psidid->refcount; /* decremented without spinlock */
return rc;
}
if (test_bit(SID_ID_MAPPED, &psidid->state))
//......... code omitted here .........
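Examples 12 and 13 share a classic spinlock idiom: allocate outside the lock (kzalloc with GFP_KERNEL may sleep, so it must not run under a spinlock), re-search under the lock, then either insert the new node or discard it if another thread won the race. Stripped to its skeleton (tree_search()/tree_insert() are hypothetical stand-ins for the id_rb_* helpers):

new = kzalloc(sizeof(*new), GFP_KERNEL);	/* sleeping alloc: lock not held */
if (!new)
	return -ENOMEM;
spin_lock(lock);
node = tree_search(tree, key);
if (node) {				/* raced: someone inserted first */
	++node->refcount;
	spin_unlock(lock);
	kfree(new);			/* discard our copy */
} else {
	node = new;
	tree_insert(tree, key, node);
	++node->refcount;
	spin_unlock(lock);
}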
Example 13: id_to_sid
static int
id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
{
int rc = 0;
struct key *sidkey;
const struct cred *saved_cred;
struct cifs_sid *lsid;
struct cifs_sid_id *psidid, *npsidid;
struct rb_root *cidtree;
spinlock_t *cidlock;
if (sidtype == SIDOWNER) {
cidlock = &siduidlock;
cidtree = &uidtree;
} else if (sidtype == SIDGROUP) {
cidlock = &sidgidlock;
cidtree = &gidtree;
} else
return -EINVAL;
spin_lock(cidlock);
psidid = sid_rb_search(cidtree, cid);
if (!psidid) { /* node does not exist, allocate one & attempt adding */
spin_unlock(cidlock);
npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
if (!npsidid)
return -ENOMEM;
npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
if (!npsidid->sidstr) {
kfree(npsidid);
return -ENOMEM;
}
spin_lock(cidlock);
psidid = sid_rb_search(cidtree, cid);
if (psidid) { /* node happened to get inserted meanwhile */
++psidid->refcount;
spin_unlock(cidlock);
kfree(npsidid->sidstr);
kfree(npsidid);
} else {
psidid = npsidid;
sid_rb_insert(cidtree, cid, &psidid,
sidtype == SIDOWNER ? "oi:" : "gi:");
++psidid->refcount;
spin_unlock(cidlock);
}
} else {
++psidid->refcount;
spin_unlock(cidlock);
}
/*
* If we are here, it is safe to access psidid and its fields
* since a reference was taken earlier while holding the spinlock.
* A reference on the node is put without holding the spinlock
* and it is OK to do so in this case, shrinker will not erase
* this node until all references are put and we do not access
* any fields of the node after a reference is put.
*/
if (test_bit(SID_ID_MAPPED, &psidid->state)) {
memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
psidid->time = jiffies; /* update ts for accessing */
goto id_sid_out;
}
if (time_after(psidid->time + SID_MAP_RETRY, jiffies)) {
rc = -EINVAL;
goto id_sid_out;
}
if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
saved_cred = override_creds(root_cred);
sidkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
if (IS_ERR(sidkey)) {
rc = -EINVAL;
cFYI(1, "%s: Can't map and id to a SID", __func__);
} else {
lsid = (struct cifs_sid *)sidkey->payload.data;
memcpy(&psidid->sid, lsid,
sidkey->datalen < sizeof(struct cifs_sid) ?
sidkey->datalen : sizeof(struct cifs_sid));
memcpy(ssid, &psidid->sid,
sidkey->datalen < sizeof(struct cifs_sid) ?
sidkey->datalen : sizeof(struct cifs_sid));
set_bit(SID_ID_MAPPED, &psidid->state);
key_put(sidkey);
kfree(psidid->sidstr);
}
psidid->time = jiffies; /* update ts for accessing */
revert_creds(saved_cred);
clear_bit(SID_ID_PENDING, &psidid->state);
wake_up_bit(&psidid->state, SID_ID_PENDING);
} else {
rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
sidid_pending_wait, TASK_INTERRUPTIBLE);
if (rc) {
cFYI(1, "%s: sidid_pending_wait interrupted %d",
//......... code omitted here .........
Example 14: journal_submit_data_buffers
/*
* Submit all the data buffers to disk
*/
static void journal_submit_data_buffers(journal_t *journal,
transaction_t *commit_transaction)
{
struct journal_head *jh;
struct buffer_head *bh;
int locked;
int bufs = 0;
struct buffer_head **wbuf = journal->j_wbuf;
/*
* Whenever we unlock the journal and sleep, things can get added
* onto ->t_sync_datalist, so we have to keep looping back to
* write_out_data until we *know* that the list is empty.
*
* Cleanup any flushed data buffers from the data list. Even in
* abort mode, we want to flush this out as soon as possible.
*/
write_out_data:
cond_resched();
spin_lock(&journal->j_list_lock);
while (commit_transaction->t_sync_datalist) {
jh = commit_transaction->t_sync_datalist;
bh = jh2bh(jh);
locked = 0;
/* Get reference just to make sure buffer does not disappear
* when we are forced to drop various locks */
get_bh(bh);
/* If the buffer is dirty, we need to submit IO and hence
* we need the buffer lock. We try to lock the buffer without
* blocking. If we fail, we need to drop j_list_lock and do
* blocking lock_buffer().
*/
if (buffer_dirty(bh)) {
if (test_set_buffer_locked(bh)) {
BUFFER_TRACE(bh, "needs blocking lock");
spin_unlock(&journal->j_list_lock);
/* Write out all data to prevent deadlocks */
journal_do_submit_data(wbuf, bufs);
bufs = 0;
lock_buffer(bh);
spin_lock(&journal->j_list_lock);
}
locked = 1;
}
/* We have to get bh_state lock. Again out of order, sigh. */
if (!inverted_lock(journal, bh)) {
jbd_lock_bh_state(bh);
spin_lock(&journal->j_list_lock);
}
/* Someone already cleaned up the buffer? */
if (!buffer_jbd(bh)
|| jh->b_transaction != commit_transaction
|| jh->b_jlist != BJ_SyncData) {
jbd_unlock_bh_state(bh);
if (locked)
unlock_buffer(bh);
BUFFER_TRACE(bh, "already cleaned up");
put_bh(bh);
continue;
}
if (locked && test_clear_buffer_dirty(bh)) {
BUFFER_TRACE(bh, "needs writeout, adding to array");
wbuf[bufs++] = bh;
__jbd2_journal_file_buffer(jh, commit_transaction,
BJ_Locked);
jbd_unlock_bh_state(bh);
if (bufs == journal->j_wbufsize) {
spin_unlock(&journal->j_list_lock);
journal_do_submit_data(wbuf, bufs);
bufs = 0;
goto write_out_data;
}
} else if (!locked && buffer_locked(bh)) {
__jbd2_journal_file_buffer(jh, commit_transaction,
BJ_Locked);
jbd_unlock_bh_state(bh);
put_bh(bh);
} else {
BUFFER_TRACE(bh, "writeout complete: unfile");
__jbd2_journal_unfile_buffer(jh);
jbd_unlock_bh_state(bh);
if (locked)
unlock_buffer(bh);
jbd2_journal_remove_journal_head(bh);
/* Once for our safety reference, once for
* jbd2_journal_remove_journal_head() */
put_bh(bh);
put_bh(bh);
}
if (lock_need_resched(&journal->j_list_lock)) {
spin_unlock(&journal->j_list_lock);
goto write_out_data;
}
}
//......... code omitted here .........
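The comment at the top of this example names the key constraint: anything that can block (lock_buffer(), submitting I/O) must happen with j_list_lock dropped, and once the lock has been dropped the list may have changed, so the walk restarts from the head. The skeleton of that drop-and-restart idiom (helper names hypothetical):

restart:
	spin_lock(&journal->j_list_lock);
	while ((item = first_pending(journal)) != NULL) {
		if (needs_blocking_work(item)) {
			spin_unlock(&journal->j_list_lock);
			do_blocking_work(item);		/* may sleep */
			goto restart;	/* list may have mutated meanwhile */
		}
		handle_locked(item);
	}
	spin_unlock(&journal->j_list_lock);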
Example 15: xuartps_isr
/**
* xuartps_isr - Interrupt handler
* @irq: Irq number
* @dev_id: Id of the port
*
* Returns IRQ_HANDLED
**/
static irqreturn_t xuartps_isr(int irq, void *dev_id)
{
struct uart_port *port = (struct uart_port *)dev_id;
unsigned long flags;
unsigned int isrstatus, numbytes;
unsigned int data;
char status = TTY_NORMAL;
spin_lock_irqsave(&port->lock, flags);
/* Read the interrupt status register to determine which
* interrupt(s) is/are active.
*/
isrstatus = xuartps_readl(XUARTPS_ISR_OFFSET);
/* drop byte with parity error if IGNPAR specified */
if (isrstatus & port->ignore_status_mask & XUARTPS_IXR_PARITY)
isrstatus &= ~(XUARTPS_IXR_RXTRIG | XUARTPS_IXR_TOUT);
isrstatus &= port->read_status_mask;
isrstatus &= ~port->ignore_status_mask;
if ((isrstatus & XUARTPS_IXR_TOUT) ||
(isrstatus & XUARTPS_IXR_RXTRIG)) {
/* Receive Timeout Interrupt */
while ((xuartps_readl(XUARTPS_SR_OFFSET) &
XUARTPS_SR_RXEMPTY) != XUARTPS_SR_RXEMPTY) {
data = xuartps_readl(XUARTPS_FIFO_OFFSET);
port->icount.rx++;
if (isrstatus & XUARTPS_IXR_PARITY) {
port->icount.parity++;
status = TTY_PARITY;
} else if (isrstatus & XUARTPS_IXR_FRAMING) {
port->icount.frame++;
status = TTY_FRAME;
} else if (isrstatus & XUARTPS_IXR_OVERRUN)
port->icount.overrun++;
uart_insert_char(port, isrstatus, XUARTPS_IXR_OVERRUN,
data, status);
}
spin_unlock(&port->lock);
tty_flip_buffer_push(&port->state->port);
spin_lock(&port->lock);
}
/* Dispatch an appropriate handler */
if ((isrstatus & XUARTPS_IXR_TXEMPTY) == XUARTPS_IXR_TXEMPTY) {
if (uart_circ_empty(&port->state->xmit)) {
xuartps_writel(XUARTPS_IXR_TXEMPTY,
XUARTPS_IDR_OFFSET);
} else {
numbytes = port->fifosize;
/* Break if no more data available in the UART buffer */
while (numbytes--) {
if (uart_circ_empty(&port->state->xmit))
break;
/* Get the data from the UART circular buffer
* and write it to the xuartps's TX_FIFO
* register.
*/
xuartps_writel(
port->state->xmit.buf[port->state->xmit.
tail], XUARTPS_FIFO_OFFSET);
port->icount.tx++;
/* Adjust the tail of the UART buffer and wrap
* the buffer if it reaches limit.
*/
port->state->xmit.tail =
(port->state->xmit.tail + 1) & \
(UART_XMIT_SIZE - 1);
}
if (uart_circ_chars_pending(
&port->state->xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
}
}
xuartps_writel(isrstatus, XUARTPS_ISR_OFFSET);
/* be sure to release the lock and tty before leaving */
spin_unlock_irqrestore(&port->lock, flags);
return IRQ_HANDLED;
}
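A final detail worth noting in this last example: the port lock is released around tty_flip_buffer_push(), because the tty layer can take its own locks and calling into it with port->lock held risks lock-order inversion. The unlock/call/relock sequence, isolated:

spin_unlock(&port->lock);
tty_flip_buffer_push(&port->state->port);	/* may take tty-layer locks */
spin_lock(&port->lock);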