

C++ down_read_trylock Function Code Examples

This article collects typical usage examples of the down_read_trylock function (a Linux kernel read-write semaphore primitive, so the examples are kernel C code). If you have been wondering what down_read_trylock does, how to call it, or what real-world uses look like, the hand-picked code examples below should help.


A total of 15 code examples of down_read_trylock are shown below, sorted by popularity by default.
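Before the examples, here is a minimal sketch of the common calling pattern, assuming an illustrative semaphore and data field (my_sem, my_shared_value, my_read_value are not taken from any project below): down_read_trylock() returns nonzero when the read lock was acquired and 0 when it is contended, and every successful trylock must be paired with up_read().

static DECLARE_RWSEM(my_sem);		/* illustrative: semaphore protecting my_shared_value */
static int my_shared_value;

static int my_read_value(int *out)
{
	/* Non-blocking attempt: returns 1 on success, 0 if the lock is contended. */
	if (!down_read_trylock(&my_sem))
		return -EBUSY;		/* caller may retry or fall back to down_read() */

	*out = my_shared_value;		/* read-side critical section */

	up_read(&my_sem);		/* pair every successful trylock with up_read() */
	return 0;
}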

Example 1: down_read_trylock_init

int __init down_read_trylock_init(void) 
{
	int ret;
	init_rwsem( &rwsem );   	// initialize the read-write semaphore
	printk("<0>after init_rwsem, count: %ld\n",rwsem.count);

	if( EXEC_DOWN_WRITE )
		down_write( &rwsem );   // a writer takes the semaphore first

	ret = down_read_trylock( &rwsem ); 	// a reader tries to take the semaphore without blocking
	if(ret)
	{
		printk("<0>first down_read_trylock, count: %ld\n",rwsem.count);
		ret = down_read_trylock( &rwsem );   
		printk("<0>second down_read_trylock, count: %ld\n",rwsem.count);

		while( rwsem.count != 0 )
		{
		up_read( &rwsem );     	// the reader releases the semaphore
		}
	}	
	else
		printk("<0>down_read_trylock failed!\n");

	return 0;
}
Developer: fjrti, Project: snippets, Lines: 26, Source file: down_read_trylock.c
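Example 1 is an excerpt: rwsem and EXEC_DOWN_WRITE are defined elsewhere in the original file, and the module_init/module_exit plumbing is not shown. A minimal, assumed set of definitions that would let the snippet build as a standalone module might look like this (the exit-handler name is illustrative):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/rwsem.h>

#define EXEC_DOWN_WRITE 1		/* assumed: 1 = let a writer grab the lock before the trylock */

static struct rw_semaphore rwsem;	/* the semaphore used by down_read_trylock_init() */

static void __exit down_read_trylock_exit(void)
{
	printk("<0>down_read_trylock module exit\n");
}

module_init(down_read_trylock_init);
module_exit(down_read_trylock_exit);
MODULE_LICENSE("GPL");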

Example 2: ocfs2_readpages

static int ocfs2_readpages(struct file *filp, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	int ret, err = -EIO;
	struct inode *inode = mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	loff_t start;
	struct page *last;

	ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK);
	if (ret)
		return err;

	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
		ocfs2_inode_unlock(inode, 0);
		return err;
	}

	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		goto out_unlock;

	last = list_entry(pages->prev, struct page, lru);
	start = (loff_t)last->index << PAGE_CACHE_SHIFT;
	if (start >= i_size_read(inode))
		goto out_unlock;

	err = mpage_readpages(mapping, pages, nr_pages, ocfs2_get_block);

out_unlock:
	up_read(&oi->ip_alloc_sem);
	ocfs2_inode_unlock(inode, 0);

	return err;
}
Developer: MiniBlu, Project: cm11_kernel_htc_msm8974a3ul, Lines: 34, Source file: aops.c

Example 3: ocfs2_readpage

static int ocfs2_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int ret, unlock = 1;

	mlog_entry("(0x%p, %lu)\n", file, (page ? page->index : 0));

	ret = ocfs2_meta_lock_with_page(inode, NULL, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out;
	}

	if (down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem) == 0) {
		ret = AOP_TRUNCATED_PAGE;
		goto out_meta_unlock;
	}

	/*
	 * i_size might have just been updated as we grabbed the meta lock.  We
	 * might now be discovering a truncate that hit on another node.
	 * block_read_full_page->get_block freaks out if it is asked to read
	 * beyond the end of a file, so we check here.  Callers
	 * (generic_file_read, vm_ops->fault) are clever enough to check i_size
	 * and notice that the page they just read isn't needed.
	 *
	 * XXX sys_readahead() seems to get that wrong?
	 */
	if (start >= i_size_read(inode)) {
		zero_user_page(page, 0, PAGE_SIZE, KM_USER0);
		SetPageUptodate(page);
		ret = 0;
		goto out_alloc;
	}

	ret = ocfs2_data_lock_with_page(inode, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out_alloc;
	}

	ret = block_read_full_page(page, ocfs2_get_block);
	unlock = 0;

	ocfs2_data_unlock(inode, 0);
out_alloc:
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
out_meta_unlock:
	ocfs2_meta_unlock(inode, 0);
out:
	if (unlock)
		unlock_page(page);
	mlog_exit(ret);
	return ret;
}
Developer: robacklin, Project: nxc2620, Lines: 60, Source file: aops.c

Example 4: nilfs_test_bdev_super2

static int nilfs_test_bdev_super2(struct super_block *s, void *data)
{
	struct nilfs_super_data *sd = data;
	int ret;

	if (s->s_bdev != sd->bdev)
		return 0;

	if (!((s->s_flags | sd->flags) & MS_RDONLY))
		return 1; /* Reuse an old R/W-mode super_block */

	if (s->s_flags & sd->flags & MS_RDONLY) {
		if (down_read_trylock(&s->s_umount)) {
			ret = s->s_root &&
				(sd->cno == NILFS_SB(s)->s_snapshot_cno);
			up_read(&s->s_umount);
			/*
			 * This path is locked with sb_lock by sget().
			 * So, drop_super() causes deadlock.
			 */
			return ret;
		}
	}
	return 0;
}
Developer: mikeberkelaar, Project: grhardened, Lines: 25, Source file: super.c

Example 5: xfs_sync_worker

/*
 * Every sync period we need to unpin all items, reclaim inodes and sync
 * disk quotas.  We might need to cover the log to indicate that the
 * filesystem is idle and not frozen.
 */
STATIC void
xfs_sync_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_sync_work);
	int		error;

	/*
	 * We shouldn't write/force the log if we are in the mount/unmount
	 * process or on a read only filesystem. The workqueue still needs to be
	 * active in both cases, however, because it is used for inode reclaim
	 * during these times.  Use the s_umount semaphore to provide exclusion
	 * with unmount.
	 */
	if (down_read_trylock(&mp->m_super->s_umount)) {
		if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
			/* dgc: errors ignored here */
			if (mp->m_super->s_frozen == SB_UNFROZEN &&
			    xfs_log_need_covered(mp))
				error = xfs_fs_log_dummy(mp);
			else
				xfs_log_force(mp, 0);

			/* start pushing all the metadata that is currently
			 * dirty */
			xfs_ail_push_all(mp->m_ail);
		}
		up_read(&mp->m_super->s_umount);
	}

	/* queue us up again */
	xfs_syncd_queue_sync(mp);
}
Developer: AsherBond, Project: ceph-client, Lines: 39, Source file: xfs_sync.c

Example 6: unuse_mm

static int unuse_mm(struct mm_struct *mm,
				swp_entry_t entry, struct page *page)
{
	struct vm_area_struct *vma;

	if (!down_read_trylock(&mm->mmap_sem)) {
		/*
		 * Activate page so shrink_cache is unlikely to unmap its
		 * ptes while lock is dropped, so swapoff can make progress.
		 */
		activate_page(page);
		unlock_page(page);
		down_read(&mm->mmap_sem);
		lock_page(page);
	}
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->anon_vma && unuse_vma(vma, entry, page))
			break;
	}
	up_read(&mm->mmap_sem);
	/*
	 * Currently unuse_mm cannot fail, but leave error handling
	 * at call sites for now, since we change it from time to time.
	 */
	return 0;
}
Developer: acassis, Project: emlinux-ssd1935, Lines: 26, Source file: swapfile.c

Example 7: try_to_get_nonexclusive_access

/**
 * try_to_get_nonexclusive_access - try to get nonexclusive access to a file
 * @uf_info: unix file specific part of inode to obtain access to
 *
 * Non-blocking version of nonexclusive access obtaining.
 */
int try_to_get_nonexclusive_access(struct unix_file_info *uf_info)
{
	int result;

	result = down_read_trylock(&uf_info->latch);
	if (result)
		nea_grabbed(uf_info);
	return result;
}
Developer: mgross029, Project: android_kernel, Lines: 15, Source file: tail_conversion.c
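Because try_to_get_nonexclusive_access() simply propagates the trylock result (nonzero on success, 0 on contention), a caller that must not block inside the trylock can fall back to the blocking variant. A hedged caller sketch, assuming reiser4's blocking get_nonexclusive_access() and a matching drop_nonexclusive_access() release helper:

/* Illustrative caller: prefer the non-blocking path, fall back to blocking. */
static void access_file_body(struct unix_file_info *uf_info)
{
	if (!try_to_get_nonexclusive_access(uf_info)) {
		/* Lock is contended; take the blocking path instead (assumed helper). */
		get_nonexclusive_access(uf_info);
	}

	/* ... read-side work on the file body ... */

	drop_nonexclusive_access(uf_info);	/* assumed release counterpart */
}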

Example 8: l4x_evict_tasks

void l4x_evict_tasks(struct task_struct *exclude)
{
	struct task_struct *p;
	int cnt = 0;

	rcu_read_lock();
	for_each_process(p) {
		l4_cap_idx_t t;
		struct mm_struct *mm;

		if (p == exclude)
			continue;

		task_lock(p);
		mm = p->mm;
		if (!mm) {
			task_unlock(p);
			continue;
		}

		t = ACCESS_ONCE(mm->context.task);

		if (l4_is_invalid_cap(t)) {
			task_unlock(p);
			continue;
		}

		if (down_read_trylock(&mm->mmap_sem)) {
			struct vm_area_struct *vma;
			for (vma = mm->mmap; vma; vma = vma->vm_next)
				if (vma->vm_flags & VM_LOCKED) {
					t = L4_INVALID_CAP;
					break;
				}
			up_read(&mm->mmap_sem);
			if (!vma)
				if (cmpxchg(&mm->context.task, t, L4_INVALID_CAP) != t)
					t = L4_INVALID_CAP;
		} else
			t = L4_INVALID_CAP;

		task_unlock(p);

		if (!l4_is_invalid_cap(t)) {
			l4lx_task_delete_task(t);
			l4lx_task_number_free(t);

			if (++cnt > 10)
				break;
		}
	}
	rcu_read_unlock();

	if (cnt == 0)
		pr_info_ratelimited("l4x-evict: Found no process to free.\n");
}
Developer: michas2, Project: l4re-snapshot, Lines: 56, Source file: process_gen.c

Example 9: cfs_access_process_vm

static int cfs_access_process_vm(struct task_struct *tsk,
				 struct mm_struct *mm,
				 unsigned long addr,
				 void *buf, int len, int write)
{
	/* Just copied from the kernel for kernels which don't
	 * have access_process_vm() exported */
	struct vm_area_struct *vma;
	struct page *page;
	void *old_buf = buf;

	/* Avoid deadlocks on mmap_sem if called from sys_mmap_pgoff(),
	 * which is already holding mmap_sem for writes.  If some other
	 * thread gets the write lock in the meantime, this thread will
	 * block, but at least it won't deadlock on itself.  LU-1735 */
	if (down_read_trylock(&mm->mmap_sem) == 0)
		return -EDEADLK;

	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, rc, offset;
		void *maddr;

		rc = get_user_pages(tsk, mm, addr, 1,
				     write, 1, &page, &vma);
		if (rc <= 0)
			break;

		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;

		maddr = kmap(page);
		if (write) {
			copy_to_user_page(vma, page, addr,
					  maddr + offset, buf, bytes);
			set_page_dirty_lock(page);
		} else {
			copy_from_user_page(vma, page, addr,
					    buf, maddr + offset, bytes);
		}
		kunmap(page);
		page_cache_release(page);
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);

	return buf - old_buf;
}
Developer: Keeper-of-the-Keys, Project: Lustre, Lines: 52, Source file: linux-curproc.c

Example 10: foo_show

static ssize_t foo_show(struct kobject *kobj, struct kobj_attribute *attr,
	char *buf)
{
	size_t count;

	/* down_read_trylock returns zero on failure */
	if (!down_read_trylock(&rw_sem))
		return -ERESTARTSYS;

	count = sprintf(buf, "%s\n", foo_kbuff);

	up_read(&rw_sem);
	return count;
}
Developer: rahulraghu94, Project: linux-device-drivers, Lines: 14, Source file: rwlock.c
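The read-side trylock in foo_show() only matters because some writer takes rw_sem for writing. A minimal companion store handler, assuming the same rw_sem and a PAGE_SIZE foo_kbuff buffer from the original module (the buffer size is an assumption), could look like this:

static ssize_t foo_store(struct kobject *kobj, struct kobj_attribute *attr,
	const char *buf, size_t count)
{
	size_t len = count < PAGE_SIZE - 1 ? count : PAGE_SIZE - 1;

	/* Writers block until all readers have released the semaphore. */
	down_write(&rw_sem);

	memcpy(foo_kbuff, buf, len);	/* foo_kbuff is assumed to hold PAGE_SIZE bytes */
	foo_kbuff[len] = '\0';

	up_write(&rw_sem);
	return count;
}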

Example 11: ocfs2_readpage

static int ocfs2_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int ret, unlock = 1;

	trace_ocfs2_readpage((unsigned long long)oi->ip_blkno,
			     (page ? page->index : 0));

	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out;
	}

	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
		ret = AOP_TRUNCATED_PAGE;
		unlock_page(page);
		unlock = 0;
		down_read(&oi->ip_alloc_sem);
		up_read(&oi->ip_alloc_sem);
		goto out_inode_unlock;
	}

	if (start >= i_size_read(inode)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		ret = 0;
		goto out_alloc;
	}

	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		ret = ocfs2_readpage_inline(inode, page);
	else
		ret = block_read_full_page(page, ocfs2_get_block);
	unlock = 0;

out_alloc:
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
out_inode_unlock:
	ocfs2_inode_unlock(inode, 0);
out:
	if (unlock)
		unlock_page(page);
	return ret;
}
Developer: MiniBlu, Project: cm11_kernel_htc_msm8974a3ul, Lines: 49, Source file: aops.c

Example 12: siw_qp_llp_data_ready

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)
/* sk_data_ready() took a byte-count argument before Linux 3.15 */
static void siw_qp_llp_data_ready(struct sock *sk, int bytes)
#else
static void siw_qp_llp_data_ready(struct sock *sk)
#endif
{
	struct siw_qp		*qp;

	read_lock(&sk->sk_callback_lock);

	if (unlikely(!sk->sk_user_data || !sk_to_qp(sk))) {
		dprint(DBG_ON, " No QP: %p\n", sk->sk_user_data);
		goto done;
	}
	qp = sk_to_qp(sk);

	if (down_read_trylock(&qp->state_lock)) {
		read_descriptor_t	rd_desc = {.arg.data = qp, .count = 1};

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)
		dprint(DBG_SK|DBG_RX, "(QP%d): "
			"state (before tcp_read_sock)=%d, bytes=%x\n",
			QP_ID(qp), qp->attrs.state, bytes);
#else
		dprint(DBG_SK|DBG_RX, "(QP%d): "
			"state (before tcp_read_sock)=%d\n",
			QP_ID(qp), qp->attrs.state);
#endif

		if (likely(qp->attrs.state == SIW_QP_STATE_RTS))
			/*
			 * Implements data receive operation during
			 * socket callback. TCP gracefully catches
			 * the case where there is nothing to receive
			 * (not calling siw_tcp_rx_data() then).
			 */
			tcp_read_sock(sk, &rd_desc, siw_tcp_rx_data);

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)
		dprint(DBG_SK|DBG_RX, "(QP%d): "
			"state (after tcp_read_sock)=%d, bytes=%x\n",
			QP_ID(qp), qp->attrs.state, bytes);
#else
		dprint(DBG_SK|DBG_RX, "(QP%d): "
			"state (after tcp_read_sock)=%d\n",
			QP_ID(qp), qp->attrs.state);
#endif

		up_read(&qp->state_lock);
	} else {
		/* state_lock is contended; the quoted excerpt is truncated here,
		 * so the function is closed minimally below (assumed completion). */
	}
done:
	read_unlock(&sk->sk_callback_lock);
}
Developer: asaf-levy, Project: softiwarp, Lines: 47, Source file: siw_qp.c

Example 13: ocfs2_readpages

/*
 * This is used only for read-ahead. Failures or difficult to handle
 * situations are safe to ignore.
 *
 * Right now, we don't bother with BH_Boundary - in-inode extent lists
 * are quite large (243 extents on 4k blocks), so most inodes don't
 * grow out to a tree. If need be, detecting boundary extents could
 * trivially be added in a future version of ocfs2_get_block().
 */
static int ocfs2_readpages(struct file *filp, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	int ret, err = -EIO;
	struct inode *inode = mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	loff_t start;
	struct page *last;

	/*
	 * Use the nonblocking flag for the dlm code to avoid page
	 * lock inversion, but don't bother with retrying.
	 */
	ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK);
	if (ret)
		return err;

	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
		ocfs2_inode_unlock(inode, 0);
		return err;
	}

	/*
	 * Don't bother with inline-data. There isn't anything
	 * to read-ahead in that case anyway...
	 */
	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		goto out_unlock;

	/*
	 * Check whether a remote node truncated this file - we just
	 * drop out in that case as it's not worth handling here.
	 */
	last = list_entry(pages->prev, struct page, lru);
	start = (loff_t)last->index << PAGE_CACHE_SHIFT;
	if (start >= i_size_read(inode))
		goto out_unlock;

	err = mpage_readpages(mapping, pages, nr_pages, ocfs2_get_block);

out_unlock:
	up_read(&oi->ip_alloc_sem);
	ocfs2_inode_unlock(inode, 0);

	return err;
}
Developer: 458941968, Project: mini2440-kernel-2.6.29, Lines: 55, Source file: aops.c

Example 14: oom_reap_task_mm

/*
 * Reaps the address space of the given task.
 *
 * Returns true on success and false if none or part of the address space
 * has been reclaimed and the caller should retry later.
 */
static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
	bool ret = true;

	if (!down_read_trylock(&mm->mmap_sem)) {
		trace_skip_task_reaping(tsk->pid);
		return false;
	}

	/*
	 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
	 * work on the mm anymore. The check for MMF_OOM_SKIP must run
	 * under mmap_sem for reading because it serializes against the
	 * down_write();up_write() cycle in exit_mmap().
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
		trace_skip_task_reaping(tsk->pid);
		goto out_unlock;
	}

	trace_start_task_reaping(tsk->pid);

	/* failed to reap part of the address space. Try again later */
	ret = __oom_reap_task_mm(mm);
	if (!ret)
		goto out_finish;

	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
out_finish:
	trace_finish_task_reaping(tsk->pid);
out_unlock:
	up_read(&mm->mmap_sem);

	return ret;
}
Developer: avagin, Project: linux, Lines: 45, Source file: oom_kill.c

Example 15: mem_free_percent

static ssize_t mem_free_percent(void)
{
	unsigned long mem_used_pages = 0;
	u64 val = 0;
	int i = 0;
	struct zram *zram = NULL;
	struct zram_meta *meta = NULL;
	unsigned long total_zram_pages = totalram_pages*total_mem_usage_percent/100;

	for (i = 0; i < zram_get_num_devices(); i++) {

		zram = &zram_devices[i];
		if(!zram || !zram->disk)
		{
			continue;
		}

		if( !down_read_trylock(&zram->init_lock) )
			return -1;

		if(zram->init_done)
		{
			meta = zram->meta;
			if (meta && meta->mem_pool)
			{
				val += zs_get_total_pages(meta->mem_pool);
			}
		}

		up_read(&zram->init_lock);
	}

	mem_used_pages = val;

	pr_debug("ZRAM:totalram_pages:%lu,total_zram_pages:%lu,mem_used_pages:%lu\r\n", totalram_pages, total_zram_pages,mem_used_pages);

	return (mem_used_pages >= total_zram_pages) ? 0 : ((total_zram_pages - mem_used_pages)*100/total_zram_pages);
}
Developer: NhlalukoG, Project: android_kernel_samsung_goyave3g, Lines: 38, Source file: zram_sysfs.c


Note: the down_read_trylock examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.