

C++ spin_unlock Function Code Examples

This article collects and summarizes typical usage examples of the spin_unlock function in C++. If you have been struggling with questions such as: What exactly does the C++ spin_unlock function do? How is spin_unlock used? What do real examples of spin_unlock look like? Then the curated code examples below may help.


The following presents 15 code examples of the spin_unlock function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
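
Before the examples, here is a minimal, self-contained sketch of the basic spin_lock()/spin_unlock() pairing, assuming a Linux kernel context; all names (demo_lock, demo_count, demo_increment) are illustrative and not taken from the examples below:

#include <linux/spinlock.h>

/* Illustrative shared state: a counter guarded by a spinlock. */
static DEFINE_SPINLOCK(demo_lock);
static unsigned long demo_count;

static void demo_increment(void)
{
	spin_lock(&demo_lock);		/* enter the critical section */
	demo_count++;			/* touch shared data only while the lock is held */
	spin_unlock(&demo_lock);	/* leave; every lock must pair with an unlock */
}

Every example that follows is a variation on this pairing; what differs is which data the lock protects and how the code keeps the critical section short and sleep-free.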

Example 1: inet_csk

static struct sock *tcp_fastopen_create_child(struct sock *sk,
					      struct sk_buff *skb,
					      struct dst_entry *dst,
					      struct request_sock *req)
{
	struct tcp_sock *tp;
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	struct sock *child;
	bool own_req;

	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 NULL, &own_req);
	if (!child)
		return NULL;

	spin_lock(&queue->fastopenq.lock);
	queue->fastopenq.qlen++;
	spin_unlock(&queue->fastopenq.lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	tp->fastopen_rsk = req;
	tcp_rsk(req)->tfo_listener = true;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the ehash
	 * because it's been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	atomic_set(&req->rsk_refcnt, 2);

	/* Now finish processing the fastopen child socket. */
	inet_csk(child)->icsk_af_ops->rebuild_header(child);
	tcp_init_congestion_control(child);
	tcp_mtup_init(child);
	tcp_init_metrics(child);
	tcp_init_buffer_space(child);

	tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

	tcp_fastopen_add_skb(child, skb);

	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
	/* tcp_conn_request() is sending the SYNACK,
	 * and queues the child into listener accept queue.
	 */
	return child;
}
Developer: 020gzh, Project: linux, Lines: 63, Source: tcp_fastopen.c
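
Note how the example above takes fastopenq.lock only around the qlen++ update and performs all the heavy child-socket initialization outside the lock. A reduced sketch of that short-critical-section discipline, with illustrative names (demo_queue, demo_enqueue):

#include <linux/spinlock.h>

struct demo_queue {
	spinlock_t lock;
	int qlen;
};

static void demo_enqueue(struct demo_queue *q)
{
	/* Expensive per-object setup happens before taking the lock. */

	spin_lock(&q->lock);
	q->qlen++;			/* only the shared update is serialized */
	spin_unlock(&q->lock);

	/* Remaining initialization continues after the lock is dropped. */
}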

Example 2: ext3_free_inode

/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the harddisk.
 */
void ext3_free_inode (handle_t *handle, struct inode * inode)
{
	struct super_block * sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	unsigned long block_group;
	unsigned long bit;
	struct ext3_group_desc * gdp;
	struct ext3_super_block * es;
	struct ext3_sb_info *sbi;
	int fatal = 0, err;

	if (atomic_read(&inode->i_count) > 1) {
		printk ("ext3_free_inode: inode has count=%d\n",
					atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		printk ("ext3_free_inode: inode has nlink=%d\n",
			inode->i_nlink);
		return;
	}
	if (!sb) {
		printk(KERN_ERR "ext3_free_inode: inode on "
		       "nonexistent device\n");
		return;
	}
	sbi = EXT3_SB(sb);

	ino = inode->i_ino;
	ext3_debug ("freeing inode %lu\n", ino);

	is_directory = S_ISDIR(inode->i_mode);

	es = EXT3_SB(sb)->s_es;
	if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext3_error (sb, "ext3_free_inode",
			    "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT3_INODES_PER_GROUP(sb);
	bitmap_bh = read_inode_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext3_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	/* Ok, now we can actually update the inode bitmaps.. */
	if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
					bit, bitmap_bh->b_data))
		ext3_error (sb, "ext3_free_inode",
			      "bit already cleared for inode %lu", ino);
	else {
		gdp = ext3_get_group_desc (sb, block_group, &bh2);

		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext3_journal_get_write_access(handle, bh2);
		if (fatal) goto error_return;

		if (gdp) {
			spin_lock(sb_bgl_lock(sbi, block_group));
			le16_add_cpu(&gdp->bg_free_inodes_count, 1);
			if (is_directory)
				le16_add_cpu(&gdp->bg_used_dirs_count, -1);
			spin_unlock(sb_bgl_lock(sbi, block_group));
			percpu_counter_inc(&sbi->s_freeinodes_counter);
			if (is_directory)
				percpu_counter_dec(&sbi->s_dirs_counter);

		}
		BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
		err = ext3_journal_dirty_metadata(handle, bh2);
		if (!fatal) fatal = err;
	}
	BUFFER_TRACE(bitmap_bh, "call ext3_journal_dirty_metadata");
	err = ext3_journal_dirty_metadata(handle, bitmap_bh);
	if (!fatal)
		fatal = err;

//......... portions of the code omitted .........
Developer: rrowicki, Project: Chrono_Kernel-1, Lines: 101, Source: ialloc.c
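
sb_bgl_lock() above hands back a per-block-group spinlock, so freeing inodes in different groups does not contend on one global lock. A hedged sketch of that lock-striping idea, assuming illustrative names and sizes (DEMO_NR_GROUPS, demo_group_locks):

#include <linux/spinlock.h>

#define DEMO_NR_GROUPS 16

static spinlock_t demo_group_locks[DEMO_NR_GROUPS];
static unsigned int demo_free_inodes[DEMO_NR_GROUPS];

static void demo_group_locks_init(void)
{
	int i;

	for (i = 0; i < DEMO_NR_GROUPS; i++)
		spin_lock_init(&demo_group_locks[i]);
}

static void demo_free_inode_in_group(unsigned long group)
{
	spinlock_t *lock = &demo_group_locks[group % DEMO_NR_GROUPS];

	spin_lock(lock);
	demo_free_inodes[group % DEMO_NR_GROUPS]++;	/* serialized per group, not globally */
	spin_unlock(lock);
}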

Example 3: xpmem_open

/*
 * User open of the XPMEM driver. Called whenever /dev/xpmem is opened.
 * Create a struct xpmem_thread_group structure for the specified thread group.
 * And add the structure to the tg hash table.
 */
static int
xpmem_open(struct inode *inode, struct file *file)
{
	struct xpmem_thread_group *tg;
	int index;
	struct proc_dir_entry *unpin_entry;
	char tgid_string[XPMEM_TGID_STRING_LEN];

	/* if this has already been done, just return silently */
	tg = xpmem_tg_ref_by_tgid(current->tgid);
	if (!IS_ERR(tg)) {
		xpmem_tg_deref(tg);
		return 0;
	}

	/* create tg */
	tg = kzalloc(sizeof(struct xpmem_thread_group), GFP_KERNEL);
	if (tg == NULL) {
		return -ENOMEM;
	}

	tg->lock = SPIN_LOCK_UNLOCKED;
	tg->tgid = current->tgid;
	tg->uid = current->cred->uid;
	tg->gid = current->cred->gid;
	atomic_set(&tg->uniq_segid, 0);
	atomic_set(&tg->uniq_apid, 0);
	atomic_set(&tg->n_pinned, 0);
	tg->addr_limit = TASK_SIZE;
	tg->seg_list_lock = RW_LOCK_UNLOCKED;
	INIT_LIST_HEAD(&tg->seg_list);
	INIT_LIST_HEAD(&tg->tg_hashlist);
	atomic_set(&tg->n_recall_PFNs, 0);
	mutex_init(&tg->recall_PFNs_mutex);
	init_waitqueue_head(&tg->block_recall_PFNs_wq);
	init_waitqueue_head(&tg->allow_recall_PFNs_wq);
	tg->mmu_initialized = 0;
	tg->mmu_unregister_called = 0;
	tg->mm = current->mm;

	/* Register MMU notifier callbacks */
	if (xpmem_mmu_notifier_init(tg) != 0) {
		kfree(tg);
		return -EFAULT;
	}
	
	/* create and initialize struct xpmem_access_permit hashtable */
	tg->ap_hashtable = kzalloc(sizeof(struct xpmem_hashlist) *
				     XPMEM_AP_HASHTABLE_SIZE, GFP_KERNEL);
	if (tg->ap_hashtable == NULL) {
		xpmem_mmu_notifier_unlink(tg);
		kfree(tg);
		return -ENOMEM;
	}
	for (index = 0; index < XPMEM_AP_HASHTABLE_SIZE; index++) {
		tg->ap_hashtable[index].lock = RW_LOCK_UNLOCKED;
		INIT_LIST_HEAD(&tg->ap_hashtable[index].list);
	}

	snprintf(tgid_string, XPMEM_TGID_STRING_LEN, "%d", current->tgid);
	spin_lock(&xpmem_unpin_procfs_lock);
	unpin_entry = create_proc_entry(tgid_string, 0644,
					xpmem_unpin_procfs_dir);
	spin_unlock(&xpmem_unpin_procfs_lock);
	if (unpin_entry != NULL) {
		unpin_entry->data = (void *)(unsigned long)current->tgid;
		unpin_entry->write_proc = xpmem_unpin_procfs_write;
		unpin_entry->read_proc = xpmem_unpin_procfs_read;
		//unpin_entry->owner = THIS_MODULE;
		unpin_entry->uid = current->cred->uid;
		unpin_entry->gid = current->cred->gid;
	}

	xpmem_tg_not_destroyable(tg);

	/* add tg to its hash list */
	index = xpmem_tg_hashtable_index(tg->tgid);
	write_lock(&xpmem_my_part->tg_hashtable[index].lock);
	list_add_tail(&tg->tg_hashlist,
		      &xpmem_my_part->tg_hashtable[index].list);
	write_unlock(&xpmem_my_part->tg_hashtable[index].lock);

	/*
	 * Increment 'usage' and 'mm->mm_users' for the current task's thread
	 * group leader. This ensures that both its task_struct and mm_struct
	 * will still be around when our thread group exits. (The Linux kernel
	 * normally tears down the mm_struct prior to calling a module's
	 * 'flush' function.) Since all XPMEM thread groups must go through
	 * this path, this extra reference to mm_users also allows us to
	 * directly inc/dec mm_users in xpmem_ensure_valid_PFNs() and avoid
	 * mmput() which has a scaling issue with the mmlist_lock.
	 */
	get_task_struct(current->group_leader);
	tg->group_leader = current->group_leader;
	BUG_ON(current->mm != current->group_leader->mm);
//......... portions of the code omitted .........
Developer: hyoklee, Project: xpmem, Lines: 101, Source: xpmem_main.c
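
xpmem_open() mixes two primitives: a plain spinlock (xpmem_unpin_procfs_lock) for the shared procfs directory, and per-bucket rwlocks for the tg hash table, where write_lock()/write_unlock() bracket the insertion exactly as spin_lock()/spin_unlock() would. A minimal sketch of the per-bucket insert, with illustrative names (demo_hashlist, demo_hash_add); each bucket's lock needs rwlock_init() at setup (not shown):

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_hashlist {
	rwlock_t lock;			/* per-bucket reader/writer lock */
	struct list_head list;
};

static struct demo_hashlist demo_table[8];

static void demo_hash_add(struct list_head *node, int index)
{
	write_lock(&demo_table[index].lock);	/* writers exclude readers and writers */
	list_add_tail(node, &demo_table[index].list);
	write_unlock(&demo_table[index].lock);
}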

Example 4: mdss_dsi_isr

irqreturn_t mdss_dsi_isr(int irq, void *ptr)
{
	u32 isr;
	struct mdss_dsi_ctrl_pdata *ctrl =
			(struct mdss_dsi_ctrl_pdata *)ptr;

	if (!ctrl->ctrl_base)
		pr_err("%s:%d DSI base address not initialized\n",
		       __func__, __LINE__);

	isr = MIPI_INP(ctrl->ctrl_base + 0x0110);/* DSI_INTR_CTRL */
	MIPI_OUTP(ctrl->ctrl_base + 0x0110, isr);

	if (ctrl->shared_pdata.broadcast_enable)
		if ((ctrl->panel_data.panel_info.pdest == DISPLAY_2)
		    && (left_ctrl_pdata != NULL)) {
			u32 isr0;
			isr0 = MIPI_INP(left_ctrl_pdata->ctrl_base
						+ 0x0110);/* DSI_INTR_CTRL */
			MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0110, isr0);
		}

	pr_debug("%s: ndx=%d isr=%x\n", __func__, ctrl->ndx, isr);

	if (isr & DSI_INTR_ERROR) {
#ifdef F_WA_WATCHDOG_DURING_BOOTUP
		if (ctrl->octa_blck_set)
#endif
		pr_err("%s: ndx=%d isr=%x\n", __func__, ctrl->ndx, isr);
		mdss_dsi_error(ctrl);
	}

	if (isr & DSI_INTR_VIDEO_DONE) {
		spin_lock(&ctrl->mdp_lock);
		mdss_dsi_disable_irq_nosync(ctrl, DSI_VIDEO_TERM);
		complete(&ctrl->video_comp);
		spin_unlock(&ctrl->mdp_lock);
	}

	if (isr & DSI_INTR_CMD_DMA_DONE) {
		spin_lock(&ctrl->mdp_lock);
		mdss_dsi_disable_irq_nosync(ctrl, DSI_CMD_TERM);
		complete(&ctrl->dma_comp);
		spin_unlock(&ctrl->mdp_lock);
	}

	if (isr & DSI_INTR_CMD_MDP_DONE) {
		spin_lock(&ctrl->mdp_lock);
		ctrl->mdp_busy = false;
		mdss_dsi_disable_irq_nosync(ctrl, DSI_MDP_TERM);
		complete(&ctrl->mdp_comp);
		spin_unlock(&ctrl->mdp_lock);
	}

	if (isr & DSI_INTR_BTA_DONE) {
		spin_lock(&ctrl->mdp_lock);
		mdss_dsi_disable_irq_nosync(ctrl, DSI_BTA_TERM);
		complete(&ctrl->bta_comp);
		spin_unlock(&ctrl->mdp_lock);
	}

	return IRQ_HANDLED;
}
Developer: thanhphat11, Project: android_kernel_pantech_910, Lines: 64, Source: mdss_dsi_host.c
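
The ISR above can use plain spin_lock() on mdp_lock because it runs in hard-IRQ context with interrupts already disabled; any process-context code sharing the same lock must use the _irqsave variant, or the ISR could interrupt it and deadlock on the held lock. A hedged sketch of that pairing, with illustrative names (demo_isr, demo_touch_shared_state):

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/completion.h>

static DEFINE_SPINLOCK(demo_lock);
static DECLARE_COMPLETION(demo_done);

/* Hard-IRQ context: interrupts are off here, so a plain lock suffices. */
static irqreturn_t demo_isr(int irq, void *ptr)
{
	spin_lock(&demo_lock);
	complete(&demo_done);		/* signal the waiter under the lock */
	spin_unlock(&demo_lock);
	return IRQ_HANDLED;
}

/* Process context: block the interrupt to avoid deadlocking against demo_isr(). */
static void demo_touch_shared_state(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... manipulate state shared with demo_isr() ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}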

Example 5: fd_link_ioctl

static long
fd_link_ioctl (struct file *f, unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct task_struct *task_target = NULL;
	struct file *file;
	struct files_struct *files;
	struct fdtable *fdt;
	struct fd_copy fd_copy;

	switch (ioctl)
	{
		case FD_COPY:
			if (copy_from_user (&fd_copy, argp, sizeof (struct fd_copy)))
				return -EFAULT;

			/*
			 * Find the task struct for the target pid
			 */
			task_target =
				pid_task (find_vpid (fd_copy.target_pid), PIDTYPE_PID);
			if (task_target == NULL)
			{
				printk (KERN_DEBUG "Failed to get mem ctx for target pid\n");
				return -EFAULT;
			}

			files = get_files_struct (current);
			if (files == NULL)
			{
				printk (KERN_DEBUG "Failed to get files struct\n");
				return -EFAULT;
			}

			rcu_read_lock ();
			file = fcheck_files (files, fd_copy.source_fd);
			if (file)
			{
				if (file->f_mode & FMODE_PATH
						|| !atomic_long_inc_not_zero (&file->f_count))
					file = NULL;
			}
			rcu_read_unlock ();
			put_files_struct (files);

			if (file == NULL)
			{
				printk (KERN_DEBUG "Failed to get file from source pid\n");
				return 0;
			}

			/*
			 * Release the existing fd in the source process
			 */
			spin_lock (&files->file_lock);
			filp_close (file, files);
			fdt = files_fdtable (files);
			fdt->fd[fd_copy.source_fd] = NULL;
			spin_unlock (&files->file_lock);

			/*
			 * Find the file struct associated with the target fd.
			 */

			files = get_files_struct (task_target);
			if (files == NULL)
			{
				printk (KERN_DEBUG "Failed to get files struct\n");
				return -EFAULT;
			}

			rcu_read_lock ();
			file = fcheck_files (files, fd_copy.target_fd);
			if (file)
			{
				if (file->f_mode & FMODE_PATH
						|| !atomic_long_inc_not_zero (&file->f_count))
					file = NULL;
			}
			rcu_read_unlock ();
			put_files_struct (files);

			if (file == NULL)
			{
				printk (KERN_DEBUG "Failed to get file from target pid\n");
				return 0;
			}


			/*
			 * Install the file struct from the target process into the
			 * file descriptor of the source process.
			 */

			fd_install (fd_copy.source_fd, file);

			return 0;

		default:
			return -ENOIOCTLCMD;
//......... portions of the code omitted .........
Developer: Grace-Liu, Project: dpdk-ovs, Lines: 101, Source: fd_link.c
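
One caution about the FD_COPY path: files->file_lock is a spinlock, yet filp_close() can block, so calling it with the lock held is risky. A hedged sketch of the safer ordering, detach under the lock and release after unlocking, using illustrative types (demo_table, demo_obj, demo_obj_release):

#include <linux/spinlock.h>

struct demo_obj;
void demo_obj_release(struct demo_obj *obj);	/* assumed helper; may sleep */

struct demo_table {
	spinlock_t lock;
	struct demo_obj *slots[64];
};

static void demo_detach_and_close(struct demo_table *t, int idx)
{
	struct demo_obj *obj;

	spin_lock(&t->lock);
	obj = t->slots[idx];
	t->slots[idx] = NULL;		/* nobody can find the object anymore */
	spin_unlock(&t->lock);

	if (obj)
		demo_obj_release(obj);	/* potentially blocking work, outside the lock */
}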

Example 6: id_to_sid

static int
id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
{
	int rc = 0;
	struct key *sidkey;
	const struct cred *saved_cred;
	struct cifs_sid *lsid;
	struct cifs_sid_id *psidid, *npsidid;
	struct rb_root *cidtree;
	spinlock_t *cidlock;

	if (sidtype == SIDOWNER) {
		cidlock = &siduidlock;
		cidtree = &uidtree;
	} else if (sidtype == SIDGROUP) {
		cidlock = &sidgidlock;
		cidtree = &gidtree;
	} else
		return -EINVAL;

	spin_lock(cidlock);
	psidid = sid_rb_search(cidtree, cid);

	if (!psidid) { /* node does not exist, allocate one & attempt adding */
		spin_unlock(cidlock);
		npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
		if (!npsidid)
			return -ENOMEM;

		npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
		if (!npsidid->sidstr) {
			kfree(npsidid);
			return -ENOMEM;
		}

		spin_lock(cidlock);
		psidid = sid_rb_search(cidtree, cid);
		if (psidid) { /* node happened to get inserted meanwhile */
			++psidid->refcount;
			spin_unlock(cidlock);
			kfree(npsidid->sidstr);
			kfree(npsidid);
		} else {
			psidid = npsidid;
			sid_rb_insert(cidtree, cid, &psidid,
					sidtype == SIDOWNER ? "oi:" : "gi:");
			++psidid->refcount;
			spin_unlock(cidlock);
		}
	} else {
		++psidid->refcount;
		spin_unlock(cidlock);
	}

	/*
	 * If we are here, it is safe to access psidid and its fields
	 * since a reference was taken earlier while holding the spinlock.
	 * A reference on the node is put without holding the spinlock,
	 * which is OK in this case: the shrinker will not erase this
	 * node until all references are put, and we do not access any
	 * fields of the node after a reference is put.
	 */
	if (test_bit(SID_ID_MAPPED, &psidid->state)) {
		memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
		psidid->time = jiffies; /* update ts for accessing */
		goto id_sid_out;
	}

	if (time_after(psidid->time + SID_MAP_RETRY, jiffies)) {
		rc = -EINVAL;
		goto id_sid_out;
	}

	if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
		saved_cred = override_creds(root_cred);
		sidkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
		if (IS_ERR(sidkey)) {
			rc = -EINVAL;
			cFYI(1, "%s: Can't map and id to a SID", __func__);
		} else {
			lsid = (struct cifs_sid *)sidkey->payload.data;
			memcpy(&psidid->sid, lsid,
				sidkey->datalen < sizeof(struct cifs_sid) ?
				sidkey->datalen : sizeof(struct cifs_sid));
			memcpy(ssid, &psidid->sid,
				sidkey->datalen < sizeof(struct cifs_sid) ?
				sidkey->datalen : sizeof(struct cifs_sid));
			set_bit(SID_ID_MAPPED, &psidid->state);
			key_put(sidkey);
			kfree(psidid->sidstr);
		}
		psidid->time = jiffies; /* update ts for accessing */
		revert_creds(saved_cred);
		clear_bit(SID_ID_PENDING, &psidid->state);
		wake_up_bit(&psidid->state, SID_ID_PENDING);
	} else {
		rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
				sidid_pending_wait, TASK_INTERRUPTIBLE);
		if (rc) {
			cFYI(1, "%s: sidid_pending_wait interrupted %d",
//......... portions of the code omitted .........
Developer: rrowicki, Project: Chrono_Kernel-1, Lines: 101, Source: cifsacl.c
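
The allocation dance in id_to_sid() is a classic optimistic pattern: search under the lock, drop the lock to allocate (GFP_KERNEL may sleep, which is forbidden under a spinlock), then retake the lock and search again in case another thread inserted the node in the meantime. A stripped-down sketch, where demo_tree_search()/demo_tree_insert() are assumed helpers:

#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

struct demo_node {
	unsigned long key;
};

struct demo_node *demo_tree_search(unsigned long key);	/* assumed helpers */
void demo_tree_insert(struct demo_node *n);

static struct demo_node *demo_lookup_or_create(unsigned long key)
{
	struct demo_node *n, *fresh;

	spin_lock(&demo_lock);
	n = demo_tree_search(key);
	spin_unlock(&demo_lock);
	if (n)
		return n;

	fresh = kzalloc(sizeof(*fresh), GFP_KERNEL);	/* may sleep: lock dropped */
	if (!fresh)
		return NULL;
	fresh->key = key;

	spin_lock(&demo_lock);
	n = demo_tree_search(key);	/* re-check: another thread may have won */
	if (n) {
		spin_unlock(&demo_lock);
		kfree(fresh);		/* lost the race; discard our copy */
		return n;
	}
	demo_tree_insert(fresh);
	spin_unlock(&demo_lock);
	return fresh;
}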

Example 7: spin_lock

static struct gdbio_state *gdbio_htable_lookup(char *gdbcons)
{
	struct gdbio_state *gs;

	spin_lock(&htable_lock);
	gs = htable_lookup(gdbio_htable, gdbcons);
	spin_unlock(&htable_lock);

	return gs;
}
Developer: HobbesOSR, Project: kitten, Lines: 6, Source: gdbio.c

Example 8: find_cifs_entry

/*
 * Find the corresponding entry in the search. Note that the SMB server returns
 * search entries for . and .. which complicates logic here if we choose to
 * parse for them and we do not assume that they are located in the findfirst
 * return buffer. We start counting in the buffer with entry 2 and increment for
 * every entry (do not increment for . or .. entry).
 */
static int
find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
		struct file *file, char **current_entry, int *num_to_ret)
{
	__u16 search_flags;
	int rc = 0;
	int pos_in_buf = 0;
	loff_t first_entry_in_buffer;
	loff_t index_to_find = pos;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	struct TCP_Server_Info *server = tcon->ses->server;
	/* check if index in the buffer */

	if (!server->ops->query_dir_first || !server->ops->query_dir_next)
		return -ENOSYS;

	if ((cfile == NULL) || (current_entry == NULL) || (num_to_ret == NULL))
		return -ENOENT;

	*current_entry = NULL;
	first_entry_in_buffer = cfile->srch_inf.index_of_last_entry -
					cfile->srch_inf.entries_in_buffer;

	/*
	 * If the first entry in the buffer is zero, then this is the first
	 * buffer of search response data, which means . and .. are likely
	 * in this buffer; however, some servers do not return . and .. for
	 * the root of a drive, and for those we need to start two entries
	 * earlier.
	 */

	dump_cifs_file_struct(file, "In fce ");
	if (((index_to_find < cfile->srch_inf.index_of_last_entry) &&
	     is_dir_changed(file)) || (index_to_find < first_entry_in_buffer)) {
		/* close and restart search */
		cifs_dbg(FYI, "search backing up - close and restart search\n");
		spin_lock(&cifs_file_list_lock);
		if (server->ops->dir_needs_close(cfile)) {
			cfile->invalidHandle = true;
			spin_unlock(&cifs_file_list_lock);
			if (server->ops->close)
				server->ops->close(xid, tcon, &cfile->fid);
		} else
			spin_unlock(&cifs_file_list_lock);
		if (cfile->srch_inf.ntwrk_buf_start) {
			cifs_dbg(FYI, "freeing SMB ff cache buf on search rewind\n");
			if (cfile->srch_inf.smallBuf)
				cifs_small_buf_release(cfile->srch_inf.
						ntwrk_buf_start);
			else
				cifs_buf_release(cfile->srch_inf.
						ntwrk_buf_start);
			cfile->srch_inf.ntwrk_buf_start = NULL;
		}
		rc = initiate_cifs_search(xid, file);
		if (rc) {
			cifs_dbg(FYI, "error %d reinitiating a search on rewind\n",
				 rc);
			return rc;
		}
		/* FindFirst/Next set last_entry to NULL on malformed reply */
		if (cfile->srch_inf.last_entry)
			cifs_save_resume_key(cfile->srch_inf.last_entry, cfile);
	}

	search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME;
	if (backup_cred(cifs_sb))
		search_flags |= CIFS_SEARCH_BACKUP_SEARCH;

	while ((index_to_find >= cfile->srch_inf.index_of_last_entry) &&
	       (rc == 0) && !cfile->srch_inf.endOfSearch) {
		cifs_dbg(FYI, "calling findnext2\n");
		rc = server->ops->query_dir_next(xid, tcon, &cfile->fid,
						 search_flags,
						 &cfile->srch_inf);
		/* FindFirst/Next set last_entry to NULL on malformed reply */
		if (cfile->srch_inf.last_entry)
			cifs_save_resume_key(cfile->srch_inf.last_entry, cfile);
		if (rc)
			return -ENOENT;
	}
	if (index_to_find < cfile->srch_inf.index_of_last_entry) {
		/* we found the buffer that contains the entry */
		/* scan and find it */
		int i;
		char *cur_ent;
		char *end_of_smb = cfile->srch_inf.ntwrk_buf_start +
			server->ops->calc_smb_size(
					cfile->srch_inf.ntwrk_buf_start);

		cur_ent = cfile->srch_inf.srch_entries_start;
		first_entry_in_buffer = cfile->srch_inf.index_of_last_entry
//......... portions of the code omitted .........
Developer: FT-Liang, Project: linux, Lines: 101, Source: readdir.c
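
Notice how find_cifs_entry() releases cifs_file_list_lock on both branches of the if before calling into code that may block (server->ops->close). Missing the unlock on one branch is an easy bug to write; a minimal sketch of the discipline, with illustrative names (demo_handle, demo_blocking_close):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

struct demo_handle {
	bool invalid;
};

void demo_blocking_close(struct demo_handle *h);	/* assumed helper; may sleep */

static void demo_maybe_close(struct demo_handle *h, bool needs_close)
{
	spin_lock(&demo_lock);
	if (needs_close) {
		h->invalid = true;
		spin_unlock(&demo_lock);	/* drop before the blocking call */
		demo_blocking_close(h);
	} else {
		spin_unlock(&demo_lock);	/* this path must release it too */
	}
}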

Example 9: new_inode

struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_bmap;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	insert_inode_hash(inode);

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		goto failed_acl; /* never occur. When supporting
				    nilfs_init_acl(), proper cancellation of
				    above jobs should be considered */

	return inode;

 failed_acl:
 failed_bmap:
	clear_nlink(inode);
	iput(inode);  /* raw_inode will be deleted through
			 generic_delete_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}
Developer: LiquidSmooth-Devices, Project: Deathly_Kernel_D2, Lines: 75, Source: inode.c
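
The only spinlock in this example is ns_next_gen_lock, which makes the read-and-advance of ns_next_generation atomic so every new inode gets a unique generation number. A minimal sketch of such a lock-protected sequence generator (names illustrative):

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(demo_gen_lock);
static u32 demo_next_generation;

static u32 demo_new_generation(void)
{
	u32 gen;

	spin_lock(&demo_gen_lock);
	gen = demo_next_generation++;	/* read-and-advance must be one atomic step */
	spin_unlock(&demo_gen_lock);

	return gen;
}

For a lone counter an atomic_t would do the same job; the spinlock form is what scales when additional fields later need updating in the same critical section.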

Example 10: gdbio_htable_del

static void gdbio_htable_del(struct gdbio_state *gs)
{
	spin_lock(&htable_lock);
	htable_del(gdbio_htable, gs);
	spin_unlock(&htable_lock);
}
Developer: HobbesOSR, Project: kitten, Lines: 5, Source: gdbio.c

Example 11: qdisc_lock

/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
 * A requeued skb (via q->gso_skb) can also be a SKB list.
 */
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
				   int *packets)
{
	const struct netdev_queue *txq = q->dev_queue;
	struct sk_buff *skb = NULL;

	*packets = 1;
	if (unlikely(!skb_queue_empty(&q->gso_skb))) {
		spinlock_t *lock = NULL;

		if (q->flags & TCQ_F_NOLOCK) {
			lock = qdisc_lock(q);
			spin_lock(lock);
		}

		skb = skb_peek(&q->gso_skb);

		/* skb may be null if another cpu pulls gso_skb off in between
		 * empty check and lock.
		 */
		if (!skb) {
			if (lock)
				spin_unlock(lock);
			goto validate;
		}

		/* skb in gso_skb were already validated */
		*validate = false;
		if (xfrm_offload(skb))
			*validate = true;
		/* check the reason of requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			skb = __skb_dequeue(&q->gso_skb);
			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_dec(q, skb);
				qdisc_qstats_cpu_qlen_dec(q);
			} else {
				qdisc_qstats_backlog_dec(q, skb);
				q->q.qlen--;
			}
		} else {
			skb = NULL;
		}
		if (lock)
			spin_unlock(lock);
		goto trace;
	}
validate:
	*validate = true;

	if ((q->flags & TCQ_F_ONETXQUEUE) &&
	    netif_xmit_frozen_or_stopped(txq))
		return skb;

	skb = qdisc_dequeue_skb_bad_txq(q);
	if (unlikely(skb))
		goto bulk;
	skb = q->dequeue(q);
	if (skb) {
bulk:
		if (qdisc_may_bulk(q))
			try_bulk_dequeue_skb(q, skb, txq, packets);
		else
			try_bulk_dequeue_skb_slow(q, skb, packets);
	}
trace:
	trace_qdisc_dequeue(q, txq, *packets, skb);
	return skb;
}
Developer: Lyude, Project: linux, Lines: 73, Source: sch_generic.c
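
dequeue_skb() shows a conditional-locking idiom: a spinlock_t pointer starts out NULL and is only assigned and taken when the qdisc is not lockless (TCQ_F_NOLOCK), after which every exit path does if (lock) spin_unlock(lock). A reduced sketch with illustrative names (demo_queue, DEMO_F_NOLOCK):

#include <linux/spinlock.h>

#define DEMO_F_NOLOCK 0x1

struct demo_queue {
	unsigned int flags;
	spinlock_t lock;
	int items;
};

static int demo_peek(struct demo_queue *q)
{
	spinlock_t *lock = NULL;
	int items;

	if (!(q->flags & DEMO_F_NOLOCK)) {
		lock = &q->lock;
		spin_lock(lock);
	}

	items = q->items;	/* lockless queues protect this some other way */

	if (lock)
		spin_unlock(lock);
	return items;
}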

Example 12: qat_alg_setkey

static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
                          unsigned int keylen)
{
    struct qat_alg_session_ctx *ctx = crypto_aead_ctx(tfm);
    struct device *dev;

    spin_lock(&ctx->lock);
    if (ctx->enc_cd) {
        /* rekeying */
        dev = &GET_DEV(ctx->inst->accel_dev);
        memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
        memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
        memzero_explicit(&ctx->enc_fw_req_tmpl,
                         sizeof(struct icp_qat_fw_la_bulk_req));
        memzero_explicit(&ctx->dec_fw_req_tmpl,
                         sizeof(struct icp_qat_fw_la_bulk_req));
    } else {
        /* new key */
        int node = get_current_node();
        struct qat_crypto_instance *inst =
            qat_crypto_get_instance_node(node);
        if (!inst) {
            spin_unlock(&ctx->lock);
            return -EINVAL;
        }

        dev = &GET_DEV(inst->accel_dev);
        ctx->inst = inst;
        ctx->enc_cd = dma_zalloc_coherent(dev,
                                          sizeof(struct qat_alg_cd),
                                          &ctx->enc_cd_paddr,
                                          GFP_ATOMIC);
        if (!ctx->enc_cd) {
            spin_unlock(&ctx->lock);
            return -ENOMEM;
        }
        ctx->dec_cd = dma_zalloc_coherent(dev,
                                          sizeof(struct qat_alg_cd),
                                          &ctx->dec_cd_paddr,
                                          GFP_ATOMIC);
        if (!ctx->dec_cd) {
            spin_unlock(&ctx->lock);
            goto out_free_enc;
        }
    }
    spin_unlock(&ctx->lock);
    if (qat_alg_init_sessions(ctx, key, keylen))
        goto out_free_all;

    return 0;

out_free_all:
    memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
    dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                      ctx->dec_cd, ctx->dec_cd_paddr);
    ctx->dec_cd = NULL;
out_free_enc:
    memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
    dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                      ctx->enc_cd, ctx->enc_cd_paddr);
    ctx->enc_cd = NULL;
    return -ENOMEM;
}
Developer: rebeccawshaw, Project: OperatingSystems1, Lines: 63, Source: qat_algs.c
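
qat_alg_setkey() must release ctx->lock on each early error return inside the critical section, and falls back to goto-based cleanup for the DMA buffers. A condensed sketch of the unlock-before-every-return pattern, with illustrative names (demo_ctx, demo_alloc_pair); note GFP_ATOMIC, since the allocations happen under a spinlock:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_ctx {
	spinlock_t lock;
	void *enc, *dec;
};

static int demo_alloc_pair(struct demo_ctx *ctx)
{
	spin_lock(&ctx->lock);

	ctx->enc = kzalloc(64, GFP_ATOMIC);	/* GFP_ATOMIC: spinlock held */
	if (!ctx->enc) {
		spin_unlock(&ctx->lock);	/* unlock on this error path... */
		return -ENOMEM;
	}

	ctx->dec = kzalloc(64, GFP_ATOMIC);
	if (!ctx->dec) {
		kfree(ctx->enc);
		ctx->enc = NULL;
		spin_unlock(&ctx->lock);	/* ...and on this one... */
		return -ENOMEM;
	}

	spin_unlock(&ctx->lock);		/* ...and on the success path */
	return 0;
}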

Example 13: hfi1_vnic_bypass_rcv

void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
{
	struct hfi1_devdata *dd = packet->rcd->dd;
	struct hfi1_vnic_vport_info *vinfo = NULL;
	struct hfi1_vnic_rx_queue *rxq;
	struct sk_buff *skb;
	int l4_type, vesw_id = -1;
	u8 q_idx;

	l4_type = HFI1_GET_L4_TYPE(packet->ebuf);
	if (likely(l4_type == OPA_VNIC_L4_ETHR)) {
		vesw_id = HFI1_VNIC_GET_VESWID(packet->ebuf);
		vinfo = idr_find(&dd->vnic.vesw_idr, vesw_id);

		/*
		 * In case of invalid vesw id, count the error on
		 * the first available vport.
		 */
		if (unlikely(!vinfo)) {
			struct hfi1_vnic_vport_info *vinfo_tmp;
			int id_tmp = 0;

			vinfo_tmp =  idr_get_next(&dd->vnic.vesw_idr, &id_tmp);
			if (vinfo_tmp) {
				spin_lock(&vport_cntr_lock);
				vinfo_tmp->stats[0].netstats.rx_nohandler++;
				spin_unlock(&vport_cntr_lock);
			}
		}
	}

	if (unlikely(!vinfo)) {
		dd_dev_warn(dd, "vnic rcv err: l4 %d vesw id %d ctx %d\n",
			    l4_type, vesw_id, packet->rcd->ctxt);
		return;
	}

	q_idx = packet->rcd->vnic_q_idx;
	rxq = &vinfo->rxq[q_idx];
	if (unlikely(!netif_oper_up(vinfo->netdev))) {
		vinfo->stats[q_idx].rx_drop_state++;
		skb_queue_purge(&rxq->skbq);
		return;
	}

	if (unlikely(skb_queue_len(&rxq->skbq) > HFI1_VNIC_RCV_Q_SIZE)) {
		vinfo->stats[q_idx].netstats.rx_fifo_errors++;
		return;
	}

	skb = netdev_alloc_skb(vinfo->netdev, packet->tlen);
	if (unlikely(!skb)) {
		vinfo->stats[q_idx].netstats.rx_fifo_errors++;
		return;
	}

	memcpy(skb->data, packet->ebuf, packet->tlen);
	skb_put(skb, packet->tlen);
	skb_queue_tail(&rxq->skbq, skb);

	if (napi_schedule_prep(&rxq->napi)) {
		v_dbg("napi %d scheduling\n", q_idx);
		__napi_schedule(&rxq->napi);
	}
}
Developer: asmalldev, Project: linux, Lines: 65, Source: vnic_main.c
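
vport_cntr_lock above is a single global lock shared by all vports, which is simple but coarse; the per-queue stats (indexed by q_idx) avoid it entirely. A sketch contrasting the two granularities, with illustrative names (demo_vport):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_global_lock);	/* coarse: one lock for everything */

struct demo_vport {
	spinlock_t lock;			/* fine: one lock per vport */
	unsigned long rx_nohandler;
};

static void demo_count_coarse(struct demo_vport *v)
{
	spin_lock(&demo_global_lock);		/* serializes every vport */
	v->rx_nohandler++;
	spin_unlock(&demo_global_lock);
}

static void demo_count_fine(struct demo_vport *v)
{
	spin_lock(&v->lock);			/* only this vport contends */
	v->rx_nohandler++;
	spin_unlock(&v->lock);
}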

Example 14: ll_getxattr_common

static
int ll_getxattr_common(struct inode *inode, const char *name,
                       void *buffer, size_t size, __u64 valid)
{
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct ptlrpc_request *req = NULL;
        struct mdt_body *body;
        int xattr_type, rc;
        void *xdata;
        struct obd_capa *oc;
        struct rmtacl_ctl_entry *rce = NULL;
	struct ll_inode_info *lli = ll_i2info(inode);
        ENTRY;

	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
	       PFID(ll_inode2fid(inode)), inode);

        /* listxattr has slightly different behavior from ext3:
         * without 'user_xattr', ext3 lists all xattr names but
         * filters out "^user..*"; we list them all for simplicity.
         */
        if (!name) {
                xattr_type = XATTR_OTHER_T;
                goto do_getxattr;
        }

        xattr_type = get_xattr_type(name);
        rc = xattr_type_filter(sbi, xattr_type);
        if (rc)
                RETURN(rc);

        /* b15587: ignore security.capability xattr for now */
        if ((xattr_type == XATTR_SECURITY_T &&
            strcmp(name, "security.capability") == 0))
                RETURN(-ENODATA);

        /* LU-549:  Disable security.selinux when selinux is disabled */
        if (xattr_type == XATTR_SECURITY_T && !selinux_is_enabled() &&
            strcmp(name, "security.selinux") == 0)
                RETURN(-EOPNOTSUPP);

#ifdef CONFIG_FS_POSIX_ACL
	if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
	    (xattr_type == XATTR_ACL_ACCESS_T ||
	    xattr_type == XATTR_ACL_DEFAULT_T)) {
		rce = rct_search(&sbi->ll_rct, current_pid());
		if (rce == NULL ||
		    (rce->rce_ops != RMT_LSETFACL &&
		    rce->rce_ops != RMT_LGETFACL &&
		    rce->rce_ops != RMT_RSETFACL &&
		    rce->rce_ops != RMT_RGETFACL))
			RETURN(-EOPNOTSUPP);
	}

        /* The posix acl is under the protection of the LOOKUP lock. By the
         * time we get here, we have just done path resolution to the target
         * inode, so there is a good chance that the cached ACL is up to date.
         */
        if (xattr_type == XATTR_ACL_ACCESS_T &&
            !(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {

                struct posix_acl *acl;

		spin_lock(&lli->lli_lock);
		acl = posix_acl_dup(lli->lli_posix_acl);
		spin_unlock(&lli->lli_lock);

                if (!acl)
                        RETURN(-ENODATA);

                rc = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
                posix_acl_release(acl);
                RETURN(rc);
        }
        if (xattr_type == XATTR_ACL_DEFAULT_T && !S_ISDIR(inode->i_mode))
                RETURN(-ENODATA);
#endif

do_getxattr:
	if (sbi->ll_xattr_cache_enabled && xattr_type != XATTR_ACL_ACCESS_T) {
		rc = ll_xattr_cache_get(inode, name, buffer, size, valid);
		if (rc == -EAGAIN)
			goto getxattr_nocache;
		if (rc < 0)
			GOTO(out_xattr, rc);

		/* Add "system.posix_acl_access" to the list */
		if (lli->lli_posix_acl != NULL && valid & OBD_MD_FLXATTRLS) {
			if (size == 0) {
				rc += sizeof(XATTR_NAME_ACL_ACCESS);
			} else if (size - rc >= sizeof(XATTR_NAME_ACL_ACCESS)) {
				memcpy(buffer + rc, XATTR_NAME_ACL_ACCESS,
				       sizeof(XATTR_NAME_ACL_ACCESS));
				rc += sizeof(XATTR_NAME_ACL_ACCESS);
			} else {
				GOTO(out_xattr, rc = -ERANGE);
			}
		}
	} else {
getxattr_nocache:
//......... portions of the code omitted .........
Developer: Lezval, Project: lustre, Lines: 101, Source: xattr.c
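
In the ACL path above, lli_lock is held just long enough for posix_acl_dup() to take a reference to the cached ACL; the conversion to an xattr then runs with the lock dropped. A hedged sketch of this take-a-reference-under-the-lock pattern, with illustrative types (real kernel code would use kref or atomic refcounts):

#include <linux/spinlock.h>

struct demo_obj {
	int refcount;			/* illustrative; not a real refcount API */
};

struct demo_inode {
	spinlock_t lock;
	struct demo_obj *cached;
};

static struct demo_obj *demo_get_cached(struct demo_inode *i)
{
	struct demo_obj *obj;

	spin_lock(&i->lock);
	obj = i->cached;
	if (obj)
		obj->refcount++;	/* pin it while the lock still protects it */
	spin_unlock(&i->lock);

	return obj;			/* caller may use it at leisure, then drop the ref */
}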

Example 15: xfs_fs_encode_fh

STATIC int
xfs_fs_encode_fh(
	struct dentry		*dentry,
	__u32			*fh,
	int			*max_len,
	int			connectable)
{
	struct fid		*fid = (struct fid *)fh;
	struct xfs_fid64	*fid64 = (struct xfs_fid64 *)fh;
	struct inode		*inode = dentry->d_inode;
	int			fileid_type;
	int			len;

	/* Directories don't need their parent encoded, they have ".." */
	if (S_ISDIR(inode->i_mode) || !connectable)
		fileid_type = FILEID_INO32_GEN;
	else
		fileid_type = FILEID_INO32_GEN_PARENT;

	/*
	 * If the filesystem may contain 64bit inode numbers, we need
	 * to use larger file handles that can represent them.
	 *
	 * While we only allocate inodes that do not fit into 32 bits any
	 * large enough filesystem may contain them, thus the slightly
	 * confusing looking conditional below.
	 */
	if (!(XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_SMALL_INUMS) ||
	    (XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_32BITINODES))
		fileid_type |= XFS_FILEID_TYPE_64FLAG;

	/*
	 * Only encode if there is enough space given.  In practice
	 * this means we can't export a filesystem with 64bit inodes
	 * over NFSv2 with the subtree_check export option; the other
	 * seven combinations work.  The real answer is "don't use v2".
	 */
	len = xfs_fileid_length(fileid_type);
	if (*max_len < len) {
		*max_len = len;
		return 255;
	}
	*max_len = len;

	switch (fileid_type) {
	case FILEID_INO32_GEN_PARENT:
		spin_lock(&dentry->d_lock);
		fid->i32.parent_ino = dentry->d_parent->d_inode->i_ino;
		fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation;
		spin_unlock(&dentry->d_lock);
		/*FALLTHRU*/
	case FILEID_INO32_GEN:
		fid->i32.ino = inode->i_ino;
		fid->i32.gen = inode->i_generation;
		break;
	case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
		spin_lock(&dentry->d_lock);
		fid64->parent_ino = dentry->d_parent->d_inode->i_ino;
		fid64->parent_gen = dentry->d_parent->d_inode->i_generation;
		spin_unlock(&dentry->d_lock);
		/*FALLTHRU*/
	case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
		fid64->ino = inode->i_ino;
		fid64->gen = inode->i_generation;
		break;
	}

	return fileid_type;
}
Developer: 119-org, Project: hi3518-osdrv, Lines: 69, Source: xfs_export.c
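
The d_lock sections above exist so that parent_ino and parent_gen are read from the same d_parent; without the lock, a concurrent rename could repoint d_parent between the two reads and the handle would mix fields from two different parents. A minimal sketch of snapshotting related fields under one lock (names illustrative):

#include <linux/spinlock.h>

struct demo_parent {
	unsigned long ino, gen;
};

struct demo_entry {
	spinlock_t lock;
	struct demo_parent *parent;	/* may be repointed concurrently */
};

static void demo_snapshot_parent(struct demo_entry *e,
				 unsigned long *ino, unsigned long *gen)
{
	spin_lock(&e->lock);
	*ino = e->parent->ino;		/* both reads see the same parent... */
	*gen = e->parent->gen;		/* ...because the lock spans them */
	spin_unlock(&e->lock);
}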


Note: The spin_unlock function examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's License. Do not reproduce without permission.