This article collects and summarizes typical usage examples of the CDEBUG function in C++. If you have been wondering what exactly CDEBUG does, how to use it, or where to find real usage examples, the curated code examples below may help.
The following presents 15 code examples of the CDEBUG function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
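Before the examples, a quick orientation: CDEBUG(mask, format, ...) is Lustre's printf-style kernel debug macro, and its first argument is a debug-mask bit (D_INFO, D_NET, D_ERROR, ...) that decides whether the message is emitted. Below is a minimal, self-contained user-space sketch of that pattern, for illustration only: the mask values here are made up, and the real libcfs macro additionally records the calling function, supports rate limiting, and writes to Lustre's internal debug log rather than stderr.

#include <stdio.h>

/* Stand-in mask bits for illustration; the real values live in the
 * Lustre/libcfs headers. */
#define D_INFO  0x0001
#define D_NET   0x0002
#define D_ERROR 0x0004

/* Runtime-selected set of enabled masks, playing the role of the
 * libcfs debug knob. */
static unsigned int debug_mask = D_INFO | D_ERROR;

/* Print only when the message's mask is enabled, prefixed with the
 * source location -- roughly what the real macro does. */
#define CDEBUG(mask, fmt, ...)                                        \
do {                                                                  \
        if ((mask) & debug_mask)                                      \
                fprintf(stderr, "(%s:%d) " fmt, __FILE__, __LINE__,   \
                        ##__VA_ARGS__);                               \
} while (0)

int main(void)
{
        CDEBUG(D_INFO, "visible: rc = %d\n", 0);     /* printed */
        CDEBUG(D_NET, "suppressed: D_NET is off\n"); /* not printed */
        return 0;
}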
Example 1: ll_getxattr_common
static
int ll_getxattr_common(struct inode *inode, const char *name,
void *buffer, size_t size, __u64 valid)
{
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ptlrpc_request *req = NULL;
struct mdt_body *body;
int xattr_type, rc;
void *xdata;
struct obd_capa *oc;
struct rmtacl_ctl_entry *rce = NULL;
struct ll_inode_info *lli = ll_i2info(inode);
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n",
inode->i_ino, inode->i_generation, inode);
/* listxattr has slightly different behavior from ext3:
* without 'user_xattr', ext3 will list all xattr names but
* filter out "^user..*"; we list them all for simplicity.
*/
if (!name) {
xattr_type = XATTR_OTHER_T;
goto do_getxattr;
}
xattr_type = get_xattr_type(name);
rc = xattr_type_filter(sbi, xattr_type);
if (rc)
return rc;
/* b15587: ignore security.capability xattr for now */
if ((xattr_type == XATTR_SECURITY_T &&
strcmp(name, "security.capability") == 0))
return -ENODATA;
/* LU-549: Disable security.selinux when selinux is disabled */
if (xattr_type == XATTR_SECURITY_T && !selinux_is_enabled() &&
strcmp(name, "security.selinux") == 0)
return -EOPNOTSUPP;
#ifdef CONFIG_FS_POSIX_ACL
if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
(xattr_type == XATTR_ACL_ACCESS_T ||
xattr_type == XATTR_ACL_DEFAULT_T)) {
rce = rct_search(&sbi->ll_rct, current_pid());
if (rce == NULL ||
(rce->rce_ops != RMT_LSETFACL &&
rce->rce_ops != RMT_LGETFACL &&
rce->rce_ops != RMT_RSETFACL &&
rce->rce_ops != RMT_RGETFACL))
return -EOPNOTSUPP;
}
/* The POSIX ACL is under the protection of the LOOKUP lock. By the time
* this is called, we have just done path resolution to the target inode,
* so there is a good chance the cached ACL is up to date.
*/
if (xattr_type == XATTR_ACL_ACCESS_T &&
!(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {
struct posix_acl *acl;
spin_lock(&lli->lli_lock);
acl = posix_acl_dup(lli->lli_posix_acl);
spin_unlock(&lli->lli_lock);
if (!acl)
return -ENODATA;
rc = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
posix_acl_release(acl);
return rc;
}
if (xattr_type == XATTR_ACL_DEFAULT_T && !S_ISDIR(inode->i_mode))
return -ENODATA;
#endif
do_getxattr:
if (sbi->ll_xattr_cache_enabled && xattr_type != XATTR_ACL_ACCESS_T) {
rc = ll_xattr_cache_get(inode, name, buffer, size, valid);
if (rc == -EAGAIN)
goto getxattr_nocache;
if (rc < 0)
goto out_xattr;
/* Add "system.posix_acl_access" to the list */
if (lli->lli_posix_acl != NULL && valid & OBD_MD_FLXATTRLS) {
if (size == 0) {
rc += sizeof(XATTR_NAME_ACL_ACCESS);
} else if (size - rc >= sizeof(XATTR_NAME_ACL_ACCESS)) {
memcpy(buffer + rc, XATTR_NAME_ACL_ACCESS,
sizeof(XATTR_NAME_ACL_ACCESS));
rc += sizeof(XATTR_NAME_ACL_ACCESS);
} else {
rc = -ERANGE;
goto out_xattr;
}
}
} else {
getxattr_nocache:
//......... part of the code omitted .........
Example 2: llog_process_thread
static int llog_process_thread(void *arg)
{
struct llog_process_info *lpi = arg;
struct llog_handle *loghandle = lpi->lpi_loghandle;
struct llog_log_hdr *llh = loghandle->lgh_hdr;
struct llog_process_cat_data *cd = lpi->lpi_catdata;
char *buf;
__u64 cur_offset = LLOG_CHUNK_SIZE;
__u64 last_offset;
int rc = 0, index = 1, last_index;
int saved_index = 0;
int last_called_index = 0;
LASSERT(llh);
OBD_ALLOC(buf, LLOG_CHUNK_SIZE);
if (!buf) {
lpi->lpi_rc = -ENOMEM;
return 0;
}
if (cd != NULL) {
last_called_index = cd->lpcd_first_idx;
index = cd->lpcd_first_idx + 1;
}
if (cd != NULL && cd->lpcd_last_idx)
last_index = cd->lpcd_last_idx;
else
last_index = LLOG_BITMAP_BYTES * 8 - 1;
while (rc == 0) {
struct llog_rec_hdr *rec;
/* skip records not set in bitmap */
while (index <= last_index &&
!ext2_test_bit(index, llh->llh_bitmap))
++index;
LASSERT(index <= last_index + 1);
if (index == last_index + 1)
break;
repeat:
CDEBUG(D_OTHER, "index: %d last_index %d\n",
index, last_index);
/* get the buf with our target record; avoid old garbage */
memset(buf, 0, LLOG_CHUNK_SIZE);
last_offset = cur_offset;
rc = llog_next_block(lpi->lpi_env, loghandle, &saved_index,
index, &cur_offset, buf, LLOG_CHUNK_SIZE);
if (rc)
GOTO(out, rc);
/* NB: when rec->lrh_len is accessed it is already swabbed
* since it is used at the "end" of the loop and the rec
* swabbing is done at the beginning of the loop. */
for (rec = (struct llog_rec_hdr *)buf;
(char *)rec < buf + LLOG_CHUNK_SIZE;
rec = (struct llog_rec_hdr *)((char *)rec + rec->lrh_len)){
CDEBUG(D_OTHER, "processing rec 0x%p type %#x\n",
rec, rec->lrh_type);
if (LLOG_REC_HDR_NEEDS_SWABBING(rec))
lustre_swab_llog_rec(rec);
CDEBUG(D_OTHER, "after swabbing, type=%#x idx=%d\n",
rec->lrh_type, rec->lrh_index);
if (rec->lrh_index == 0) {
/* probably another rec just got added? */
if (index <= loghandle->lgh_last_idx)
GOTO(repeat, rc = 0);
GOTO(out, rc = 0); /* no more records */
}
if (rec->lrh_len == 0 ||
rec->lrh_len > LLOG_CHUNK_SIZE) {
CWARN("invalid length %d in llog record for "
"index %d/%d\n", rec->lrh_len,
rec->lrh_index, index);
GOTO(out, rc = -EINVAL);
}
if (rec->lrh_index < index) {
CDEBUG(D_OTHER, "skipping lrh_index %d\n",
rec->lrh_index);
continue;
}
CDEBUG(D_OTHER,
"lrh_index: %d lrh_len: %d (%d remains)\n",
rec->lrh_index, rec->lrh_len,
(int)(buf + LLOG_CHUNK_SIZE - (char *)rec));
loghandle->lgh_cur_idx = rec->lrh_index;
loghandle->lgh_cur_offset = (char *)rec - (char *)buf +
last_offset;
/* if set, process the callback on this record */
if (ext2_test_bit(index, llh->llh_bitmap)) {
//......... part of the code omitted .........
Example 3: request_in_callback
/*
* Server's incoming request callback
*/
void request_in_callback(lnet_event_t *ev)
{
struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
struct ptlrpc_service *service = svcpt->scp_service;
struct ptlrpc_request *req;
LASSERT(ev->type == LNET_EVENT_PUT ||
ev->type == LNET_EVENT_UNLINK);
LASSERT((char *)ev->md.start >= rqbd->rqbd_buffer);
LASSERT((char *)ev->md.start + ev->offset + ev->mlength <=
rqbd->rqbd_buffer + service->srv_buf_size);
CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
"event type %d, status %d, service %s\n",
ev->type, ev->status, service->srv_name);
if (ev->unlinked) {
/* If this is the last request message to fit in the
* request buffer we can use the request object embedded in
* rqbd. Note that if we failed to allocate a request,
* we'd have to re-post the rqbd, which we can't do in this
* context. */
req = &rqbd->rqbd_req;
memset(req, 0, sizeof(*req));
} else {
LASSERT(ev->type == LNET_EVENT_PUT);
if (ev->status != 0) {
/* We moaned above already... */
return;
}
req = ptlrpc_request_cache_alloc(GFP_ATOMIC);
if (req == NULL) {
CERROR("Can't allocate incoming request descriptor: "
"Dropping %s RPC from %s\n",
service->srv_name,
libcfs_id2str(ev->initiator));
return;
}
}
/* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
* flags are reset and scalars are zero. We only set the message
* size to non-zero if this was a successful receive. */
req->rq_xid = ev->match_bits;
req->rq_reqbuf = ev->md.start + ev->offset;
if (ev->type == LNET_EVENT_PUT && ev->status == 0)
req->rq_reqdata_len = ev->mlength;
do_gettimeofday(&req->rq_arrival_time);
req->rq_peer = ev->initiator;
req->rq_self = ev->target.nid;
req->rq_rqbd = rqbd;
req->rq_phase = RQ_PHASE_NEW;
spin_lock_init(&req->rq_lock);
INIT_LIST_HEAD(&req->rq_timed_list);
INIT_LIST_HEAD(&req->rq_exp_list);
atomic_set(&req->rq_refcount, 1);
if (ev->type == LNET_EVENT_PUT)
CDEBUG(D_INFO, "incoming [email protected]%p x%llu msgsize %u\n",
req, req->rq_xid, ev->mlength);
CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer));
spin_lock(&svcpt->scp_lock);
ptlrpc_req_add_history(svcpt, req);
if (ev->unlinked) {
svcpt->scp_nrqbds_posted--;
CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n",
svcpt->scp_nrqbds_posted);
/* Normally, don't complain about 0 buffers posted; LNET won't
* drop incoming reqs since we set the portal lazy */
if (test_req_buffer_pressure &&
ev->type != LNET_EVENT_UNLINK &&
svcpt->scp_nrqbds_posted == 0)
CWARN("All %s request buffers busy\n",
service->srv_name);
/* req takes over the network's ref on rqbd */
} else {
/* req takes a ref on rqbd */
rqbd->rqbd_refcount++;
}
list_add_tail(&req->rq_list, &svcpt->scp_req_incoming);
svcpt->scp_nreqs_incoming++;
/* NB everything can disappear under us once the request
* has been queued and we unlock, so do the wake now... */
wake_up(&svcpt->scp_waitq);
spin_unlock(&svcpt->scp_lock);
}
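One detail worth noting in request_in_callback: because the mask is an ordinary argument, it can be chosen at runtime, as in CDEBUG((ev->status == 0) ? D_NET : D_ERROR, ...) above. The same message is filed under the network-trace mask on success but under the error mask when the LNet event reports a failure, so log severity tracks the event's outcome.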
Example 4: out_attr_get
static int out_attr_get(struct tgt_session_info *tsi)
{
const struct lu_env *env = tsi->tsi_env;
struct tgt_thread_info *tti = tgt_th_info(env);
struct obdo *obdo = &tti->tti_u.update.tti_obdo;
struct lu_attr *la = &tti->tti_attr;
struct dt_object *obj = tti->tti_u.update.tti_dt_object;
int idx = tti->tti_u.update.tti_update_reply_index;
int rc;
ENTRY;
if (!lu_object_exists(&obj->do_lu)) {
/* Usually, this will be called when the master MDT tries
* to init a remote object (see osp_object_init), so if
* the object does not exist on the slave, we need to set the BANSHEE
* flag so the object can be removed from the cache immediately */
set_bit(LU_OBJECT_HEARD_BANSHEE,
&obj->do_lu.lo_header->loh_flags);
RETURN(-ENOENT);
}
dt_read_lock(env, obj, MOR_TGT_CHILD);
rc = dt_attr_get(env, obj, la, NULL);
if (rc)
GOTO(out_unlock, rc);
/*
* If it is a directory, we will also check whether the
* directory is empty.
* la_flags = 0 : Empty.
* = 1 : Not empty.
*/
la->la_flags = 0;
if (S_ISDIR(la->la_mode)) {
struct dt_it *it;
const struct dt_it_ops *iops;
if (!dt_try_as_dir(env, obj))
GOTO(out_unlock, rc = -ENOTDIR);
iops = &obj->do_index_ops->dio_it;
it = iops->init(env, obj, LUDA_64BITHASH, BYPASS_CAPA);
if (!IS_ERR(it)) {
int result;
result = iops->get(env, it, (const void *)"");
if (result > 0) {
int i;
for (result = 0, i = 0; result == 0 && i < 3;
++i)
result = iops->next(env, it);
if (result == 0)
la->la_flags = 1;
} else if (result == 0)
/*
* Huh? Index contains no zero key?
*/
rc = -EIO;
iops->put(env, it);
iops->fini(env, it);
}
}
obdo->o_valid = 0;
obdo_from_la(obdo, la, la->la_valid);
obdo_cpu_to_le(obdo, obdo);
lustre_set_wire_obdo(NULL, obdo, obdo);
out_unlock:
dt_read_unlock(env, obj);
CDEBUG(D_INFO, "%s: insert attr get reply %p index %d: rc = %d\n",
tgt_name(tsi->tsi_tgt), tti->tti_u.update.tti_update_reply,
0, rc);
object_update_result_insert(tti->tti_u.update.tti_update_reply, obdo,
sizeof(*obdo), idx, rc);
RETURN(rc);
}
Example 5: lquota_info
/*
* Look-up/create a global index file.
*
* \param env - is the environment passed by the caller
* \param dev - is the dt_device where to lookup/create the global index file
* \param parent - is the parent directory where to create the global index if
* not found
* \param fid - is the fid of the global index to be looked up/created
* \param local - indicates whether the index should be created with a locally
* generated fid or with \a fid
*
* \retval - pointer to the dt_object of the global index on success,
* appropriate error on failure
*/
struct dt_object *lquota_disk_glb_find_create(const struct lu_env *env,
struct dt_device *dev,
struct dt_object *parent,
struct lu_fid *fid, bool local)
{
struct lquota_thread_info *qti = lquota_info(env);
struct dt_object *glb_idx;
const struct dt_index_features *idx_feat;
ENTRY;
CDEBUG(D_QUOTA, "look-up/create %sglobal idx file ("DFID")\n",
local ? "local " : "", PFID(fid));
#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 53, 0)
/* we use a different index feature for each quota type and target type
* for the time being. This is done for on-disk conversion from the old
* quota format. Once this is no longer required, we should just be
* using dt_quota_glb_features for all global index files */
idx_feat = glb_idx_feature(fid);
#else
idx_feat = &dt_quota_glb_features;
#endif
/* the filename is composed of the most significant bits of the FID,
* that's to say the oid which encodes the pool id, pool type and quota
* type */
sprintf(qti->qti_buf, "0x%x", fid->f_oid);
if (local) {
/* We use the sequence reserved for local named objects */
lu_local_name_obj_fid(&qti->qti_fid, 1);
glb_idx = lquota_disk_find_create(env, dev, parent,
&qti->qti_fid, idx_feat,
qti->qti_buf);
} else {
/* look-up/create global index on disk */
glb_idx = local_index_find_or_create_with_fid(env, dev, fid,
parent,
qti->qti_buf,
LQUOTA_MODE,
idx_feat);
}
if (IS_ERR(glb_idx)) {
CERROR("%s: failed to look-up/create idx file "DFID" rc:%ld "
"local:%d\n", dev->dd_lu_dev.ld_obd->obd_name,
PFID(fid), PTR_ERR(glb_idx), local);
RETURN(glb_idx);
}
/* install index operation vector */
if (glb_idx->do_index_ops == NULL) {
int rc;
rc = glb_idx->do_ops->do_index_try(env, glb_idx, idx_feat);
if (rc) {
CERROR("%s: failed to setup index operations for "DFID
" rc:%d\n", dev->dd_lu_dev.ld_obd->obd_name,
PFID(lu_object_fid(&glb_idx->do_lu)), rc);
lu_object_put(env, &glb_idx->do_lu);
glb_idx = ERR_PTR(rc);
}
}
RETURN(glb_idx);
}
Example 6: lru_size_store
static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
const char *buffer, size_t count)
{
struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
ns_kobj);
unsigned long tmp;
int lru_resize;
int err;
if (strncmp(buffer, "clear", 5) == 0) {
CDEBUG(D_DLMTRACE,
"dropping all unused locks from namespace %s\n",
ldlm_ns_name(ns));
if (ns_connect_lru_resize(ns)) {
int canceled, unused = ns->ns_nr_unused;
/* Try to cancel all @ns_nr_unused locks. */
canceled = ldlm_cancel_lru(ns, unused, 0,
LDLM_CANCEL_PASSED);
if (canceled < unused) {
CDEBUG(D_DLMTRACE,
"not all requested locks are canceled, requested: %d, canceled: %d\n",
unused,
canceled);
return -EINVAL;
}
} else {
tmp = ns->ns_max_unused;
ns->ns_max_unused = 0;
ldlm_cancel_lru(ns, 0, 0, LDLM_CANCEL_PASSED);
ns->ns_max_unused = tmp;
}
return count;
}
err = kstrtoul(buffer, 10, &tmp);
if (err != 0) {
CERROR("lru_size: invalid value written\n");
return -EINVAL;
}
lru_resize = (tmp == 0);
if (ns_connect_lru_resize(ns)) {
if (!lru_resize)
ns->ns_max_unused = (unsigned int)tmp;
if (tmp > ns->ns_nr_unused)
tmp = ns->ns_nr_unused;
tmp = ns->ns_nr_unused - tmp;
CDEBUG(D_DLMTRACE,
"changing namespace %s unused locks from %u to %u\n",
ldlm_ns_name(ns), ns->ns_nr_unused,
(unsigned int)tmp);
ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_CANCEL_PASSED);
if (!lru_resize) {
CDEBUG(D_DLMTRACE,
"disable lru_resize for namespace %s\n",
ldlm_ns_name(ns));
ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
}
} else {
CDEBUG(D_DLMTRACE,
"changing namespace %s max_unused from %u to %u\n",
ldlm_ns_name(ns), ns->ns_max_unused,
(unsigned int)tmp);
ns->ns_max_unused = (unsigned int)tmp;
ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_CANCEL_PASSED);
/* Make sure that LRU resize was originally supported before
* turning it on here.
*/
if (lru_resize &&
(ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
CDEBUG(D_DLMTRACE,
"enable lru_resize for namespace %s\n",
ldlm_ns_name(ns));
ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
}
}
return count;
}
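For context, lru_size_store is the write handler behind the per-namespace lru_size tunable: writing the string "clear" drops all unused locks, writing 0 turns LRU resizing on (if the server originally advertised OBD_CONNECT_LRU_RESIZE), and writing a positive number pins the LRU to that size while disabling resizing. The exact tunable path varies by Lustre version; on sysfs-based clients it is expected under /sys/fs/lustre/ldlm/namespaces/<namespace>/lru_size, but treat that path as an assumption.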
Example 7: lustre_start_mgc
//......... part of the code omitted .........
/* Re-using an existing MGC */
atomic_inc(&obd->u.cli.cl_mgc_refcount);
/* IR compatibility check, only for clients */
if (lmd_is_client(lsi->lsi_lmd)) {
int has_ir;
int vallen = sizeof(*data);
__u32 *flags = &lsi->lsi_lmd->lmd_flags;
rc = obd_get_info(NULL, obd->obd_self_export,
strlen(KEY_CONN_DATA), KEY_CONN_DATA,
&vallen, data, NULL);
LASSERT(rc == 0);
has_ir = OCD_HAS_FLAG(data, IMP_RECOV);
if (has_ir ^ !(*flags & LMD_FLG_NOIR)) {
/* LMD_FLG_NOIR is for test purpose only */
LCONSOLE_WARN(
"Trying to mount a client with IR setting "
"not compatible with current mgc. "
"Force to use current mgc setting that is "
"IR %s.\n",
has_ir ? "enabled" : "disabled");
if (has_ir)
*flags &= ~LMD_FLG_NOIR;
else
*flags |= LMD_FLG_NOIR;
}
}
recov_bk = 0;
/* If we are restarting the MGS, don't try to keep the MGC's
old connection, or registration will fail. */
if (IS_MGS(lsi)) {
CDEBUG(D_MOUNT, "New MGS with live MGC\n");
recov_bk = 1;
}
/* Try all connections, but only once (again).
We don't want to block another target from starting
(using its local copy of the log), but we do want to connect
if at all possible. */
recov_bk++;
CDEBUG(D_MOUNT, "%s: Set MGC reconnect %d\n", mgcname,
recov_bk);
rc = obd_set_info_async(NULL, obd->obd_self_export,
sizeof(KEY_INIT_RECOV_BACKUP),
KEY_INIT_RECOV_BACKUP,
sizeof(recov_bk), &recov_bk, NULL);
rc = 0;
goto out;
}
CDEBUG(D_MOUNT, "Start MGC '%s'\n", mgcname);
/* Add the primary nids for the MGS */
i = 0;
sprintf(niduuid, "%s_%x", mgcname, i);
if (IS_SERVER(lsi)) {
ptr = lsi->lsi_lmd->lmd_mgs;
if (IS_MGS(lsi)) {
/* Use local nids (including LO) */
lnet_process_id_t id;
while ((rc = LNetGetId(i++, &id)) != -ENOENT) {
rc = do_lcfg(mgcname, id.nid,
LCFG_ADD_UUID, niduuid,
NULL, NULL, NULL);
Example 8: lov_init_sub
static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
struct cl_object *stripe, struct lov_layout_raid0 *r0,
int idx)
{
struct cl_object_header *hdr;
struct cl_object_header *subhdr;
struct cl_object_header *parent;
struct lov_oinfo *oinfo;
int result;
if (OBD_FAIL_CHECK(OBD_FAIL_LOV_INIT)) {
/* For sanity:test_206.
* Do not leave the object in cache to avoid accessing
* freed memory. This is because osc_object is referring to
* lov_oinfo of lsm_stripe_data which will be freed due to
* this failure.
*/
cl_object_kill(env, stripe);
cl_object_put(env, stripe);
return -EIO;
}
hdr = cl_object_header(lov2cl(lov));
subhdr = cl_object_header(stripe);
oinfo = lov->lo_lsm->lsm_oinfo[idx];
CDEBUG(D_INODE, DFID"@%p[%d] -> "DFID"@%p: ostid: "DOSTID
" idx: %d gen: %d\n",
PFID(&subhdr->coh_lu.loh_fid), subhdr, idx,
PFID(&hdr->coh_lu.loh_fid), hdr, POSTID(&oinfo->loi_oi),
oinfo->loi_ost_idx, oinfo->loi_ost_gen);
/* reuse ->coh_attr_guard to protect coh_parent change */
spin_lock(&subhdr->coh_attr_guard);
parent = subhdr->coh_parent;
if (!parent) {
subhdr->coh_parent = hdr;
spin_unlock(&subhdr->coh_attr_guard);
subhdr->coh_nesting = hdr->coh_nesting + 1;
lu_object_ref_add(&stripe->co_lu, "lov-parent", lov);
r0->lo_sub[idx] = cl2lovsub(stripe);
r0->lo_sub[idx]->lso_super = lov;
r0->lo_sub[idx]->lso_index = idx;
result = 0;
} else {
struct lu_object *old_obj;
struct lov_object *old_lov;
unsigned int mask = D_INODE;
spin_unlock(&subhdr->coh_attr_guard);
old_obj = lu_object_locate(&parent->coh_lu, &lov_device_type);
LASSERT(old_obj);
old_lov = cl2lov(lu2cl(old_obj));
if (old_lov->lo_layout_invalid) {
/* the object's layout has already changed but isn't
* refreshed
*/
lu_object_unhash(env, &stripe->co_lu);
result = -EAGAIN;
} else {
mask = D_ERROR;
result = -EIO;
}
LU_OBJECT_DEBUG(mask, env, &stripe->co_lu,
"stripe %d is already owned.", idx);
LU_OBJECT_DEBUG(mask, env, old_obj, "owned.");
LU_OBJECT_HEADER(mask, env, lov2lu(lov), "try to own.\n");
cl_object_put(env, stripe);
}
return result;
}
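Example 8 also shows that the lu_object debugging helpers take the same masks as CDEBUG: lov_init_sub starts with mask = D_INODE for the benign already-changed-layout case (returning -EAGAIN) and escalates to D_ERROR for the genuinely unexpected ownership conflict (returning -EIO), so the severity of the log matches the severity of the outcome.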
Example 9: lov_layout_change
static int lov_layout_change(const struct lu_env *unused,
struct lov_object *lov,
const struct cl_object_conf *conf)
{
int result;
enum lov_layout_type llt = LLT_EMPTY;
union lov_layout_state *state = &lov->u;
const struct lov_layout_operations *old_ops;
const struct lov_layout_operations *new_ops;
void *cookie;
struct lu_env *env;
int refcheck;
LASSERT(0 <= lov->lo_type && lov->lo_type < ARRAY_SIZE(lov_dispatch));
if (conf->u.coc_md)
llt = lov_type(conf->u.coc_md->lsm);
LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch));
cookie = cl_env_reenter();
env = cl_env_get(&refcheck);
if (IS_ERR(env)) {
cl_env_reexit(cookie);
return PTR_ERR(env);
}
CDEBUG(D_INODE, DFID" from %s to %s\n",
PFID(lu_object_fid(lov2lu(lov))),
llt2str(lov->lo_type), llt2str(llt));
old_ops = &lov_dispatch[lov->lo_type];
new_ops = &lov_dispatch[llt];
result = cl_object_prune(env, &lov->lo_cl);
if (result != 0)
goto out;
result = old_ops->llo_delete(env, lov, &lov->u);
if (result == 0) {
old_ops->llo_fini(env, lov, &lov->u);
LASSERT(atomic_read(&lov->lo_active_ios) == 0);
lov->lo_type = LLT_EMPTY;
result = new_ops->llo_init(env,
lu2lov_dev(lov->lo_cl.co_lu.lo_dev),
lov, conf, state);
if (result == 0) {
new_ops->llo_install(env, lov, state);
lov->lo_type = llt;
} else {
new_ops->llo_delete(env, lov, state);
new_ops->llo_fini(env, lov, state);
/* this file becomes an EMPTY file. */
}
}
out:
cl_env_put(env, &refcheck);
cl_env_reexit(cookie);
return result;
}
Example 10: ll_page_mkwrite0
/* Shared code for the page_mkwrite method on RHEL5 and RHEL6 */
static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
bool *retry)
{
struct lu_env *env;
struct cl_io *io;
struct vvp_io *vio;
int result;
__u16 refcheck;
sigset_t set;
struct inode *inode;
struct ll_inode_info *lli;
ENTRY;
LASSERT(vmpage != NULL);
env = cl_env_get(&refcheck);
if (IS_ERR(env))
RETURN(PTR_ERR(env));
io = ll_fault_io_init(env, vma, vmpage->index, NULL);
if (IS_ERR(io))
GOTO(out, result = PTR_ERR(io));
result = io->ci_result;
if (result < 0)
GOTO(out_io, result);
io->u.ci_fault.ft_mkwrite = 1;
io->u.ci_fault.ft_writable = 1;
vio = vvp_env_io(env);
vio->u.fault.ft_vma = vma;
vio->u.fault.ft_vmpage = vmpage;
set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
inode = vvp_object_inode(io->ci_obj);
lli = ll_i2info(inode);
result = cl_io_loop(env, io);
cfs_restore_sigs(set);
if (result == 0) {
lock_page(vmpage);
if (vmpage->mapping == NULL) {
unlock_page(vmpage);
/* page was truncated and lock was cancelled, return
* ENODATA so that VM_FAULT_NOPAGE will be returned
* to handle_mm_fault(). */
if (result == 0)
result = -ENODATA;
} else if (!PageDirty(vmpage)) {
/* race, the page has been cleaned by ptlrpcd after
* it was unlocked, it has to be added into dirty
* cache again otherwise this soon-to-dirty page won't
* consume any grants, even worse if this page is being
* transferred because it will break RPC checksum.
*/
unlock_page(vmpage);
CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has "
"been written out, retry.\n",
vmpage, vmpage->index);
*retry = true;
result = -EAGAIN;
}
if (result == 0)
ll_file_set_flag(lli, LLIF_DATA_MODIFIED);
}
EXIT;
out_io:
cl_io_fini(env, io);
out:
cl_env_put(env, &refcheck);
CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
LASSERT(ergo(result == 0, PageLocked(vmpage)));
return result;
}
Example 11: ll_fault0
/**
* Lustre implementation of a vm_operations_struct::fault() method, called by
* the VM to serve page faults (both in kernel and user space).
*
* \param vma - virtual memory area struct related to the page fault
* \param vmf - structure which describes the fault type and address
*
* \return allocated and filled _locked_ page for the faulted address
* \retval VM_FAULT_ERROR on general error
* \retval NOPAGE_OOM when there is no memory to allocate a new page
*/
static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct lu_env *env;
struct cl_io *io;
struct vvp_io *vio = NULL;
struct page *vmpage;
unsigned long ra_flags;
int result = 0;
int fault_ret = 0;
__u16 refcheck;
ENTRY;
env = cl_env_get(&refcheck);
if (IS_ERR(env))
RETURN(PTR_ERR(env));
if (ll_sbi_has_fast_read(ll_i2sbi(file_inode(vma->vm_file)))) {
/* do fast fault */
ll_cl_add(vma->vm_file, env, NULL, LCC_MMAP);
fault_ret = filemap_fault(vma, vmf);
ll_cl_remove(vma->vm_file, env);
/* - If there is no error, then the page was found in cache and
* uptodate;
* - If VM_FAULT_RETRY is set, the page existed but failed to
* lock. It will return to kernel and retry;
* - Otherwise, it should try normal fault under DLM lock. */
if ((fault_ret & VM_FAULT_RETRY) ||
!(fault_ret & VM_FAULT_ERROR))
GOTO(out, result = 0);
fault_ret = 0;
}
io = ll_fault_io_init(env, vma, vmf->pgoff, &ra_flags);
if (IS_ERR(io))
GOTO(out, result = PTR_ERR(io));
result = io->ci_result;
if (result == 0) {
vio = vvp_env_io(env);
vio->u.fault.ft_vma = vma;
vio->u.fault.ft_vmpage = NULL;
vio->u.fault.ft_vmf = vmf;
vio->u.fault.ft_flags = 0;
vio->u.fault.ft_flags_valid = 0;
/* May call ll_readpage() */
ll_cl_add(vma->vm_file, env, io, LCC_MMAP);
result = cl_io_loop(env, io);
ll_cl_remove(vma->vm_file, env);
/* ft_flags are only valid if we reached
* the call to filemap_fault */
if (vio->u.fault.ft_flags_valid)
fault_ret = vio->u.fault.ft_flags;
vmpage = vio->u.fault.ft_vmpage;
if (result != 0 && vmpage != NULL) {
put_page(vmpage);
vmf->page = NULL;
}
}
cl_io_fini(env, io);
vma->vm_flags |= ra_flags;
out:
cl_env_put(env, &refcheck);
if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
fault_ret |= to_fault_error(result);
CDEBUG(D_MMAP, "%s fault %d/%d\n", current->comm, fault_ret, result);
RETURN(fault_ret);
}
Example 12: pool_getref
/**
* Get a reference on the specified pool.
*
* To ensure the pool descriptor is not freed before the caller is finished
* with it. Any process that is accessing \a pool directly needs to hold
* reference on it, including /proc since a userspace thread may be holding
* the /proc file open and busy in the kernel.
*
* \param[in] pool pool descriptor on which to gain reference
*/
static void pool_getref(struct pool_desc *pool)
{
CDEBUG(D_INFO, "pool %p\n", pool);
atomic_inc(&pool->pool_refcount);
}
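pool_getref is one half of the usual get/put reference-counting pair. As a hedged sketch only (pool_putref_free is a hypothetical helper name, and the real release path also has to tear down the pool's hash table and proc/sysfs entries), the matching put side would look roughly like this:

static void pool_putref(struct pool_desc *pool)
{
        CDEBUG(D_INFO, "pool %p\n", pool);
        /* Free the descriptor only when the last reference is dropped. */
        if (atomic_dec_and_test(&pool->pool_refcount))
                pool_putref_free(pool); /* hypothetical teardown helper */
}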
Example 13: ll_listxattr
ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
struct inode *inode = dentry->d_inode;
int rc = 0, rc2 = 0;
struct lov_mds_md *lmm = NULL;
struct ptlrpc_request *request = NULL;
int lmmsize;
LASSERT(inode);
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n",
inode->i_ino, inode->i_generation, inode);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LISTXATTR, 1);
rc = ll_getxattr_common(inode, NULL, buffer, size, OBD_MD_FLXATTRLS);
if (rc < 0)
goto out;
if (buffer != NULL) {
struct ll_sb_info *sbi = ll_i2sbi(inode);
char *xattr_name = buffer;
int xlen, rem = rc;
while (rem > 0) {
xlen = strnlen(xattr_name, rem - 1) + 1;
rem -= xlen;
if (xattr_type_filter(sbi,
get_xattr_type(xattr_name)) == 0) {
/* skip an OK xattr type,
* leaving it in the buffer
*/
xattr_name += xlen;
continue;
}
/* move up remaining xattrs in buffer
* removing the xattr that is not OK
*/
memmove(xattr_name, xattr_name + xlen, rem);
rc -= xlen;
}
}
if (S_ISREG(inode->i_mode)) {
if (!ll_i2info(inode)->lli_has_smd)
rc2 = -1;
} else if (S_ISDIR(inode->i_mode)) {
rc2 = ll_dir_getstripe(inode, &lmm, &lmmsize, &request);
}
if (rc2 < 0) {
rc2 = 0;
goto out;
} else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)) {
const int prefix_len = sizeof(XATTR_LUSTRE_PREFIX) - 1;
const size_t name_len = sizeof("lov") - 1;
const size_t total_len = prefix_len + name_len + 1;
if (((rc + total_len) > size) && (buffer != NULL)) {
ptlrpc_req_finished(request);
return -ERANGE;
}
if (buffer != NULL) {
buffer += rc;
memcpy(buffer, XATTR_LUSTRE_PREFIX, prefix_len);
memcpy(buffer + prefix_len, "lov", name_len);
buffer[prefix_len + name_len] = '\0';
}
rc2 = total_len;
}
out:
ptlrpc_req_finished(request);
rc = rc + rc2;
return rc;
}
Example 14: ll_getxattr
ssize_t ll_getxattr(struct dentry *dentry, const char *name,
void *buffer, size_t size)
{
struct inode *inode = dentry->d_inode;
LASSERT(inode);
LASSERT(name);
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), xattr %s\n",
inode->i_ino, inode->i_generation, inode, name);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1);
if ((strncmp(name, XATTR_TRUSTED_PREFIX,
sizeof(XATTR_TRUSTED_PREFIX) - 1) == 0 &&
strcmp(name + sizeof(XATTR_TRUSTED_PREFIX) - 1, "lov") == 0) ||
(strncmp(name, XATTR_LUSTRE_PREFIX,
sizeof(XATTR_LUSTRE_PREFIX) - 1) == 0 &&
strcmp(name + sizeof(XATTR_LUSTRE_PREFIX) - 1, "lov") == 0)) {
struct lov_stripe_md *lsm;
struct lov_user_md *lump;
struct lov_mds_md *lmm = NULL;
struct ptlrpc_request *request = NULL;
int rc = 0, lmmsize = 0;
if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
return -ENODATA;
if (size == 0 && S_ISDIR(inode->i_mode)) {
/* XXX directory EA size is fixed for now; optimize to save
* RPC transfers */
rc = sizeof(struct lov_user_md);
goto out;
}
lsm = ccc_inode_lsm_get(inode);
if (lsm == NULL) {
if (S_ISDIR(inode->i_mode)) {
rc = ll_dir_getstripe(inode, &lmm,
&lmmsize, &request);
} else {
rc = -ENODATA;
}
} else {
/* LSM is present already after lookup/getattr call.
* we need to grab layout lock once it is implemented */
rc = obd_packmd(ll_i2dtexp(inode), &lmm, lsm);
lmmsize = rc;
}
ccc_inode_lsm_put(inode, lsm);
if (rc < 0)
goto out;
if (size == 0) {
/* we used to forward to ll_get_max_mdsize() to get
* the maximum buffer size, but some apps (such as
* rsync 3.0.x) care about the exact xattr value
* size */
rc = lmmsize;
goto out;
}
if (size < lmmsize) {
CERROR("server bug: replied size %d > %d for %s (%s)\n",
lmmsize, (int)size, dentry->d_name.name, name);
rc = -ERANGE;
goto out;
}
lump = (struct lov_user_md *)buffer;
memcpy(lump, lmm, lmmsize);
/* do not return layout gen for getxattr otherwise it would
* confuse tar --xattr by recognizing layout gen as stripe
* offset when the file is restored. See LU-2809. */
lump->lmm_layout_gen = 0;
rc = lmmsize;
out:
if (request)
ptlrpc_req_finished(request);
else if (lmm)
obd_free_diskmd(ll_i2dtexp(inode), &lmm);
return rc;
}
return ll_getxattr_common(inode, name, buffer, size, OBD_MD_FLXATTR);
}
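From user space, the synthetic striping attribute that Example 14 intercepts is what a command such as getfattr -n lustre.lov <file> (or trusted.lov) requests; every other name falls through to ll_getxattr_common from Example 1.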
Example 15: ofd_lvbo_init
/**
* Implementation of ldlm_valblock_ops::lvbo_init for OFD.
*
* This function allocates and initializes new LVB data for the given
* LDLM resource if it is not allocated yet. The new LVB is filled with
* attributes of the object associated with that resource. The function
* does nothing if the LVB for the given LDLM resource is already allocated.
*
* Called with res->lr_lvb_mutex held.
*
* \param[in] env execution environment
* \param[in] res LDLM resource
*
* \retval 0 on successful setup
* \retval negative value on error
*/
static int ofd_lvbo_init(const struct lu_env *env, struct ldlm_resource *res)
{
struct ost_lvb *lvb;
struct ofd_device *ofd;
struct ofd_object *fo;
struct ofd_thread_info *info;
struct lu_env _env;
int rc = 0;
ENTRY;
LASSERT(res);
LASSERT(mutex_is_locked(&res->lr_lvb_mutex));
if (res->lr_lvb_data != NULL)
RETURN(0);
ofd = ldlm_res_to_ns(res)->ns_lvbp;
LASSERT(ofd != NULL);
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_OST_LVB))
RETURN(-ENOMEM);
if (!env) {
rc = lu_env_init(&_env, LCT_DT_THREAD);
if (rc)
RETURN(rc);
env = &_env;
}
OBD_ALLOC_PTR(lvb);
if (lvb == NULL)
GOTO(out, rc = -ENOMEM);
info = ofd_info(env);
res->lr_lvb_data = lvb;
res->lr_lvb_len = sizeof(*lvb);
ost_fid_from_resid(&info->fti_fid, &res->lr_name,
ofd->ofd_lut.lut_lsd.lsd_osd_index);
fo = ofd_object_find(env, ofd, &info->fti_fid);
if (IS_ERR(fo))
GOTO(out_lvb, rc = PTR_ERR(fo));
rc = ofd_attr_get(env, fo, &info->fti_attr);
if (rc) {
struct ofd_seq *oseq;
__u64 seq;
/* Object could be recreated during the first
* CLEANUP_ORPHAN request. */
if (rc == -ENOENT) {
seq = fid_seq(&info->fti_fid);
oseq = ofd_seq_load(env, ofd, fid_seq_is_idif(seq) ?
FID_SEQ_OST_MDT0 : seq);
if (!IS_ERR_OR_NULL(oseq)) {
if (!oseq->os_last_id_synced)
rc = -EAGAIN;
ofd_seq_put(env, oseq);
}
}
GOTO(out_obj, rc);
}
lvb->lvb_size = info->fti_attr.la_size;
lvb->lvb_blocks = info->fti_attr.la_blocks;
lvb->lvb_mtime = info->fti_attr.la_mtime;
lvb->lvb_atime = info->fti_attr.la_atime;
lvb->lvb_ctime = info->fti_attr.la_ctime;
CDEBUG(D_DLMTRACE, "res: "DFID" initial lvb size: %llu, "
"mtime: %#llx, blocks: %#llx\n",
PFID(&info->fti_fid), lvb->lvb_size,
lvb->lvb_mtime, lvb->lvb_blocks);
info->fti_attr.la_valid = 0;
EXIT;
out_obj:
ofd_object_put(env, fo);
out_lvb:
if (rc != 0)
OST_LVB_SET_ERR(lvb->lvb_blocks, rc);
out:
/* Don't free lvb data on lookup error */
if (env && env == &_env)
//......... part of the code omitted .........