本文整理汇总了C++中LASSERTF函数的典型用法代码示例。如果您正苦于以下问题:C++ LASSERTF函数的具体用法?C++ LASSERTF怎么用?C++ LASSERTF使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了LASSERTF函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: null_free_reqbuf
static
void null_free_reqbuf(struct ptlrpc_sec *sec,
		      struct ptlrpc_request *req)
{
	/* Free the request buffer of a null-security request.
	 *
	 * Pool-allocated buffers are owned by the pool and must not be
	 * freed here, so bail out early for those. */
	if (req->rq_pool)
		return;

	/* In null sec the wire buffer doubles as the message buffer,
	 * so the two pointers must coincide. */
	LASSERTF(req->rq_reqmsg == req->rq_reqbuf,
		 "req %p: reqmsg %p is not reqbuf %p in null sec\n",
		 req, req->rq_reqmsg, req->rq_reqbuf);
	/* message length can never exceed the buffer that holds it;
	 * assertion text fixed ("should smaller" -> "should be smaller") */
	LASSERTF(req->rq_reqbuf_len >= req->rq_reqlen,
		 "req %p: reqlen %d should be smaller than buflen %d\n",
		 req, req->rq_reqlen, req->rq_reqbuf_len);

	OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
	req->rq_reqbuf = NULL;
	req->rq_reqbuf_len = 0;
}
示例2: lov_llog_origin_add
/* Add log records for each OSC that this object is striped over, and return
 * cookies for each one. We _would_ have nice abstraction here, except that
 * we need to keep cookies in stripe order, even if some are NULL, so that
 * the right cookies are passed back to the right OSTs at the client side.
 * Unset cookies should be all-zero (which will never occur naturally).
 *
 * Returns the number of cookie slots consumed (one per stripe); a stripe
 * whose record could not be written leaves an all-zero cookie behind. */
static int lov_llog_origin_add(const struct lu_env *env,
			       struct llog_ctxt *ctxt,
			       struct llog_rec_hdr *rec,
			       struct lov_stripe_md *lsm,
			       struct llog_cookie *logcookies, int numcookies)
{
	struct obd_device *obd = ctxt->loc_obd;
	struct lov_obd *lov = &obd->u.lov;
	int i, rc = 0, cookies = 0;
	ENTRY;

	/* caller must supply at least one cookie slot per stripe */
	LASSERTF(logcookies && numcookies >= lsm->lsm_stripe_count,
		 "logcookies %p, numcookies %d lsm->lsm_stripe_count %d \n",
		 logcookies, numcookies, lsm->lsm_stripe_count);

	for (i = 0; i < lsm->lsm_stripe_count; i++) {
		struct lov_oinfo *loi = lsm->lsm_oinfo[i];
		struct obd_device *child =
			lov->lov_tgts[loi->loi_ost_idx]->ltd_exp->exp_obd;
		struct llog_ctxt *cctxt = llog_get_context(child, ctxt->loc_idx);

		/* fill mds unlink/setattr log record with this stripe's
		 * object id before passing it to the child context */
		switch (rec->lrh_type) {
		case MDS_UNLINK_REC: {
			struct llog_unlink_rec *lur = (struct llog_unlink_rec *)rec;
			lur->lur_oid = ostid_id(&loi->loi_oi);
			lur->lur_oseq = (__u32)ostid_seq(&loi->loi_oi);
			break;
		}
		case MDS_SETATTR64_REC: {
			struct llog_setattr64_rec *lsr = (struct llog_setattr64_rec *)rec;
			lsr->lsr_oi = loi->loi_oi;
			break;
		}
		default:
			break;
		}

		/* inject error in llog_obd_add() below */
		if (OBD_FAIL_CHECK(OBD_FAIL_MDS_FAIL_LOV_LOG_ADD)) {
			llog_ctxt_put(cctxt);
			cctxt = NULL;
		}

		rc = llog_obd_add(env, cctxt, rec, NULL, logcookies + cookies,
				  numcookies - cookies);
		llog_ctxt_put(cctxt);
		if (rc < 0) {
			CERROR("Can't add llog (rc = %d) for stripe %d\n",
			       rc, cookies);
			/* zero the cookie so the client can tell this
			 * stripe's record was never written */
			memset(logcookies + cookies, 0,
			       sizeof(struct llog_cookie));
			rc = 1; /* skip this cookie */
		}
		/* Note that rc is always 1 if llog_obd_add was successful */
		cookies += rc;
	}
	RETURN(cookies);
}
示例3: opcode_offset
/* Translate an RPC opcode into its printable name by looking it up in
 * ll_rpc_opcode_table. */
const char *ll_opcode2str(__u32 opcode)
{
	/* When one of the assertions below fail, chances are that:
	 * 1) A new opcode was added in include/lustre/lustre_idl.h,
	 *    but is missing from the table above.
	 * or 2) The opcode space was renumbered or rearranged,
	 *    and the opcode_offset() function in
	 *    ptlrpc_internal.h needs to be modified.
	 */
	__u32 idx = opcode_offset(opcode);

	LASSERTF(idx < LUSTRE_MAX_OPCODES,
		 "offset %u >= LUSTRE_MAX_OPCODES %u\n",
		 idx, LUSTRE_MAX_OPCODES);
	LASSERTF(ll_rpc_opcode_table[idx].opcode == opcode,
		 "ll_rpc_opcode_table[%u].opcode %u != opcode %u\n",
		 idx, ll_rpc_opcode_table[idx].opcode, opcode);

	return ll_rpc_opcode_table[idx].opname;
}
示例4: qsd_glb_blocking_ast
/*
 * Blocking callback handler for global index lock
 *
 * \param lock - is the lock for which ast occurred.
 * \param desc - is the description of a conflicting lock in case of blocking
 *               ast.
 * \param data - is the value of lock->l_ast_data
 * \param flag - LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
 *               cancellation and blocking ast's.
 */
static int qsd_glb_blocking_ast(struct ldlm_lock *lock,
				struct ldlm_lock_desc *desc, void *data,
				int flag)
{
	int rc = 0;
	ENTRY;

	switch(flag) {
	case LDLM_CB_BLOCKING: {
		struct lustre_handle lockh;

		/* a conflicting lock appeared: cancel ours asynchronously */
		LDLM_DEBUG(lock, "blocking AST on global quota lock");
		ldlm_lock2handle(lock, &lockh);
		rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
		break;
	}
	case LDLM_CB_CANCELING: {
		struct qsd_qtype_info *qqi;

		LDLM_DEBUG(lock, "canceling global quota lock");

		qqi = qsd_glb_ast_data_get(lock, true);
		if (qqi == NULL)
			break;

		/* we are losing the global index lock, so let's mark the
		 * global & slave indexes as not up-to-date any more */
		write_lock(&qqi->qqi_qsd->qsd_lock);
		qqi->qqi_glb_uptodate = false;
		qqi->qqi_slv_uptodate = false;
		if (lock->l_handle.h_cookie == qqi->qqi_lockh.cookie)
			memset(&qqi->qqi_lockh, 0, sizeof(qqi->qqi_lockh));
		write_unlock(&qqi->qqi_qsd->qsd_lock);

		CDEBUG(D_QUOTA, "%s: losing global index lock for %s type\n",
		       qqi->qqi_qsd->qsd_svname, QTYPE_NAME((qqi->qqi_qtype)));

		/* kick off reintegration thread if not running already, if
		 * it's just local cancel (for stack clean up or eviction),
		 * don't re-trigger the reintegration. */
		if (!ldlm_is_local_only(lock))
			qsd_start_reint_thread(qqi);

		/* drop the reference taken by qsd_glb_ast_data_get() */
		lu_ref_del(&qqi->qqi_reference, "ast_data_get", lock);
		qqi_putref(qqi);
		break;
	}
	default:
		/* fix: message was missing its trailing newline, unlike
		 * every other LASSERTF in this code */
		LASSERTF(0, "invalid flags for blocking ast %d\n", flag);
	}

	RETURN(rc);
}
示例5: pop_ctxt
/* Restore the kernel fs context (cwd, fs segment, umask) from \a saved.
 * \a new_ctx is the context previously installed — presumably by a matching
 * push — and is only used for sanity checks here (TODO confirm pairing
 * against the push-side caller). */
void pop_ctxt(struct lvfs_run_ctxt *saved, struct lvfs_run_ctxt *new_ctx)
{
	/* if there is underlying dt_device then pop_ctxt is not needed */
	if (new_ctx->dt != NULL)
		return;

	ASSERT_CTXT_MAGIC(saved->magic);
	ASSERT_KERNEL_CTXT("popping non-kernel context!\n");

	/* the current cwd/mnt must still be what was pushed, otherwise
	 * push/pop calls are unbalanced */
	LASSERTF(current->fs->pwd.dentry == new_ctx->pwd, "%p != %p\n",
		 current->fs->pwd.dentry, new_ctx->pwd);
	LASSERTF(current->fs->pwd.mnt == new_ctx->pwdmnt, "%p != %p\n",
		 current->fs->pwd.mnt, new_ctx->pwdmnt);

	set_fs(saved->fs);
	ll_set_fs_pwd(current->fs, saved->pwdmnt, saved->pwd);
	/* release the dentry/mnt references held by the saved context */
	dput(saved->pwd);
	mntput(saved->pwdmnt);
	current->fs->umask = saved->umask;
}
示例6: ll_teardown_mmaps
/* Tear down all page-table mappings of \a mapping in the byte range
 * [\a first, \a last] so later accesses fault back into ->nopage.
 * NOTE(review): per the old comment this interacts with __free_pte and
 * nopage's reference passing to the pte — confirm details against the VM
 * code for this kernel version.
 *
 * Returns 0 if mappings existed and were unmapped, -ENOENT otherwise. */
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
	LASSERTF(last > first, "last %llu first %llu\n", last, first);

	if (!mapping_mapped(mapping))
		return -ENOENT;

	unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1,
			    last - first + 1, 0);
	return 0;
}
示例7: pop_ctxt
/* Restore the kernel fs context and, optionally, the caller credentials
 * from \a saved.  \a new_ctx is only used for sanity checks; \a uc, when
 * non-NULL, requests that uid/gid/fsuid/fsgid/capabilities and group info
 * be restored as well. */
void pop_ctxt(struct lvfs_run_ctxt *saved, struct lvfs_run_ctxt *new_ctx,
	      struct lvfs_ucred *uc)
{
	/* if there is underlying dt_device then pop_ctxt is not needed */
	if (new_ctx->dt != NULL)
		return;

	ASSERT_CTXT_MAGIC(saved->magic);
	ASSERT_KERNEL_CTXT("popping non-kernel context!\n");

	/* cwd/mnt must still be what the matching push installed */
	LASSERTF(cfs_fs_pwd(current->fs) == new_ctx->pwd, "%p != %p\n",
		 cfs_fs_pwd(current->fs), new_ctx->pwd);
	LASSERTF(cfs_fs_mnt(current->fs) == new_ctx->pwdmnt, "%p != %p\n",
		 cfs_fs_mnt(current->fs), new_ctx->pwdmnt);

	set_fs(saved->fs);
	ll_set_fs_pwd(current->fs, saved->pwdmnt, saved->pwd);
	/* release the references held by the saved context */
	dput(saved->pwd);
	mntput(saved->pwdmnt);
	current->fs->umask = saved->luc.luc_umask;
	if (uc) {
		struct cred *cred;

		/* restore the saved identity via the kernel cred API;
		 * on prepare_creds() failure the creds are simply left
		 * as they are (best effort) */
		cred = prepare_creds();
		if (cred) {
			cred->uid = saved->luc.luc_uid;
			cred->gid = saved->luc.luc_gid;
			cred->fsuid = saved->luc.luc_fsuid;
			cred->fsgid = saved->luc.luc_fsgid;
			cred->cap_effective = saved->luc.luc_cap;
			commit_creds(cred);
		}

		/* restore whichever group info was in effect: explicit
		 * ginfo, else the identity's, else none */
		pop_group_info(saved,
			       uc->luc_ginfo ?:
			       uc->luc_identity ? uc->luc_identity->mi_ginfo :
			       NULL);
	}
}
示例8: LASSERTF
/* Look up the object-update handler for opcode \a opc.
 *
 * \retval pointer into out_update_ops for a supported opcode
 * \retval NULL for an unsupported opcode
 */
struct tgt_handler *out_handler_find(__u32 opc)
{
	/* initialize at declaration; the original assigned NULL twice
	 * (once up front and again in the else branch) */
	struct tgt_handler *h = NULL;

	if (OBJ_CREATE <= opc && opc < OBJ_LAST) {
		h = &out_update_ops[opc - OBJ_CREATE];
		/* the table must be laid out in opcode order */
		LASSERTF(h->th_opc == opc, "opcode mismatch %d != %d\n",
			 h->th_opc, opc);
	}
	return h;
}
示例9: ptl_send_buf
/**
 * Helper function. Sends \a len bytes from \a base at offset \a offset
 * over \a conn connection to portal \a portal.
 * Returns 0 on success or error code.
 *
 * Note: LNetPut() failure is deliberately NOT propagated — the MD is
 * unlinked, which generates an UNLINK event completing like any other
 * failed send, so the function still returns 0 (see comment below).
 */
static int ptl_send_buf(lnet_handle_md_t *mdh, void *base, int len,
			lnet_ack_req_t ack, struct ptlrpc_cb_id *cbid,
			struct ptlrpc_connection *conn, int portal, __u64 xid,
			unsigned int offset)
{
	int rc;
	lnet_md_t md;

	LASSERT(portal != 0);
	LASSERT(conn != NULL);
	CDEBUG(D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer));

	md.start = base;
	md.length = len;
	/* with an ACK requested the MD sees two events (SEND + ACK),
	 * otherwise only one */
	md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1;
	md.options = PTLRPC_MD_OPTIONS;
	md.user_ptr = cbid;
	md.eq_handle = ptlrpc_eq_h;

	if (unlikely(ack == LNET_ACK_REQ &&
		     OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_ACK,
					  OBD_FAIL_ONCE))) {
		/* don't ask for the ack to simulate failing client */
		ack = LNET_NOACK_REQ;
	}

	rc = LNetMDBind(md, LNET_UNLINK, mdh);
	if (unlikely(rc != 0)) {
		CERROR("LNetMDBind failed: %d\n", rc);
		LASSERT(rc == -ENOMEM);
		return -ENOMEM;
	}

	CDEBUG(D_NET, "Sending %d bytes to portal %d, xid %lld, offset %u\n",
	       len, portal, xid, offset);

	rc = LNetPut(conn->c_self, *mdh, ack,
		     conn->c_peer, portal, xid, offset, 0);
	if (unlikely(rc != 0)) {
		int rc2;
		/* We're going to get an UNLINK event when I unlink below,
		 * which will complete just like any other failed send, so
		 * I fall through and return success here! */
		CERROR("LNetPut(%s, %d, %lld) failed: %d\n",
		       libcfs_id2str(conn->c_peer), portal, xid, rc);
		rc2 = LNetMDUnlink(*mdh);
		LASSERTF(rc2 == 0, "rc2 = %d\n", rc2);
	}
	return 0;
}
示例10: lprocfs_counter_add
/* Add \a amount to counter \a idx of \a stats, updating the per-CPU
 * sum/min/max/stddev fields when AVGMINMAX accounting is configured.
 * Safe to call with stats == NULL (no-op). */
void lprocfs_counter_add(struct lprocfs_stats *stats, int idx, long amount)
{
	struct lprocfs_counter *percpu_cntr;
	struct lprocfs_counter_header *header;
	int smp_id;
	unsigned long flags = 0;

	if (stats == NULL)
		return;

	LASSERTF(0 <= idx && idx < stats->ls_num,
		 "idx %d, ls_num %hu\n", idx, stats->ls_num);

	/* With per-client stats, statistics are allocated only for
	 * single CPU area, so the smp_id should be 0 always. */
	smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags);
	if (smp_id < 0)
		return;

	header = &stats->ls_cnt_header[idx];
	percpu_cntr = lprocfs_stats_counter_get(stats, smp_id, idx);
	percpu_cntr->lc_count++;
	if (header->lc_config & LPROCFS_CNTR_AVGMINMAX) {
		/*
		 * lprocfs_counter_add() can be called in interrupt context,
		 * as memory allocation could trigger memory shrinker call
		 * ldlm_pool_shrink(), which calls lprocfs_counter_add().
		 * LU-1727.
		 *
		 * Only obd_memory uses LPROCFS_STATS_FLAG_IRQ_SAFE
		 * flag, because it needs accurate counting lest memory leak
		 * check reports error.
		 */
		if (in_interrupt() &&
		    (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
			percpu_cntr->lc_sum_irq += amount;
		else
			percpu_cntr->lc_sum += amount;
		if (header->lc_config & LPROCFS_CNTR_STDDEV)
			percpu_cntr->lc_sumsquare += (__s64)amount * amount;
		if (amount < percpu_cntr->lc_min)
			percpu_cntr->lc_min = amount;
		if (amount > percpu_cntr->lc_max)
			percpu_cntr->lc_max = amount;
	}
	lprocfs_stats_unlock(stats, LPROCFS_GET_SMP_ID, &flags);
}
示例11: osc_object_prune
/* Prepare an osc_object for destruction: assert no pages remain and strip
 * the object pointer out of any DLM resources that still reference it. */
static int osc_object_prune(const struct lu_env *env, struct cl_object *obj)
{
	struct osc_object *oso = cl2osc(obj);
	struct ldlm_res_id *res_id = &osc_env_info(env)->oti_resname;

	LASSERTF(oso->oo_npages == 0,
		 DFID "still have %lu pages, obj: %p, osc: %p\n",
		 PFID(lu_object_fid(&obj->co_lu)), oso->oo_npages, obj, oso);

	/* DLM locks don't hold a reference of osc_object so we have to
	 * clear it before the object is being destroyed. */
	ostid_build_res_name(&oso->oo_oinfo->loi_oi, res_id);
	ldlm_resource_iterate(osc_export(oso)->exp_obd->obd_namespace, res_id,
			      osc_object_ast_clear, oso);
	return 0;
}
示例12: llog_initiator_connect
/* Bind the llog context to the client obd's current import, dropping a
 * reference on any stale import it still points at.  Always returns 0. */
int llog_initiator_connect(struct llog_ctxt *ctxt)
{
	struct obd_import *imp;

	LASSERT(ctxt);

	imp = ctxt->loc_obd->u.cli.cl_import;
	/* the context may only ever point at this obd's import */
	LASSERTF(ctxt->loc_imp == NULL || ctxt->loc_imp == imp,
		 "%p - %p\n", ctxt->loc_imp, imp);

	mutex_lock(&ctxt->loc_mutex);
	if (ctxt->loc_imp != imp) {
		if (ctxt->loc_imp)
			class_import_put(ctxt->loc_imp);
		ctxt->loc_imp = class_import_get(imp);
	}
	mutex_unlock(&ctxt->loc_mutex);
	return 0;
}
示例13: ll_frob_intent
/* Substitute the default intent \a deft when the caller supplied none (or
 * an IT_GETXATTR one); on kernels with the VFS intent patches also check
 * the intent magic and install the release callback. */
void ll_frob_intent(struct lookup_intent **itp, struct lookup_intent *deft)
{
	struct lookup_intent *it = *itp;

#ifdef HAVE_VFS_INTENT_PATCHES
	if (it) {
		LASSERTF(it->it_magic == INTENT_MAGIC,
			 "%p has bad intent magic: %x\n",
			 it, it->it_magic);
	}
#endif

	/* no intent, or one we cannot use: fall back to the default */
	if (!it || it->it_op == IT_GETXATTR)
		it = *itp = deft;
#ifdef HAVE_VFS_INTENT_PATCHES
	it->it_op_release = ll_intent_release;
#endif
}
示例14: out_tx_end
/* Execute all queued update operations in \a ta inside one transaction.
 * If step i fails, the already-executed steps [0, i) are rolled back via
 * their undo callbacks (every earlier step is asserted to have one).
 * Returns 0 on success or the first error encountered. */
int out_tx_end(const struct lu_env *env, struct thandle_exec_args *ta)
{
	struct tgt_session_info *tsi = tgt_ses_info(env);
	int i = 0, rc;

	LASSERT(ta->ta_dev);
	LASSERT(ta->ta_handle);

	/* nothing to do, or a previous step already recorded an error */
	if (ta->ta_err != 0 || ta->ta_argno == 0)
		GOTO(stop, rc = ta->ta_err);

	rc = out_trans_start(env, ta);
	if (unlikely(rc))
		GOTO(stop, rc);

	for (i = 0; i < ta->ta_argno; i++) {
		rc = ta->ta_args[i].exec_fn(env, ta->ta_handle,
					    &ta->ta_args[i]);
		if (unlikely(rc)) {
			CDEBUG(D_INFO, "error during execution of #%u from"
			       " %s:%d: rc = %d\n", i, ta->ta_args[i].file,
			       ta->ta_args[i].line, rc);
			/* undo the steps that already ran, newest first */
			while (--i >= 0) {
				LASSERTF(ta->ta_args[i].undo_fn != NULL,
					 "can't undo changes, hope for failover!\n");
				ta->ta_args[i].undo_fn(env, ta->ta_handle,
						       &ta->ta_args[i]);
			}
			break;
		}
	}

	/* Only fail for real update */
	tsi->tsi_reply_fail_id = OBD_FAIL_UPDATE_OBJ_NET_REP;
stop:
	CDEBUG(D_INFO, "%s: executed %u/%u: rc = %d\n",
	       dt_obd_name(ta->ta_dev), i, ta->ta_argno, rc);
	out_trans_stop(env, ta, rc);
	/* reset the exec-args so the structure can be reused */
	ta->ta_handle = NULL;
	ta->ta_argno = 0;
	ta->ta_err = 0;
	RETURN(rc);
}
示例15: range_alloc_set
/*
 * This function implements new seq allocation algorithm using async
 * updates to seq file on disk. ref bug 18857 for details.
 * The following variables keep track of this process:
 *
 * lss_space;       - available lss_space
 * lss_lowater_set; - lu_seq_range for all seqs before barrier, i.e. safe to use
 * lss_hiwater_set; - lu_seq_range after barrier, i.e. allocated but may be
 *                    not yet committed
 *
 * when lss_lowater_set reaches the end it is replaced with hiwater one and
 * a write operation is initiated to allocate new hiwater range.
 * if last seq write operation is still not committed, current operation is
 * flagged as sync write op.
 */
static int range_alloc_set(const struct lu_env *env,
			   struct lu_seq_range *out,
			   struct lu_server_seq *seq)
{
	struct lu_seq_range *space = &seq->lss_space;
	struct lu_seq_range *loset = &seq->lss_lowater_set;
	struct lu_seq_range *hiset = &seq->lss_hiwater_set;
	int rc = 0;

	/* first allocation: initialize the low/high water sets */
	if (lu_seq_range_is_zero(loset))
		__seq_set_init(env, seq);

	if (OBD_FAIL_CHECK(OBD_FAIL_SEQ_ALLOC)) /* exhaust set */
		loset->lsr_start = loset->lsr_end;

	if (lu_seq_range_is_exhausted(loset)) {
		/* reached high water mark. */
		struct lu_device *dev = seq->lss_site->ss_lu->ls_top_dev;
		int obd_num_clients = dev->ld_obd->obd_num_exports;
		__u64 set_sz;

		/* calculate new seq width based on number of clients */
		set_sz = max(seq->lss_set_width,
			     obd_num_clients * seq->lss_width);
		set_sz = min(lu_seq_range_space(space), set_sz);

		/* Switch to hiwater range now */
		*loset = *hiset;
		/* allocate new hiwater range */
		range_alloc(hiset, space, set_sz);

		/* update ondisk seq with new *space */
		rc = seq_store_update(env, seq, NULL, seq->lss_need_sync);
	}

	LASSERTF(!lu_seq_range_is_exhausted(loset) ||
		 lu_seq_range_is_sane(loset),
		 DRANGE"\n", PRANGE(loset));

	/* hand out the next lss_width-wide range from the low water set */
	if (rc == 0)
		range_alloc(out, loset, seq->lss_width);

	RETURN(rc);
}