本文整理汇总了C++中severe函数的典型用法代码示例。如果您正苦于以下问题:C++ severe函数的具体用法?C++ severe怎么用?C++ severe使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了severe函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: exp_cache_build_invalidate_all_msg
/**
 * Build an "invalidate all" message header targeted at a given cache server.
 *
 * @param ctx_p    pointer to the dirty management context
 * @param srv_rank rank of the target cache server
 *
 * @retval pointer to the shared static message header on success
 * @retval NULL when srv_rank is out of range or the server does not exist
 */
gw_header_t * exp_cache_build_invalidate_all_msg(exp_cache_dirty_ctx_t * ctx_p,
                                                 int srv_rank)
{
    exp_cache_srv_front_end_t * front_end_p;
    /*
    ** Retrieve the front end context of this server.
    ** Reject negative ranks too: srv_rank is a signed int, and a negative
    ** value would index srv_rank[] out of bounds.
    */
    if ((srv_rank < 0) || (srv_rank >= EXP_MAX_CACHE_SRV)) {
        severe("server rank out of range %d",srv_rank);
        return NULL;
    }
    front_end_p = ctx_p->srv_rank[srv_rank];
    if (front_end_p == NULL) {
        severe("server %d do not exist",srv_rank);
        return NULL;
    }
    /*
    ** Initialize the message header (fills the shared static message; the
    ** caller must consume it before the next call)
    */
    gw_invalidate_all_msg.export_id    = exp_cache_cnf.export_id;
    gw_invalidate_all_msg.gateway_rank = srv_rank;
    gw_invalidate_all_msg.nb_gateways  = ctx_p->nb_cache_servers;
    return &gw_invalidate_all_msg;
}
示例2: exp_cache_build_configuration_msg
/**
 * Build a configuration message targeted at a given cache server.
 *
 * @param ctx_p    pointer to the dirty management context
 * @param port     port to advertise (currently unused here: the port/ip
 *                 assignments below are commented out — TODO confirm intent)
 * @param srv_rank rank of the target cache server
 *
 * @retval pointer to the shared static configuration message on success
 * @retval NULL when srv_rank is out of range or the server does not exist
 */
gw_configuration_t * exp_cache_build_configuration_msg(exp_cache_dirty_ctx_t * ctx_p,
                                                       uint16_t port,
                                                       int srv_rank)
{
    exp_cache_srv_front_end_t * front_end_p;
    /*
    ** Retrieve the front end context of this server.
    ** Reject negative ranks too: srv_rank is a signed int, and a negative
    ** value would index srv_rank[] out of bounds.
    */
    if ((srv_rank < 0) || (srv_rank >= EXP_MAX_CACHE_SRV)) {
        severe("server rank out of range %d",srv_rank);
        return NULL;
    }
    front_end_p = ctx_p->srv_rank[srv_rank];
    if (front_end_p == NULL) {
        severe("server %d do not exist",srv_rank);
        return NULL;
    }
    /*
    ** Initialize the message header
    */
    gw_configuration_msg.hdr.export_id    = exp_cache_cnf.export_id;
    gw_configuration_msg.hdr.gateway_rank = srv_rank;
    gw_configuration_msg.hdr.nb_gateways  = ctx_p->nb_cache_servers;
    // gw_configuration_msg.ipAddr = exp_cache_cnf.export_ipAddr;
    // gw_configuration_msg.port = exp_cache_cnf.export_port;
    // gw_configuration_msg.eid.eid_len = exp_cache_cnf.eid_nb;
    // gw_configuration_msg.eid.eid_val = exp_cache_cnf.eid_list;
    return &gw_configuration_msg;
}
示例3: lk
// Remaps the private (per-process) view of this memory mapped file at the same
// virtual address it previously occupied (Windows implementation). Aborts the
// process (fassert) if the old view cannot be unmapped or the new view does
// not land at the old address, since other code holds pointers into that range.
void* MemoryMappedFile::remapPrivateView(void* oldPrivateAddr) {
LockMongoFilesExclusive lockMongoFiles;
// Drop the "writable" tracking for all pages of the old view.
privateViews.clearWritableBits(oldPrivateAddr, len);
stdx::lock_guard<stdx::mutex> lk(mapViewMutex);
if (!UnmapViewOfFile(oldPrivateAddr)) {
DWORD dosError = GetLastError();
severe() << "UnMapViewOfFile for " << filename() << " failed with error "
<< errnoWithDescription(dosError) << " in MemoryMappedFile::remapPrivateView"
<< endl;
fassertFailed(16168);
}
// Re-map read-only at the exact address freed above. NOTE(review): the view is
// mapped FILE_MAP_READ; presumably pages are made writable elsewhere on demand
// (the writable bits cleared above suggest that) — confirm against callers.
void* newPrivateView =
MapViewOfFileEx(maphandle, // file mapping handle
FILE_MAP_READ, // access
0,
0, // file offset, high and low
0, // bytes to map, 0 == all
oldPrivateAddr); // we want the same address we had before
if (0 == newPrivateView) {
DWORD dosError = GetLastError();
severe() << "MapViewOfFileEx for " << filename() << " failed with error "
<< errnoWithDescription(dosError) << " (file size is " << len << ")"
<< " in MemoryMappedFile::remapPrivateView" << endl;
}
// A NULL or relocated view is fatal: callers rely on address stability.
fassert(16148, newPrivateView == oldPrivateAddr);
return newPrivateView;
}
示例4: exp_dirty_active_switch
/**
 * Change the active set of the dirty management context for a given front end.
 * The inactive set is zeroed before being promoted, so it always starts empty.
 *
 @param ctx_p: pointer to the dirty management context
 @param srv_rank : Server to build the message for
 @retval none
 */
void exp_dirty_active_switch(exp_cache_dirty_ctx_t *ctx_p,
                             int srv_rank)
{
    exp_cache_srv_front_end_t * front_end_p;
    int inactive_idx;
    uint32_t count;
    /*
    ** Retrieve the front end context of this server.
    ** Reject negative ranks too: srv_rank is a signed int, and a negative
    ** value would index srv_rank[] out of bounds.
    */
    if ((srv_rank < 0) || (srv_rank >= EXP_MAX_CACHE_SRV)) {
        severe("server rank out of range %d",srv_rank);
        return;
    }
    front_end_p = ctx_p->srv_rank[srv_rank];
    if (front_end_p == NULL) {
        severe("server %d do not exist",srv_rank);
        return;
    }
    /* there are exactly 2 sets (indices 0 and 1) */
    inactive_idx = 1 - front_end_p->active_idx;
    /*
    ** Clear the data of the inactive set so it is empty once promoted
    */
    memset(front_end_p->parent[inactive_idx],0,sizeof(exp_dirty_dirty_parent_t));
    count = (1 << ctx_p->level0_sz)/EXP_CHILD_BITMAP_BIT_SZ;
    memset(front_end_p->child[inactive_idx],0,sizeof(exp_dirty_dirty_child_t)*count);
    /*
    ** Switch the active set
    */
    front_end_p->active_idx = inactive_idx;
}
示例5: transaction
// Returns the hash field of the newest entry in the local oplog, or 0 when the
// oplog is empty (e.g. before an initial sync completes). Aborts the process
// (fassert) on read errors or when the hash field is missing or not a NumberLong.
long long BackgroundSync::_readLastAppliedHash(OperationContext* txn) {
    BSONObj lastEntry;
    try {
        ScopedTransaction transaction(txn, MODE_IX);
        Lock::DBLock lk(txn->lockState(), "local", MODE_X);
        if (!Helpers::getLast(txn, rsoplog, lastEntry)) {
            // This can happen when we are to do an initial sync. lastHash will be set
            // after the initial sync is complete.
            return 0;
        }
    } catch (const DBException& ex) {
        severe() << "Problem reading " << rsoplog << ": " << ex.toStatus();
        fassertFailed(18904);
    }

    // Validate the hash field before trusting it.
    const BSONElement hashElem = lastEntry[hashFieldName];
    if (hashElem.eoo()) {
        severe() << "Most recent entry in " << rsoplog << " missing \"" << hashFieldName
                 << "\" field";
        fassertFailed(18902);
    }
    if (hashElem.type() != NumberLong) {
        severe() << "Expected type of \"" << hashFieldName << "\" in most recent " << rsoplog
                 << " entry to have type NumberLong, but found " << typeName(hashElem.type());
        fassertFailed(18903);
    }
    return hashElem.safeNumberLong();
}
示例6: multiSyncApply
// This free function is used by the writer threads to apply each op
void multiSyncApply(const std::vector<BSONObj>& ops, SyncTail* st) {
initializeWriterThread();
OperationContextImpl txn;
txn.setReplicatedWrites(false);
DisableDocumentValidation validationDisabler(&txn);
// allow us to get through the magic barrier
txn.lockState()->setIsBatchWriter(true);
bool convertUpdatesToUpserts = true;
for (std::vector<BSONObj>::const_iterator it = ops.begin(); it != ops.end(); ++it) {
try {
const Status s = SyncTail::syncApply(&txn, *it, convertUpdatesToUpserts);
if (!s.isOK()) {
severe() << "Error applying operation (" << it->toString() << "): " << s;
fassertFailedNoTrace(16359);
}
} catch (const DBException& e) {
severe() << "writer worker caught exception: " << causedBy(e)
<< " on: " << it->toString();
if (inShutdown()) {
return;
}
fassertFailedNoTrace(16360);
}
}
}
示例7: transaction
// Reads the hash field (kHashFieldName) of the newest entry in the local oplog.
// Returns 0 when the oplog is empty (e.g. before an initial sync completes).
// Aborts the process (fassert) on read errors or a missing/invalid hash field.
long long BackgroundSync::_readLastAppliedHash(OperationContext* txn) {
BSONObj oplogEntry;
try {
// Retry the read when the storage engine reports a write conflict.
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
ScopedTransaction transaction(txn, MODE_IX);
Lock::DBLock lk(txn->lockState(), "local", MODE_X);
bool success = Helpers::getLast(txn, rsOplogName.c_str(), oplogEntry);
if (!success) {
// This can happen when we are to do an initial sync. lastHash will be set
// after the initial sync is complete.
return 0;
}
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "readLastAppliedHash", rsOplogName);
} catch (const DBException& ex) {
severe() << "Problem reading " << rsOplogName << ": " << ex.toStatus();
fassertFailed(18904);
}
long long hash;
// bsonExtractIntegerField validates presence and numeric type in one step.
auto status = bsonExtractIntegerField(oplogEntry, kHashFieldName, &hash);
if (!status.isOK()) {
severe() << "Most recent entry in " << rsOplogName << " is missing or has invalid \""
<< kHashFieldName << "\" field. Oplog entry: " << oplogEntry << ": " << status;
fassertFailed(18902);
}
return hash;
}
示例8: reference
/**
 geo_proc_createIndex
 create a context given by index
 That function tries to allocate a free context.
 In case of success, it returns the index of the context.
 @param : context_id is the reference of the context
 @retval : MS controller reference (if OK)
 retval -1 if out of context.
 */
uint32_t geo_proc_createIndex(uint32_t context_id) {
    geo_proc_ctx_t *ctx_p;

    /* Resolve the context from its index */
    ctx_p = geo_proc_getObjCtx_p(context_id);
    if (ctx_p == NULL) {
        severe( "MS ref out of range: %u", context_id );
        return RUC_NOK;
    }

    /* The requested context must currently be free */
    if (ctx_p->free == FALSE) {
        severe( "the context is not free : %u", context_id );
        return RUC_NOK;
    }

    /* Re-initialize the context before handing it out */
    geo_proc_ctxInit(ctx_p, FALSE);

    /* Account for the allocation and unlink it from the free list */
    geo_proc_context_allocated++;
    ctx_p->free = FALSE;
    ruc_objRemove((ruc_obj_desc_t*) ctx_p);
    return RUC_OK;
}
示例9: lk
// Loads the hash of the newest entry in the local oplog into _lastAppliedHash.
// Sets it to 0 when the oplog is empty (e.g. before an initial sync has
// completed). Aborts the process (fassert) on read errors or when the hash
// field is missing or not a NumberLong.
void BackgroundSync::loadLastAppliedHash(OperationContext* txn) {
Lock::DBRead lk(txn->lockState(), rsoplog);
BSONObj oplogEntry;
try {
if (!Helpers::getLast(txn, rsoplog, oplogEntry)) {
// This can happen when we are to do an initial sync. lastHash will be set
// after the initial sync is complete.
_lastAppliedHash = 0;
return;
}
}
catch (const DBException& ex) {
severe() << "Problem reading " << rsoplog << ": " << ex.toStatus();
fassertFailed(18904);
}
// Validate the hash field before trusting it.
BSONElement hashElement = oplogEntry[hashFieldName];
if (hashElement.eoo()) {
severe() << "Most recent entry in " << rsoplog << " missing \"" << hashFieldName <<
"\" field";
fassertFailed(18902);
}
if (hashElement.type() != NumberLong) {
severe() << "Expected type of \"" << hashFieldName << "\" in most recent " <<
rsoplog << " entry to have type NumberLong, but found " <<
typeName(hashElement.type());
fassertFailed(18903);
}
_lastAppliedHash = hashElement.safeNumberLong();
}
示例10: invariant
// Applies every oplog entry from oplogApplicationStartPoint (exclusive) up to
// topOfOplog (inclusive) as part of startup recovery. Fasserts when the start
// point is ahead of the top of the oplog or cannot be found in the oplog.
void ReplicationRecoveryImpl::_applyToEndOfOplog(OperationContext* opCtx,
Timestamp oplogApplicationStartPoint,
Timestamp topOfOplog) {
invariant(!oplogApplicationStartPoint.isNull());
invariant(!topOfOplog.isNull());
// Check if we have any unapplied ops in our oplog. It is important that this is done after
// deleting the ragged end of the oplog.
if (oplogApplicationStartPoint == topOfOplog) {
log()
<< "No oplog entries to apply for recovery. appliedThrough is at the top of the oplog.";
return; // We've applied all the valid oplog we have.
} else if (oplogApplicationStartPoint > topOfOplog) {
severe() << "Applied op " << oplogApplicationStartPoint.toBSON()
<< " not found. Top of oplog is " << topOfOplog.toBSON() << '.';
fassertFailedNoTrace(40313);
}
log() << "Replaying stored operations from " << oplogApplicationStartPoint.toBSON()
<< " (exclusive) to " << topOfOplog.toBSON() << " (inclusive).";
// Scan the oplog forward from the start point; QueryOption_OplogReplay lets
// the storage/query layer find the "ts" $gte start efficiently.
DBDirectClient db(opCtx);
auto cursor = db.query(NamespaceString::kRsOplogNamespace.ns(),
QUERY("ts" << BSON("$gte" << oplogApplicationStartPoint)),
/*batchSize*/ 0,
/*skip*/ 0,
/*projection*/ nullptr,
QueryOption_OplogReplay);
// Check that the first document matches our appliedThrough point then skip it since it's
// already been applied.
if (!cursor->more()) {
// This should really be impossible because we check above that the top of the oplog is
// strictly > appliedThrough. If this fails it represents a serious bug in either the
// storage engine or query's implementation of OplogReplay.
severe() << "Couldn't find any entries in the oplog >= "
<< oplogApplicationStartPoint.toBSON() << " which should be impossible.";
fassertFailedNoTrace(40293);
}
auto firstTimestampFound =
fassertStatusOK(40291, OpTime::parseFromOplogEntry(cursor->nextSafe())).getTimestamp();
if (firstTimestampFound != oplogApplicationStartPoint) {
severe() << "Oplog entry at " << oplogApplicationStartPoint.toBSON()
<< " is missing; actual entry found is " << firstTimestampFound.toBSON();
fassertFailedNoTrace(40292);
}
// Apply remaining ops one at at time, but don't log them because they are already logged.
UnreplicatedWritesBlock uwb(opCtx);
while (cursor->more()) {
auto entry = cursor->nextSafe();
fassertStatusOK(40294,
SyncTail::syncApply(opCtx, entry, OplogApplication::Mode::kRecovering));
// Persist progress after each op so a crash mid-recovery resumes from here.
_consistencyMarkers->setAppliedThrough(
opCtx, fassertStatusOK(40295, OpTime::parseFromOplogEntry(entry)));
}
}
示例11: storaged_lbg_initialize
/*
** Create and configure the load balancing group (LBG) towards the storage
** server described by s, for entry index within s->lbg_id[].
**
** @param s     storage context (host name, client endpoint list, lbg ids)
** @param index index of the load balancing group within s->lbg_id[]
**
** @retval 0 on success
** @retval -1 when the AF_INET load balancing group cannot be configured
*/
int storaged_lbg_initialize(mstorage_t *s, int index) {
int lbg_size;
int ret;
int i;
int local=1;
DEBUG_FUNCTION;
#if 0
/*
** configure the callback that is intended to perform the polling of the storaged on each TCP connection
*/
ret = north_lbg_attach_application_supervision_callback(s->lbg_id[index],(af_stream_poll_CBK_t)storcli_lbg_cnx_polling);
if (ret < 0)
{
severe("Cannot configure Soraged polling callback");
}
ret = north_lbg_set_application_tmo4supervision(s->lbg_id[index],20);
if (ret < 0)
{
severe("Cannot configure application TMO");
}
#endif
/*
** set the dscp for storio connections
** (DSCP occupies the 6 most significant bits of the TOS byte, hence <<2)
*/
af_inet_storaged_conf.dscp=(uint8_t)common_config.storio_dscp;
af_inet_storaged_conf.dscp = af_inet_storaged_conf.dscp <<2;
/*
** store the IP address and port in the list of the endpoint
*/
lbg_size = s->sclients_nb;
for (i = 0; i < lbg_size; i++)
{
my_list[i].remote_port_host = s->sclients[i].port;
my_list[i].remote_ipaddr_host = s->sclients[i].ipv4;
/* the LBG is flagged "local" only when every endpoint is on this host */
if (!is_this_ipV4_local(s->sclients[i].ipv4)) local = 0;
}
af_inet_storaged_conf.recv_srv_type = ROZOFS_RPC_SRV;
af_inet_storaged_conf.rpc_recv_max_sz = rozofs_storcli_south_large_buf_sz;
ret = north_lbg_configure_af_inet(s->lbg_id[index],
s->host,
INADDR_ANY,0,
my_list,
ROZOFS_SOCK_FAMILY_STORAGE_NORTH,lbg_size,&af_inet_storaged_conf, local);
if (ret < 0)
{
severe("Cannot create Load Balancing Group %d for storaged %s",s->lbg_id[index],s->host);
return -1;
}
/* share a global entry index across storio LBGs (presumably for
** round-robin distribution — confirm against north_lbg implementation) */
north_lbg_set_next_global_entry_idx_p(s->lbg_id[index],&storcli_next_storio_global_index);
return 0;
}
示例12: while
/* applies oplog from "now" until endOpTime using the applier threads for initial sync*/
// Batches are bounded by replBatchLimitBytes / replBatchLimitOperations and by
// reaching endOpTime; aborts the process if endOpTime is skipped over or a
// batch comes back empty.
void SyncTail::_applyOplogUntil(OperationContext* txn, const OpTime& endOpTime) {
unsigned long long bytesApplied = 0;
unsigned long long entriesApplied = 0;
// Outer loop: one iteration per applied batch, until endOpTime is reached.
while (true) {
OpQueue ops;
OperationContextImpl ctx;
// Inner loop: accumulate ops into the batch until a stop condition is hit.
while (!tryPopAndWaitForMore(&ops, getGlobalReplicationCoordinator())) {
// nothing came back last time, so go again
if (ops.empty()) continue;
// Check if we reached the end
const BSONObj currentOp = ops.back();
const OpTime currentOpTime = currentOp["ts"]._opTime();
// When we reach the end return this batch
if (currentOpTime == endOpTime) {
break;
}
else if (currentOpTime > endOpTime) {
// Passing endOpTime without ever seeing it means it disappeared
// from the source's oplog — likely a rollback; abort.
severe() << "Applied past expected end " << endOpTime << " to " << currentOpTime
<< " without seeing it. Rollback?" << rsLog;
fassertFailedNoTrace(18693);
}
// apply replication batch limits
if (ops.getSize() > replBatchLimitBytes)
break;
if (ops.getDeque().size() > replBatchLimitOperations)
break;
};
if (ops.empty()) {
severe() << "got no ops for batch...";
fassertFailedNoTrace(18692);
}
const BSONObj lastOp = ops.back().getOwned();
// Tally operation information
bytesApplied += ops.getSize();
entriesApplied += ops.getDeque().size();
// Apply the batch with the writer threads, then record it in our own oplog.
multiApply(ops.getDeque());
OpTime lastOpTime = applyOpsToOplog(&ops.getDeque());
// if the last op applied was our end, return
if (lastOpTime == endOpTime) {
LOG(1) << "SyncTail applied " << entriesApplied
<< " entries (" << bytesApplied << " bytes)"
<< " and finished at opTime " << endOpTime.toStringPretty();
return;
}
} // end of while (true)
}
示例13: log
// Fetches a document that is missing locally from the sync source (_hostname),
// retrying up to retryMax times on network errors with quadratic backoff.
// Returns an empty BSONObj for capped collections, where missing docs are
// expected. Fasserts when the oplog entry carries no _id to query by;
// msgasserts once all retries are exhausted.
BSONObj SyncTail::getMissingDoc(OperationContext* txn, Database* db, const BSONObj& o) {
OplogReader missingObjReader; // why are we using OplogReader to run a non-oplog query?
const char* ns = o.getStringField("ns");
// capped collections
Collection* collection = db->getCollection(ns);
if (collection && collection->isCapped()) {
log() << "missing doc, but this is okay for a capped collection (" << ns << ")";
return BSONObj();
}
const int retryMax = 3;
for (int retryCount = 1; retryCount <= retryMax; ++retryCount) {
if (retryCount != 1) {
// if we are retrying, sleep a bit to let the network possibly recover
sleepsecs(retryCount * retryCount);
}
try {
bool ok = missingObjReader.connect(HostAndPort(_hostname));
if (!ok) {
warning() << "network problem detected while connecting to the "
<< "sync source, attempt " << retryCount << " of " << retryMax << endl;
continue; // try again
}
} catch (const SocketException&) {
warning() << "network problem detected while connecting to the "
<< "sync source, attempt " << retryCount << " of " << retryMax << endl;
continue; // try again
}
// get _id from oplog entry to create query to fetch document.
// For updates ("u") the target _id lives in the "o2" field; otherwise in "o".
const BSONElement opElem = o.getField("op");
const bool isUpdate = !opElem.eoo() && opElem.str() == "u";
const BSONElement idElem = o.getObjectField(isUpdate ? "o2" : "o")["_id"];
if (idElem.eoo()) {
severe() << "cannot fetch missing document without _id field: " << o.toString();
fassertFailedNoTrace(28742);
}
BSONObj query = BSONObjBuilder().append(idElem).obj();
BSONObj missingObj;
try {
missingObj = missingObjReader.findOne(ns, query);
} catch (const SocketException&) {
warning() << "network problem detected while fetching a missing document from the "
<< "sync source, attempt " << retryCount << " of " << retryMax << endl;
continue; // try again
} catch (DBException& e) {
// non-network failures are not retried; propagate to the caller
error() << "assertion fetching missing object: " << e.what() << endl;
throw;
}
// success!
return missingObj;
}
// retry count exceeded
msgasserted(15916,
str::stream() << "Can no longer connect to initial sync source: " << _hostname);
}
示例14: StorageEngineLockFile
// Creates and opens the storage engine lock file for the configured dbpath.
// Read-only mode tolerates an un-openable lock file, but refuses to start on
// a dbpath that was previously shut down uncleanly.
void ServiceContextMongoD::createLockFile() {
    try {
        _lockFile.reset(new StorageEngineLockFile(storageGlobalParams.dbpath));
    } catch (const std::exception& ex) {
        uassert(28596,
                str::stream() << "Unable to determine status of lock file in the data directory "
                              << storageGlobalParams.dbpath << ": " << ex.what(),
                false);
    }

    // Capture the unclean-shutdown indicator before open() may alter the file.
    const bool uncleanShutdown = _lockFile->createdByUncleanShutdown();
    const auto openStatus = _lockFile->open();
    if (storageGlobalParams.readOnly && openStatus == ErrorCodes::IllegalOperation) {
        // Read-only mode does not require holding the lock file.
        _lockFile.reset();
    } else {
        uassertStatusOK(openStatus);
    }

    if (!uncleanShutdown) {
        return;
    }
    if (storageGlobalParams.readOnly) {
        severe() << "Attempted to open dbpath in readOnly mode, but the server was "
                    "previously not shut down cleanly.";
        fassertFailedNoTrace(34416);
    }
    warning() << "Detected unclean shutdown - " << _lockFile->getFilespec() << " is not empty.";
}
示例15: severe
// Finalizes the OP_MSG under construction and returns it as a Message.
// In debug builds (unless disabled for tests) it first scans the body for
// duplicate top-level field names and fasserts when one is found.
Message OpMsgBuilder::finish() {
if (kDebugBuild && !disableDupeFieldCheck_forTest.load()) {
std::set<StringData> seenFields;
for (auto elem : resumeBody().asTempObj()) {
// insert().second is false when the field name was already present
if (!(seenFields.insert(elem.fieldNameStringData()).second)) {
severe() << "OP_MSG with duplicate field '" << elem.fieldNameStringData()
<< "' : " << redact(resumeBody().asTempObj());
fassert(40474, false);
}
}
}
// The builder must be in the body-building state with no sub-builder open.
invariant(_state == kBody);
invariant(_bodyStart);
invariant(!_openBuilder);
_state = kDone;
const auto size = _buf.len();
// Fill in the wire-protocol header in place at the front of the buffer.
MSGHEADER::View header(_buf.buf());
header.setMessageLength(size);
// header.setRequestMsgId(...); // These are currently filled in by the networking layer.
// header.setResponseToMsgId(...);
header.setOpCode(dbMsg);
// Transfer ownership of the underlying buffer to the returned Message.
return Message(_buf.release());
}