This article collects typical usage examples of the C++ Status::code method. If you are wondering what exactly C++ Status::code does, how to call it, or what real uses of it look like, the curated code examples below may help. You can also explore further usage examples of the class it belongs to, Status.
The following presents 15 code examples of the Status::code method, sorted by popularity by default.
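Before the examples, here is a minimal, self-contained sketch of the pattern nearly all of them share: call an operation that returns a Status, then branch on Status::code() to handle specific error codes while treating OK as success. The small ErrorCodes enum, Status class, and lookupUser function below are simplified stand-ins written for illustration only; they are not the actual MongoDB types, whose real definitions live in the MongoDB source tree.
#include <iostream>
#include <string>
#include <utility>

// Simplified stand-ins for mongo::ErrorCodes and mongo::Status (illustration only).
namespace ErrorCodes {
enum Error { OK = 0, UserNotFound, DuplicateKey, Unauthorized };
}

class Status {
public:
    Status(ErrorCodes::Error code, std::string reason)
        : _code(code), _reason(std::move(reason)) {}
    static Status OK() { return Status(ErrorCodes::OK, ""); }
    bool isOK() const { return _code == ErrorCodes::OK; }
    ErrorCodes::Error code() const { return _code; }  // the method shown in the examples below
    const std::string& reason() const { return _reason; }
private:
    ErrorCodes::Error _code;
    std::string _reason;
};

// A hypothetical operation that may fail; it stands in for calls such as acquireUser() or addKey().
Status lookupUser(const std::string& name) {
    if (name.empty())
        return Status(ErrorCodes::UserNotFound, "no such user");
    return Status::OK();
}

int main() {
    Status status = lookupUser("");
    switch (status.code()) {  // branch on the numeric error code
        case ErrorCodes::OK:
            std::cout << "success" << std::endl;
            break;
        case ErrorCodes::UserNotFound:
            std::cout << "user missing: " << status.reason() << std::endl;
            break;
        default:
            std::cout << "unexpected error: " << status.reason() << std::endl;
            break;
    }
    return 0;
}
The examples that follow apply this same branch-on-code() idea to real MongoDB Status objects.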
Example 1: _checkAuthForPrivilegeHelper
Status AuthorizationSession::_checkAuthForPrivilegeHelper(const Privilege& privilege) {
AuthorizationManager& authMan = getAuthorizationManager();
Privilege modifiedPrivilege = _modifyPrivilegeForSpecialCases(privilege);
// Need to check not just the resource of the privilege, but also just the database
// component and the "*" resource.
std::string resourceSearchList[3];
resourceSearchList[0] = AuthorizationManager::WILDCARD_RESOURCE_NAME;
resourceSearchList[1] = nsToDatabase(modifiedPrivilege.getResource());
resourceSearchList[2] = modifiedPrivilege.getResource();
ActionSet unmetRequirements = modifiedPrivilege.getActions();
UserSet::iterator it = _authenticatedUsers.begin();
while (it != _authenticatedUsers.end()) {
User* user = *it;
if (!user->isValid()) {
// Make a good faith effort to acquire an up-to-date user object, since the one
// we've cached is marked "out-of-date."
UserName name = user->getName();
User* updatedUser;
Status status = authMan.acquireUser(name, &updatedUser);
switch (status.code()) {
case ErrorCodes::OK: {
// Success! Replace the old User object with the updated one.
fassert(17067, _authenticatedUsers.replaceAt(it, updatedUser) == user);
authMan.releaseUser(user);
user = updatedUser;
LOG(1) << "Updated session cache of user information for " << name;
break;
}
case ErrorCodes::UserNotFound: {
// User does not exist anymore; remove it from _authenticatedUsers.
fassert(17068, _authenticatedUsers.removeAt(it) == user);
authMan.releaseUser(user);
LOG(1) << "Removed deleted user " << name <<
" from session cache of user information.";
continue; // No need to advance "it" in this case.
}
default:
// Unrecognized error; assume that it's transient, and continue working with the
// out-of-date privilege data.
warning() << "Could not fetch updated user privilege information for " <<
name << "; continuing to use old information. Reason is " << status;
break;
}
}
for (int i = 0; i < static_cast<int>(boost::size(resourceSearchList)); ++i) {
ActionSet userActions = user->getActionsForResource(resourceSearchList[i]);
unmetRequirements.removeAllActionsFromSet(userActions);
if (unmetRequirements.empty())
return Status::OK();
}
++it;
}
return Status(ErrorCodes::Unauthorized, "unauthorized");
}
Example 2: run
bool run( OperationContext* txn,
string const &db,
BSONObj &cmdObj,
int,
string &errmsg,
BSONObjBuilder &result,
bool ) {
string ns;
if ( !FieldParser::extract( cmdObj, nsField, &ns, &errmsg ) ) {
return false;
}
if ( ns == "" ) {
errmsg = "no collection name specified";
return false;
}
BSONObj startingFromKey;
if ( !FieldParser::extract( cmdObj,
startingFromKeyField,
&startingFromKey,
&errmsg ) ) {
return false;
}
WriteConcernOptions writeConcern;
Status status = writeConcern.parseSecondaryThrottle(cmdObj, NULL);
if (!status.isOK()){
if (status.code() != ErrorCodes::WriteConcernNotDefined) {
return appendCommandStatus(result, status);
}
writeConcern = DefaultWriteConcern;
}
else {
repl::ReplicationCoordinator* replCoordinator =
repl::getGlobalReplicationCoordinator();
Status status = replCoordinator->checkIfWriteConcernCanBeSatisfied(writeConcern);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
}
if (writeConcern.shouldWaitForOtherNodes() &&
writeConcern.wTimeout == WriteConcernOptions::kNoTimeout) {
// Don't allow no timeout.
writeConcern.wTimeout = kDefaultWTimeoutMs;
}
if (!shardingState.enabled()) {
errmsg = str::stream() << "server is not part of a sharded cluster or "
<< "the sharding metadata is not yet initialized.";
return false;
}
ChunkVersion shardVersion;
status = shardingState.refreshMetadataNow( ns, &shardVersion );
if ( !status.isOK() ) {
if ( status.code() == ErrorCodes::RemoteChangeDetected ) {
warning() << "Shard version in transition detected while refreshing "
<< "metadata for " << ns << " at version " << shardVersion << endl;
}
else {
errmsg = str::stream() << "failed to refresh shard metadata: "
<< status.reason();
return false;
}
}
BSONObj stoppedAtKey;
CleanupResult cleanupResult = cleanupOrphanedData( txn,
NamespaceString( ns ),
startingFromKey,
writeConcern,
&stoppedAtKey,
&errmsg );
if ( cleanupResult == CleanupResult_Error ) {
return false;
}
if ( cleanupResult == CleanupResult_Continue ) {
result.append( stoppedAtKeyField(), stoppedAtKey );
}
else {
dassert( cleanupResult == CleanupResult_Done );
}
return true;
}
Example 3: queryOp
void Strategy::queryOp(OperationContext* txn, Request& r) {
verify(!NamespaceString(r.getns()).isCommand());
Timer queryTimer;
globalOpCounters.gotQuery();
QueryMessage q(r.d());
NamespaceString ns(q.ns);
ClientBasic* client = txn->getClient();
AuthorizationSession* authSession = AuthorizationSession::get(client);
Status status = authSession->checkAuthForQuery(ns, q.query);
audit::logQueryAuthzCheck(client, ns, q.query, status.code());
uassertStatusOK(status);
LOG(3) << "query: " << q.ns << " " << q.query << " ntoreturn: " << q.ntoreturn
<< " options: " << q.queryOptions;
if (q.ntoreturn == 1 && strstr(q.ns, ".$cmd"))
throw UserException(8010, "something is wrong, shouldn't see a command here");
if (q.queryOptions & QueryOption_Exhaust) {
uasserted(18526,
string("the 'exhaust' query option is invalid for mongos queries: ") + q.ns +
" " + q.query.toString());
}
// Spigot which controls whether OP_QUERY style find on mongos uses the new ClusterClientCursor
// code path.
// TODO: Delete the spigot and always use the new code.
if (useClusterClientCursor) {
auto txn = cc().makeOperationContext();
ReadPreferenceSetting readPreference(ReadPreference::PrimaryOnly, TagSet::primaryOnly());
BSONElement rpElem;
auto readPrefExtractStatus = bsonExtractTypedField(
q.query, LiteParsedQuery::kFindCommandReadPrefField, mongo::Object, &rpElem);
if (readPrefExtractStatus.isOK()) {
auto parsedRps = ReadPreferenceSetting::fromBSON(rpElem.Obj());
uassertStatusOK(parsedRps.getStatus());
readPreference = parsedRps.getValue();
} else if (readPrefExtractStatus != ErrorCodes::NoSuchKey) {
uassertStatusOK(readPrefExtractStatus);
}
auto canonicalQuery = CanonicalQuery::canonicalize(q, WhereCallbackNoop());
uassertStatusOK(canonicalQuery.getStatus());
// Do the work to generate the first batch of results. This blocks waiting to get responses
// from the shard(s).
std::vector<BSONObj> batch;
// A cursor id of 0 means the cursor is exhausted; otherwise we assume that a cursor with the
// returned id can be retrieved via the ClusterCursorManager.
auto cursorId =
ClusterFind::runQuery(txn.get(), *canonicalQuery.getValue(), readPreference, &batch);
uassertStatusOK(cursorId.getStatus());
// Build the response document.
// TODO: this constant should be shared between mongos and mongod, and should
// not be inside ShardedClientCursor.
BufBuilder buffer(ShardedClientCursor::INIT_REPLY_BUFFER_SIZE);
int numResults = 0;
for (const auto& obj : batch) {
buffer.appendBuf((void*)obj.objdata(), obj.objsize());
numResults++;
}
replyToQuery(0, // query result flags
r.p(),
r.m(),
buffer.buf(),
buffer.len(),
numResults,
0, // startingFrom
cursorId.getValue());
return;
}
QuerySpec qSpec((string)q.ns, q.query, q.fields, q.ntoskip, q.ntoreturn, q.queryOptions);
// Parse "$maxTimeMS".
StatusWith<int> maxTimeMS = LiteParsedQuery::parseMaxTimeMSQuery(q.query);
uassert(17233, maxTimeMS.getStatus().reason(), maxTimeMS.isOK());
if (_isSystemIndexes(q.ns) && doShardedIndexQuery(txn, r, qSpec)) {
return;
}
ParallelSortClusteredCursor* cursor = new ParallelSortClusteredCursor(qSpec, CommandInfo());
verify(cursor);
// TODO: Move out to Request itself, not strategy based
try {
cursor->init(txn);
//......... part of the code is omitted here .........
Example 4: prepare
Status ModifierRename::prepare(mutablebson::Element root,
const StringData& matchedField,
ExecInfo* execInfo) {
// Rename doesn't work with positional fields ($)
dassert(matchedField.empty());
_preparedState.reset(new PreparedState(root));
// Locate the from field name in 'root', which must exist.
size_t fromIdxFound;
Status status = pathsupport::findLongestPrefix(_fromFieldRef,
root,
&fromIdxFound,
&_preparedState->fromElemFound);
// If we can't find the full element in the from field then we can't do anything.
if (!status.isOK()) {
execInfo->noOp = true;
_preparedState->fromElemFound = root.getDocument().end();
// TODO: remove this special case from existing behavior
if (status.code() == ErrorCodes::PathNotViable) {
return status;
}
return Status::OK();
}
// Ensure no array in ancestry if what we found is not at the root
mutablebson::Element curr = _preparedState->fromElemFound.parent();
if (curr != curr.getDocument().root())
while (curr.ok() && (curr != curr.getDocument().root())) {
if (curr.getType() == Array)
return Status(ErrorCodes::BadValue,
str::stream() << "The source field cannot be an array element, '"
<< _fromFieldRef.dottedField() << "' in doc with "
<< findElementNamed(root.leftChild(), "_id").toString()
<< " has an array field called '" << curr.getFieldName() << "'");
curr = curr.parent();
}
// "To" side validation below
status = pathsupport::findLongestPrefix(_toFieldRef,
root,
&_preparedState->toIdxFound,
&_preparedState->toElemFound);
// FindLongestPrefix may return not viable or any other error and then we cannot proceed.
if (status.code() == ErrorCodes::NonExistentPath) {
// Not an error condition as we will create the "to" path as needed.
} else if (!status.isOK()) {
return status;
}
const bool destExists = _preparedState->toElemFound.ok() &&
(_preparedState->toIdxFound == (_toFieldRef.numParts()-1));
// Ensure no array in ancestry of "to" Element
// Set to either parent, or node depending on if the full path element was found
curr = (destExists ? _preparedState->toElemFound.parent() : _preparedState->toElemFound);
if (curr != curr.getDocument().root()) {
while (curr.ok()) {
if (curr.getType() == Array)
return Status(ErrorCodes::BadValue,
str::stream()
<< "The destination field cannot be an array element, '"
<< _fromFieldRef.dottedField() << "' in doc with "
<< findElementNamed(root.leftChild(), "_id").toString()
<< " has an array field called '" << curr.getFieldName() << "'");
curr = curr.parent();
}
}
// We register interest in the field name. The driver needs this info to sort out if
// there is any conflict among mods.
execInfo->fieldRef[0] = &_fromFieldRef;
execInfo->fieldRef[1] = &_toFieldRef;
execInfo->noOp = false;
return Status::OK();
}
Example 5: _tabulateHeartbeatResponse
void QuorumChecker::_tabulateHeartbeatResponse(const RemoteCommandRequest& request,
const executor::RemoteCommandResponse& response) {
++_numResponses;
if (!response.isOK()) {
warning() << "Failed to complete heartbeat request to " << request.target << "; "
<< response.status;
_badResponses.push_back(std::make_pair(request.target, response.status));
return;
}
BSONObj resBSON = response.data;
ReplSetHeartbeatResponse hbResp;
Status hbStatus = hbResp.initialize(resBSON, 0);
if (hbStatus.code() == ErrorCodes::InconsistentReplicaSetNames) {
std::string message = str::stream() << "Our set name did not match that of "
<< request.target.toString();
_vetoStatus = Status(ErrorCodes::NewReplicaSetConfigurationIncompatible, message);
warning() << message;
return;
}
if (!hbStatus.isOK() && hbStatus != ErrorCodes::InvalidReplicaSetConfig) {
warning() << "Got error (" << hbStatus << ") response on heartbeat request to "
<< request.target << "; " << hbResp;
_badResponses.push_back(std::make_pair(request.target, hbStatus));
return;
}
if (!hbResp.getReplicaSetName().empty()) {
if (hbResp.getConfigVersion() >= _rsConfig->getConfigVersion()) {
std::string message = str::stream()
<< "Our config version of " << _rsConfig->getConfigVersion()
<< " is no larger than the version on " << request.target.toString()
<< ", which is " << hbResp.getConfigVersion();
_vetoStatus = Status(ErrorCodes::NewReplicaSetConfigurationIncompatible, message);
warning() << message;
return;
}
}
if (_rsConfig->hasReplicaSetId()) {
StatusWith<rpc::ReplSetMetadata> replMetadata =
rpc::ReplSetMetadata::readFromMetadata(response.metadata);
if (replMetadata.isOK() && replMetadata.getValue().getReplicaSetId().isSet() &&
_rsConfig->getReplicaSetId() != replMetadata.getValue().getReplicaSetId()) {
std::string message = str::stream()
<< "Our replica set ID of " << _rsConfig->getReplicaSetId()
<< " did not match that of " << request.target.toString() << ", which is "
<< replMetadata.getValue().getReplicaSetId();
_vetoStatus = Status(ErrorCodes::NewReplicaSetConfigurationIncompatible, message);
warning() << message;
}
}
const bool isInitialConfig = _rsConfig->getConfigVersion() == 1;
if (isInitialConfig && hbResp.hasData()) {
std::string message = str::stream() << "'" << request.target.toString()
<< "' has data already, cannot initiate set.";
_vetoStatus = Status(ErrorCodes::CannotInitializeNodeWithData, message);
warning() << message;
return;
}
for (int i = 0; i < _rsConfig->getNumMembers(); ++i) {
const MemberConfig& memberConfig = _rsConfig->getMemberAt(i);
if (memberConfig.getHostAndPort() != request.target) {
continue;
}
if (memberConfig.isElectable()) {
++_numElectable;
}
if (memberConfig.isVoter()) {
_voters.push_back(request.target);
}
return;
}
invariant(false);
}
Example 6: buildTargetError
static void buildTargetError(const Status& errStatus, WriteErrorDetail* details) {
details->setErrCode(errStatus.code());
details->setErrMessage(errStatus.reason());
}
Example 7: _handleHeartbeatResponse
void ReplicationCoordinatorImpl::_handleHeartbeatResponse(
const ReplicationExecutor::RemoteCommandCallbackData& cbData, int targetIndex) {
// remove handle from queued heartbeats
_untrackHeartbeatHandle(cbData.myHandle);
// Parse and validate the response. At the end of this step, if responseStatus is OK then
// hbResponse is valid.
Status responseStatus = cbData.response.getStatus();
if (responseStatus == ErrorCodes::CallbackCanceled) {
return;
}
const HostAndPort& target = cbData.request.target;
ReplSetHeartbeatResponse hbResponse;
BSONObj resp;
if (responseStatus.isOK()) {
resp = cbData.response.getValue().data;
responseStatus = hbResponse.initialize(resp);
}
const bool isUnauthorized = (responseStatus.code() == ErrorCodes::Unauthorized) ||
(responseStatus.code() == ErrorCodes::AuthenticationFailed);
const Date_t now = _replExecutor.now();
const OpTime lastApplied = getMyLastOptime(); // Locks and unlocks _mutex.
Milliseconds networkTime(0);
StatusWith<ReplSetHeartbeatResponse> hbStatusResponse(hbResponse);
if (responseStatus.isOK()) {
networkTime = cbData.response.getValue().elapsedMillis;
}
else {
log() << "Error in heartbeat request to " << target << "; " << responseStatus;
if (!resp.isEmpty()) {
LOG(3) << "heartbeat response: " << resp;
}
if (isUnauthorized) {
networkTime = cbData.response.getValue().elapsedMillis;
}
hbStatusResponse = StatusWith<ReplSetHeartbeatResponse>(responseStatus);
}
HeartbeatResponseAction action =
_topCoord->processHeartbeatResponse(
now,
networkTime,
target,
hbStatusResponse,
lastApplied);
if (action.getAction() == HeartbeatResponseAction::NoAction &&
hbStatusResponse.isOK() &&
hbStatusResponse.getValue().hasOpTime() &&
targetIndex >= 0) {
boost::lock_guard<boost::mutex> lk(_mutex);
if (hbStatusResponse.getValue().getVersion() == _rsConfig.getConfigVersion()) {
_updateOpTimeFromHeartbeat_inlock(targetIndex,
hbStatusResponse.getValue().getOpTime());
}
}
_signalStepDownWaiters();
_scheduleHeartbeatToTarget(
target,
targetIndex,
std::max(now, action.getNextHeartbeatStartDate()));
_handleHeartbeatResponseAction(action, hbStatusResponse);
}
Example 8: queryOp
void Strategy::queryOp( Request& r ) {
verify( !NamespaceString( r.getns() ).isCommand() );
Timer queryTimer;
QueryMessage q( r.d() );
NamespaceString ns(q.ns);
ClientBasic* client = ClientBasic::getCurrent();
AuthorizationSession* authSession = AuthorizationSession::get(client);
Status status = authSession->checkAuthForQuery(ns, q.query);
audit::logQueryAuthzCheck(client, ns, q.query, status.code());
uassertStatusOK(status);
LOG(3) << "query: " << q.ns << " " << q.query << " ntoreturn: " << q.ntoreturn
<< " options: " << q.queryOptions << endl;
if ( q.ntoreturn == 1 && strstr(q.ns, ".$cmd") )
throw UserException( 8010 , "something is wrong, shouldn't see a command here" );
if (q.queryOptions & QueryOption_Exhaust) {
uasserted(18526,
string("the 'exhaust' query option is invalid for mongos queries: ") + q.ns
+ " " + q.query.toString());
}
QuerySpec qSpec( (string)q.ns, q.query, q.fields, q.ntoskip, q.ntoreturn, q.queryOptions );
// Parse "$maxTimeMS".
StatusWith<int> maxTimeMS = LiteParsedQuery::parseMaxTimeMSQuery( q.query );
uassert( 17233,
maxTimeMS.getStatus().reason(),
maxTimeMS.isOK() );
if ( _isSystemIndexes( q.ns ) && doShardedIndexQuery( r, qSpec )) {
return;
}
ParallelSortClusteredCursor * cursor = new ParallelSortClusteredCursor( qSpec, CommandInfo() );
verify( cursor );
// TODO: Move out to Request itself, not strategy based
try {
cursor->init();
if ( qSpec.isExplain() ) {
BSONObjBuilder explain_builder;
cursor->explain( explain_builder );
explain_builder.appendNumber( "executionTimeMillis",
static_cast<long long>(queryTimer.millis()) );
BSONObj b = explain_builder.obj();
replyToQuery( 0 , r.p() , r.m() , b );
delete( cursor );
return;
}
}
catch(...) {
delete cursor;
throw;
}
// TODO: Revisit all of this when we revisit the sharded cursor cache
if (cursor->getNumQueryShards() != 1) {
// More than one shard (or zero), manage with a ShardedClientCursor
// NOTE: We may also have *zero* shards here when the returnPartial flag is set.
// Currently the code in ShardedClientCursor handles this.
ShardedClientCursorPtr cc (new ShardedClientCursor( q , cursor ));
BufBuilder buffer( ShardedClientCursor::INIT_REPLY_BUFFER_SIZE );
int docCount = 0;
const int startFrom = cc->getTotalSent();
bool hasMore = cc->sendNextBatch(q.ntoreturn, buffer, docCount);
if ( hasMore ) {
LOG(5) << "storing cursor : " << cc->getId() << endl;
int cursorLeftoverMillis = maxTimeMS.getValue() - queryTimer.millis();
if ( maxTimeMS.getValue() == 0 ) { // 0 represents "no limit".
cursorLeftoverMillis = kMaxTimeCursorNoTimeLimit;
}
else if ( cursorLeftoverMillis <= 0 ) {
cursorLeftoverMillis = kMaxTimeCursorTimeLimitExpired;
}
cursorCache.store( cc, cursorLeftoverMillis );
}
replyToQuery( 0, r.p(), r.m(), buffer.buf(), buffer.len(), docCount,
startFrom, hasMore ? cc->getId() : 0 );
}
else{
// Only one shard is used
// Remote cursors are stored remotely, we shouldn't need this around.
//......... part of the code is omitted here .........
Example 9: commitBulk
Status IndexAccessMethod::commitBulk(OperationContext* txn,
std::unique_ptr<BulkBuilder> bulk,
bool mayInterrupt,
bool dupsAllowed,
set<RecordId>* dupsToDrop) {
Timer timer;
std::unique_ptr<BulkBuilder::Sorter::Iterator> i(bulk->_sorter->done());
stdx::unique_lock<Client> lk(*txn->getClient());
ProgressMeterHolder pm(*txn->setMessage_inlock("Index Bulk Build: (2/3) btree bottom up",
"Index: (2/3) BTree Bottom Up Progress",
bulk->_keysInserted,
10));
lk.unlock();
std::unique_ptr<SortedDataBuilderInterface> builder;
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
WriteUnitOfWork wunit(txn);
if (bulk->_isMultiKey) {
_btreeState->setMultikey(txn);
}
builder.reset(_newInterface->getBulkBuilder(txn, dupsAllowed));
wunit.commit();
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "setting index multikey flag", "");
while (i->more()) {
if (mayInterrupt) {
txn->checkForInterrupt();
}
WriteUnitOfWork wunit(txn);
// Improve performance in the btree-building phase by disabling rollback tracking.
// This avoids copying all the written bytes to a buffer that is only used to roll back.
// Note that this is safe to do, as this entire index-build-in-progress will be cleaned
// up by the index system.
txn->recoveryUnit()->setRollbackWritesDisabled();
// Get the next datum and add it to the builder.
BulkBuilder::Sorter::Data d = i->next();
Status status = builder->addKey(d.first, d.second);
if (!status.isOK()) {
// Overlong key that's OK to skip?
if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong(txn)) {
continue;
}
// Check if this is a duplicate that's OK to skip
if (status.code() == ErrorCodes::DuplicateKey) {
invariant(!dupsAllowed); // shouldn't be getting DupKey errors if dupsAllowed.
if (dupsToDrop) {
dupsToDrop->insert(d.second);
continue;
}
}
return status;
}
// If we're here either it's a dup and we're cool with it or the addKey went just
// fine.
pm.hit();
wunit.commit();
}
pm.finished();
{
stdx::lock_guard<Client> lk(*txn->getClient());
CurOp::get(txn)->setMessage_inlock("Index Bulk Build: (3/3) btree-middle",
"Index: (3/3) BTree Middle Progress");
}
LOG(timer.seconds() > 10 ? 0 : 1) << "\t done building bottom layer, going to commit";
builder->commit(mayInterrupt);
return Status::OK();
}
Example 10: whyMessage
StatusWith<ForwardingCatalogManager::ScopedDistLock*>
ChunkMoveOperationState::acquireMoveMetadata() {
// Get the distributed lock
const string whyMessage(stream() << "migrating chunk [" << _minKey << ", " << _maxKey << ") in "
<< _nss.ns());
_distLockStatus = grid.forwardingCatalogManager()->distLock(_txn, _nss.ns(), whyMessage);
if (!_distLockStatus->isOK()) {
const string msg = stream() << "could not acquire collection lock for " << _nss.ns()
<< " to migrate chunk [" << _minKey << "," << _maxKey << ")"
<< causedBy(_distLockStatus->getStatus());
warning() << msg;
return Status(_distLockStatus->getStatus().code(), msg);
}
ShardingState* const shardingState = ShardingState::get(_txn);
// Snapshot the metadata
Status refreshStatus = shardingState->refreshMetadataNow(_txn, _nss.ns(), &_shardVersion);
if (!refreshStatus.isOK()) {
const string msg = stream() << "moveChunk cannot start migrate of chunk "
<< "[" << _minKey << "," << _maxKey << ")"
<< causedBy(refreshStatus.reason());
warning() << msg;
return Status(refreshStatus.code(), msg);
}
if (_shardVersion.majorVersion() == 0) {
// It makes no sense to migrate if our version is zero and we have no chunks
const string msg = stream() << "moveChunk cannot start migrate of chunk "
<< "[" << _minKey << "," << _maxKey << ")"
<< " with zero shard version";
warning() << msg;
return Status(ErrorCodes::IncompatibleShardingMetadata, msg);
}
{
// Mongos >= v3.2 sends the full version, v3.0 only sends the epoch.
// TODO(SERVER-20742): Stop parsing epoch separately after 3.2.
auto& operationVersion = OperationShardVersion::get(_txn);
if (operationVersion.hasShardVersion()) {
_collectionVersion = operationVersion.getShardVersion(_nss);
_collectionEpoch = _collectionVersion.epoch();
} // else the epoch will already be set from the parsing of the ChunkMoveOperationState
if (_collectionEpoch != _shardVersion.epoch()) {
const string msg = stream() << "moveChunk cannot move chunk "
<< "[" << _minKey << "," << _maxKey << "), "
<< "collection may have been dropped. "
<< "current epoch: " << _shardVersion.epoch()
<< ", cmd epoch: " << _collectionEpoch;
warning() << msg;
throw SendStaleConfigException(_nss.toString(), msg, _collectionVersion, _shardVersion);
}
}
_collMetadata = shardingState->getCollectionMetadata(_nss.ns());
// With nonzero shard version, we must have a coll version >= our shard version
invariant(_collMetadata->getCollVersion() >= _shardVersion);
// With nonzero shard version, we must have a shard key
invariant(!_collMetadata->getKeyPattern().isEmpty());
ChunkType origChunk;
if (!_collMetadata->getNextChunk(_minKey, &origChunk) ||
origChunk.getMin().woCompare(_minKey) || origChunk.getMax().woCompare(_maxKey)) {
// Our boundaries are different from those passed in
const string msg = stream() << "moveChunk cannot find chunk "
<< "[" << _minKey << "," << _maxKey << ")"
<< " to migrate, the chunk boundaries may be stale";
warning() << msg;
throw SendStaleConfigException(_nss.toString(), msg, _collectionVersion, _shardVersion);
}
return &_distLockStatus->getValue();
}
Example 11: insertConfigDocument
Status ShardingCatalogClientImpl::insertConfigDocument(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& doc,
const WriteConcernOptions& writeConcern) {
invariant(nss.db() == NamespaceString::kAdminDb || nss.db() == NamespaceString::kConfigDb);
const BSONElement idField = doc.getField("_id");
invariant(!idField.eoo());
BatchedCommandRequest request([&] {
write_ops::Insert insertOp(nss);
insertOp.setDocuments({doc});
return insertOp;
}());
request.setWriteConcern(writeConcern.toBSON());
auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
for (int retry = 1; retry <= kMaxWriteRetry; retry++) {
auto response = configShard->runBatchWriteCommand(
opCtx, Shard::kDefaultConfigCommandTimeout, request, Shard::RetryPolicy::kNoRetry);
Status status = response.toStatus();
if (retry < kMaxWriteRetry &&
configShard->isRetriableError(status.code(), Shard::RetryPolicy::kIdempotent)) {
// Pretend like the operation is idempotent because we're handling DuplicateKey errors
// specially
continue;
}
// If we get DuplicateKey error on the first attempt to insert, this definitively means that
// we are trying to insert the same entry a second time, so error out. If it happens on a
// retry attempt though, it is not clear whether we are actually inserting a duplicate key
// or it is because we failed to wait for write concern on the first attempt. In order to
// differentiate, fetch the entry and check.
if (retry > 1 && status == ErrorCodes::DuplicateKey) {
LOG(1) << "Insert retry failed because of duplicate key error, rechecking.";
auto fetchDuplicate =
_exhaustiveFindOnConfig(opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kMajorityReadConcern,
nss,
idField.wrap(),
BSONObj(),
boost::none);
if (!fetchDuplicate.isOK()) {
return fetchDuplicate.getStatus();
}
auto existingDocs = fetchDuplicate.getValue().value;
if (existingDocs.empty()) {
return {status.withContext(
stream() << "DuplicateKey error was returned after a retry attempt, but no "
"documents were found. This means a concurrent change occurred "
"together with the retries.")};
}
invariant(existingDocs.size() == 1);
BSONObj existing = std::move(existingDocs.front());
if (existing.woCompare(doc) == 0) {
// Documents match, so treat the operation as success
return Status::OK();
}
}
return status;
}
MONGO_UNREACHABLE;
}
Example 12: insert
DiskLoc DataFileMgr::insert(const char* ns,
const void* obuf,
int32_t len,
bool mayInterrupt,
bool god,
bool mayAddIndex,
bool* addedID) {
Database* database = cc().database();
bool wouldAddIndex = false;
massert( 10093 , "cannot insert into reserved $ collection", god || NamespaceString::normal( ns ) );
uassert( 10094 , str::stream() << "invalid ns: " << ns , isValidNS( ns ) );
{
const char *sys = strstr(ns, "system.");
if ( sys ) {
if ( !insert_checkSys(sys, ns, wouldAddIndex, obuf, god) )
return DiskLoc();
if ( mayAddIndex && wouldAddIndex ) {
// TODO: this should be handled above this function
BSONObj spec( static_cast<const char*>( obuf ) );
string collectionToIndex = spec.getStringField( "ns" );
uassert(10096, "invalid ns to index", collectionToIndex.find( '.' ) != string::npos);
massert(10097,
str::stream() << "trying to create index on wrong db "
<< " db: " << database->name() << " collection: " << collectionToIndex,
database->ownsNS( collectionToIndex ) );
Collection* collection = database->getCollection( collectionToIndex );
if ( !collection ) {
collection = database->createCollection( collectionToIndex, false, NULL, true );
verify( collection );
if ( !god )
ensureIdIndexForNewNs( collection );
}
Status status = collection->getIndexCatalog()->createIndex( spec, mayInterrupt );
if ( status.code() == ErrorCodes::IndexAlreadyExists )
return DiskLoc();
uassertStatusOK( status );
return DiskLoc();
}
}
}
Collection* collection = database->getCollection( ns );
if ( collection == NULL ) {
collection = database->createCollection( ns, false, NULL, false );
int ies = Extent::initialSize(len);
if( str::contains(ns, '$') &&
len + Record::HeaderSize >= BtreeData_V1::BucketSize - 256 &&
len + Record::HeaderSize <= BtreeData_V1::BucketSize + 256 ) {
// probably an index. so we pick a value here for the first extent instead of using
// initialExtentSize() which is more for user collections.
// TODO: we could look at the # of records in the parent collection to be smarter here.
ies = (32+4) * 1024;
}
collection->increaseStorageSize( ies, false);
if ( !god )
ensureIdIndexForNewNs( collection );
}
NamespaceDetails* d = collection->details();
IDToInsert idToInsert; // only initialized if needed
if( !god ) {
/* Check if we have an _id field. If we don't, we'll add it.
Note that btree buckets which we insert aren't BSONObj's, but in that case god==true.
*/
BSONObj io((const char *) obuf);
BSONElement idField = io.getField( "_id" );
uassert( 10099 , "_id cannot be an array", idField.type() != Array );
// we don't add _id for capped collections in local as they don't have an _id index
if( idField.eoo() &&
!wouldAddIndex &&
nsToDatabase( ns ) != "local" &&
d->haveIdIndex() ) {
if( addedID )
*addedID = true;
idToInsert.init();
len += idToInsert.size();
}
BSONElementManipulator::lookForTimestamps( io );
}
int lenWHdr = d->getRecordAllocationSize( len + Record::HeaderSize );
fassert( 16440, lenWHdr >= ( len + Record::HeaderSize ) );
// If the collection is capped, check if the new object will violate a unique index
// constraint before allocating space.
if ( d->isCapped() && !god) {
BSONObj temp = BSONObj( reinterpret_cast<const char *>( obuf ) );
Status ret = collection->getIndexCatalog()->checkNoIndexConflicts( temp );
//......... part of the code is omitted here .........
Example 13: _produce
//......... part of the code is omitted here .........
OpTimeWithHash(lastHashFetched, lastOpTimeFetched),
source,
NamespaceString(rsOplogName),
config,
&dataReplicatorExternalState,
stdx::bind(&BackgroundSync::_enqueueDocuments,
this,
stdx::placeholders::_1,
stdx::placeholders::_2,
stdx::placeholders::_3,
stdx::placeholders::_4),
onOplogFetcherShutdownCallbackFn);
oplogFetcher = _oplogFetcher.get();
} catch (const mongo::DBException& ex) {
fassertFailedWithStatus(34440, exceptionToStatus());
}
LOG(1) << "scheduling fetcher to read remote oplog on " << _syncSourceHost << " starting at "
<< oplogFetcher->getCommandObject_forTest()["filter"];
auto scheduleStatus = oplogFetcher->startup();
if (!scheduleStatus.isOK()) {
warning() << "unable to schedule fetcher to read remote oplog on " << source << ": "
<< scheduleStatus;
return;
}
oplogFetcher->join();
LOG(1) << "fetcher stopped reading remote oplog on " << source;
// If the background sync is stopped after the fetcher is started, we need to
// re-evaluate our sync source and oplog common point.
if (isStopped()) {
return;
}
if (fetcherReturnStatus.code() == ErrorCodes::OplogOutOfOrder) {
// This is bad because it means that our source
// has not returned oplog entries in ascending ts order, and they need to be.
warning() << fetcherReturnStatus.toString();
// Do not blacklist the server here, it will be blacklisted when we try to reuse it,
// if it can't return a matching oplog start from the last fetch oplog ts field.
return;
} else if (fetcherReturnStatus.code() == ErrorCodes::OplogStartMissing ||
fetcherReturnStatus.code() == ErrorCodes::RemoteOplogStale) {
// Rollback is a synchronous operation that uses the task executor and may not be
// executed inside the fetcher callback.
const int messagingPortTags = 0;
ConnectionPool connectionPool(messagingPortTags);
std::unique_ptr<ConnectionPool::ConnectionPtr> connection;
auto getConnection = [&connection, &connectionPool, source]() -> DBClientBase* {
if (!connection.get()) {
connection.reset(new ConnectionPool::ConnectionPtr(
&connectionPool, source, Date_t::now(), kOplogSocketTimeout));
};
return connection->get();
};
{
stdx::lock_guard<stdx::mutex> lock(_mutex);
lastOpTimeFetched = _lastOpTimeFetched;
}
log() << "Starting rollback due to " << fetcherReturnStatus;
// Wait till all buffered oplog entries have drained and been applied.
auto lastApplied = _replCoord->getMyLastAppliedOpTime();
if (lastApplied != lastOpTimeFetched) {
log() << "Waiting for all operations from " << lastApplied << " until "
<< lastOpTimeFetched << " to be applied before starting rollback.";
while (lastOpTimeFetched > (lastApplied = _replCoord->getMyLastAppliedOpTime())) {
sleepmillis(10);
if (isStopped() || inShutdown()) {
return;
}
}
}
// check that we are at minvalid, otherwise we cannot roll back as we may be in an
// inconsistent state
BatchBoundaries boundaries = StorageInterface::get(txn)->getMinValid(txn);
if (!boundaries.start.isNull() || boundaries.end > lastApplied) {
fassertNoTrace(18750,
Status(ErrorCodes::UnrecoverableRollbackError,
str::stream()
<< "need to rollback, but in inconsistent state. "
<< "minvalid: " << boundaries.end.toString()
<< " > our last optime: " << lastApplied.toString()));
}
_rollback(txn, source, getConnection);
stop();
} else if (fetcherReturnStatus == ErrorCodes::InvalidBSON) {
Seconds blacklistDuration(60);
warning() << "Fetcher got invalid BSON while querying oplog. Blacklisting sync source "
<< source << " for " << blacklistDuration << ".";
_replCoord->blacklistSyncSource(source, Date_t::now() + blacklistDuration);
} else if (!fetcherReturnStatus.isOK()) {
warning() << "Fetcher error querying oplog: " << fetcherReturnStatus.toString();
}
}
Example 14: run
bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
int s = 0;
bool found = false;
// TODO: convert to ServerParameters -- SERVER-10515
if( cmdObj.hasElement("journalCommitInterval") ) {
if (isMongos()) {
errmsg = "cannot set journalCommitInterval on a mongos";
return false;
}
if(!isJournalingEnabled()) {
errmsg = "journaling is off";
return false;
}
int x = (int) cmdObj["journalCommitInterval"].Number();
verify( x > 1 && x < 500 );
setJournalCommitInterval(x);
log() << "setParameter journalCommitInterval=" << x << endl;
s++;
}
if( cmdObj.hasElement( "traceExceptions" ) ) {
if( s == 0 ) result.append( "was", DBException::traceExceptions );
DBException::traceExceptions = cmdObj["traceExceptions"].Bool();
s++;
}
if( cmdObj.hasElement( "replMonitorMaxFailedChecks" ) ) {
if( s == 0 ) result.append( "was", ReplicaSetMonitor::maxConsecutiveFailedChecks );
ReplicaSetMonitor::maxConsecutiveFailedChecks =
cmdObj["replMonitorMaxFailedChecks"].numberInt();
s++;
}
const ServerParameter::Map& m = ServerParameterSet::getGlobal()->getMap();
BSONObjIterator i( cmdObj );
i.next(); // skip past command name
while ( i.more() ) {
BSONElement e = i.next();
ServerParameter::Map::const_iterator j = m.find( e.fieldName() );
if ( j == m.end() )
continue;
if ( ! j->second->allowedToChangeAtRuntime() ) {
errmsg = str::stream()
<< "not allowed to change ["
<< e.fieldName()
<< "] at runtime";
return false;
}
if ( s == 0 )
j->second->append( result, "was" );
Status status = j->second->set( e );
if ( status.isOK() ) {
s++;
continue;
}
errmsg = status.reason();
result.append( "code", status.code() );
return false;
}
if( s == 0 && !found ) {
errmsg = "no option found to set, use help:true to see options ";
return false;
}
return true;
}
Example 15: prepare
Status ModifierPullAll::prepare(mutablebson::Element root,
StringData matchedField,
ExecInfo* execInfo) {
_preparedState.reset(new PreparedState(&root.getDocument()));
// If we have a $-positional field, it is time to bind it to an actual field part.
if (_positionalPathIndex) {
if (matchedField.empty()) {
return Status(ErrorCodes::BadValue,
str::stream() << "The positional operator did not find the match "
"needed from the query. Unexpanded update: "
<< _fieldRef.dottedField());
}
_fieldRef.setPart(_positionalPathIndex, matchedField);
}
// Locate the field name in 'root'. Note that if we don't have the full path in the
// doc, there isn't anything to unset, really.
Status status = pathsupport::findLongestPrefix(
_fieldRef, root, &_preparedState->pathFoundIndex, &_preparedState->pathFoundElement);
// Check if we didn't find the full path
if (status.isOK()) {
const bool destExists = (_preparedState->pathFoundIndex == (_fieldRef.numParts() - 1));
if (!destExists) {
execInfo->noOp = true;
} else {
// If the path exists, we require the target field to be already an
// array.
if (_preparedState->pathFoundElement.getType() != Array) {
mb::Element idElem = mb::findElementNamed(root.leftChild(), "_id");
return Status(
ErrorCodes::BadValue,
str::stream() << "Can only apply $pullAll to an array. " << idElem.toString()
<< " has the field "
<< _preparedState->pathFoundElement.getFieldName()
<< " of non-array type "
<< typeName(_preparedState->pathFoundElement.getType()));
}
// No children, nothing to do -- not an error state
if (!_preparedState->pathFoundElement.hasChildren()) {
execInfo->noOp = true;
} else {
mutablebson::Element elem = _preparedState->pathFoundElement.leftChild();
while (elem.ok()) {
if (std::find_if(_elementsToFind.begin(),
_elementsToFind.end(),
mutableElementEqualsBSONElement(elem, _collator)) !=
_elementsToFind.end()) {
_preparedState->elementsToRemove.push_back(elem);
}
elem = elem.rightSibling();
}
// Nothing to remove so it is a noOp.
if (_preparedState->elementsToRemove.empty())
execInfo->noOp = true;
}
}
} else {
// Let the caller know we can't do anything given the mod, _fieldRef, and doc.
execInfo->noOp = true;
// okay if path not found
if (status.code() == ErrorCodes::NonExistentPath)
status = Status::OK();
}
// Let the caller know what field we care about
execInfo->fieldRef[0] = &_fieldRef;
return status;
}