本文整理汇总了C++中WriteConcernOptions::toBSON方法的典型用法代码示例。如果您正苦于以下问题:C++ WriteConcernOptions::toBSON方法的具体用法?C++ WriteConcernOptions::toBSON怎么用?C++ WriteConcernOptions::toBSON使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 WriteConcernOptions 的用法示例。
在下文中一共展示了WriteConcernOptions::toBSON方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: validateWriteConcern
// Validates that 'writeConcern' can be honored for a write against 'dbName' on
// this node.
//
// Rejects j:true when the storage engine is not durable, enforces the
// config-server restrictions (w:1 / w:'majority' only, and 'majority' for
// external replica-set writes), and disallows replication-dependent settings
// on unreplicated hosts.
Status validateWriteConcern(OperationContext* txn,
                            const WriteConcernOptions& writeConcern,
                            const std::string& dbName) {
    const bool journalingEnabled =
        getGlobalServiceContext()->getGlobalStorageEngine()->isDurable();
    if (!journalingEnabled && writeConcern.syncMode == WriteConcernOptions::SyncMode::JOURNAL) {
        return Status(ErrorCodes::BadValue,
                      "cannot use 'j' option when a host does not have journaling enabled");
    }

    const bool onConfigServer = (serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
    const bool targetsLocalDb = (dbName == kLocalDB);
    const repl::ReplicationCoordinator::Mode replicationMode =
        repl::getGlobalReplicationCoordinator()->getReplicationMode();

    if (onConfigServer) {
        if (!writeConcern.validForConfigServers()) {
            return Status(
                ErrorCodes::BadValue,
                str::stream()
                    << "w:1 and w:'majority' are the only valid write concerns when writing to "
                       "config servers, got: "
                    << writeConcern.toBSON().toString());
        }

        // Writes performed within the server (direct client) may keep { w: 1 }.
        // This lets commands holding locks (ex. addShardToZone) skip waiting for
        // replication, and lets multi-write commands batch the wait at the end.
        const bool isExternalWrite = !txn->getClient()->isInDirectClient();
        if (replicationMode == repl::ReplicationCoordinator::modeReplSet && isExternalWrite &&
            !targetsLocalDb && writeConcern.wMode.empty()) {
            invariant(writeConcern.wNumNodes == 1);
            return Status(
                ErrorCodes::BadValue,
                str::stream()
                    << "w: 'majority' is the only valid write concern when writing to config "
                       "server replica sets, got: "
                    << writeConcern.toBSON().toString());
        }
    }

    if (replicationMode == repl::ReplicationCoordinator::modeNone && writeConcern.wNumNodes > 1) {
        return Status(ErrorCodes::BadValue, "cannot use 'w' > 1 when a host is not replicated");
    }

    if (replicationMode != repl::ReplicationCoordinator::modeReplSet &&
        !writeConcern.wMode.empty() && writeConcern.wMode != WriteConcernOptions::kMajority) {
        return Status(ErrorCodes::BadValue,
                      string("cannot use non-majority 'w' mode ") + writeConcern.wMode +
                          " when a host is not a member of a replica set");
    }

    return Status::OK();
}
示例2: validateWriteConcern
// Validates 'writeConcern' for a write against 'dbName' on this node (legacy
// variant without an OperationContext, using the older configsvr globals).
// Returns BadValue when the write concern cannot be honored, Status::OK otherwise.
Status validateWriteConcern(const WriteConcernOptions& writeConcern, const std::string& dbName) {
// j:true is only meaningful when the storage engine journals.
const bool isJournalEnabled = getGlobalServiceContext()->getGlobalStorageEngine()->isDurable();
if (writeConcern.syncMode == WriteConcernOptions::JOURNAL && !isJournalEnabled) {
return Status(ErrorCodes::BadValue,
"cannot use 'j' option when a host does not have journaling enabled");
}
const bool isConfigServer = serverGlobalParams.configsvr;
const bool isLocalDb(dbName == kLocalDB);
const repl::ReplicationCoordinator::Mode replMode =
repl::getGlobalReplicationCoordinator()->getReplicationMode();
if (isConfigServer) {
// Config servers only accept w:1 or w:'majority'.
if (!writeConcern.validForConfigServers()) {
return Status(
ErrorCodes::BadValue,
str::stream()
<< "w:1 and w:'majority' are the only valid write concerns when writing to "
"config servers, got: " << writeConcern.toBSON().toString());
}
// In CSRS mode, non-local writes to a replica-set config server must use a
// named w-mode ('majority'); an empty wMode implies w:1, which is rejected.
if (serverGlobalParams.configsvrMode == CatalogManager::ConfigServerMode::CSRS &&
replMode == repl::ReplicationCoordinator::modeReplSet && !isLocalDb &&
writeConcern.wMode.empty()) {
invariant(writeConcern.wNumNodes == 1);
return Status(
ErrorCodes::BadValue,
str::stream()
<< "w: 'majority' is the only valid write concern when writing to config "
"server replica sets, got: " << writeConcern.toBSON().toString());
}
}
// Numeric w > 1 requires some form of replication.
if (replMode == repl::ReplicationCoordinator::modeNone) {
if (writeConcern.wNumNodes > 1) {
return Status(ErrorCodes::BadValue, "cannot use 'w' > 1 when a host is not replicated");
}
}
// Named w-modes other than 'majority' require replica-set membership.
if (replMode != repl::ReplicationCoordinator::modeReplSet && !writeConcern.wMode.empty() &&
writeConcern.wMode != WriteConcernOptions::kMajority) {
return Status(ErrorCodes::BadValue,
string("cannot use non-majority 'w' mode ") + writeConcern.wMode +
" when a host is not a member of a replica set");
}
return Status::OK();
}
示例3: appendMajorityWriteConcern
// Returns 'cmdObj' with its write concern forced to w:'majority'.
//
// If the command already requests w:'majority' it is returned unchanged. If it
// specifies a wtimeout (but not majority), that timeout is preserved while the
// w value is upgraded to 'majority'. Otherwise kMajorityWriteConcern is used.
BSONObj CommandHelpers::appendMajorityWriteConcern(const BSONObj& cmdObj) {
    WriteConcernOptions effectiveWC = kMajorityWriteConcern;

    if (cmdObj.hasField(kWriteConcernField)) {
        const auto requestedWC = cmdObj.getField(kWriteConcernField);

        // Already majority -- nothing to rewrite.
        if (requestedWC["w"].ok() && requestedWC["w"].str() == "majority") {
            return cmdObj;
        }

        // Caller set a timeout without majority: keep their timeout, upgrade the
        // w value to majority.
        if (requestedWC["wtimeout"].ok()) {
            effectiveWC = WriteConcernOptions(WriteConcernOptions::kMajority,
                                              WriteConcernOptions::SyncMode::UNSET,
                                              requestedWC["wtimeout"].Number());
        }
    }

    // Copy every original field except the writeConcern, then append the new one.
    BSONObjBuilder rewritten;
    for (const auto& field : cmdObj) {
        const auto fieldName = field.fieldNameStringData();
        if (fieldName != "writeConcern" && !rewritten.hasField(fieldName)) {
            rewritten.append(field);
        }
    }
    rewritten.append(kWriteConcernField, effectiveWC.toBSON());
    return rewritten.obj();
}
示例4: MigrationSecondaryThrottleOptions
// Builds secondary-throttle options from an explicit write concern.
//
// A write concern that waits for no other node (w <= 1 with no named mode) is
// behaviorally identical to throttling being off, so it is normalized to kOff
// with no stored write concern.
MigrationSecondaryThrottleOptions MigrationSecondaryThrottleOptions::createWithWriteConcern(
    const WriteConcernOptions& writeConcern) {
    const bool waitsForNoOtherNode = writeConcern.wNumNodes <= 1 && writeConcern.wMode.empty();
    if (waitsForNoOtherNode) {
        return MigrationSecondaryThrottleOptions(kOff, boost::none);
    }

    return MigrationSecondaryThrottleOptions(kOn, writeConcern.toBSON());
}
示例5: request
// Runs a single (non-multi) update against a config collection, addressed by
// _id, and reports whether exactly one document was affected.
//
// Returns the batch-write error status on failure; on success returns true iff
// the update matched/upserted a document (n == 1).
StatusWith<bool> ShardingCatalogClientImpl::_updateConfigDocument(
    OperationContext* opCtx,
    const NamespaceString& nss,
    const BSONObj& query,
    const BSONObj& update,
    bool upsert,
    const WriteConcernOptions& writeConcern) {
    invariant(nss.db() == NamespaceString::kConfigDb);

    // Config document updates must always be addressed by _id.
    const BSONElement idField = query.getField("_id");
    invariant(!idField.eoo());

    // Assemble a single-entry, non-multi update operation.
    auto buildUpdateOp = [&] {
        write_ops::UpdateOpEntry entry;
        entry.setQ(query);
        entry.setU(update);
        entry.setUpsert(upsert);
        entry.setMulti(false);

        write_ops::Update op(nss);
        op.setUpdates({entry});
        return op;
    };

    BatchedCommandRequest request(buildUpdateOp());
    request.setWriteConcern(writeConcern.toBSON());

    auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
    auto response = configShard->runBatchWriteCommand(
        opCtx, Shard::kDefaultConfigCommandTimeout, request, Shard::RetryPolicy::kIdempotent);

    Status status = response.toStatus();
    if (!status.isOK()) {
        return status;
    }

    const auto numAffected = response.getN();
    invariant(numAffected == 0 || numAffected == 1);
    return (numAffected == 1);
}
示例6: validateWriteConcern
// Validates that 'writeConcern' may be used for a write to 'dbName' on this node.
//
// Enforces: j:true requires a durable storage engine; external (non-direct-client)
// non-local writes to a config server must pass validForConfigServers(); w > 1
// requires replication; named w-modes other than 'majority' require replica-set
// membership.
Status validateWriteConcern(OperationContext* txn,
                            const WriteConcernOptions& writeConcern,
                            StringData dbName) {
    const bool wantsJournal = writeConcern.syncMode == WriteConcernOptions::SyncMode::JOURNAL;
    if (wantsJournal && !txn->getServiceContext()->getGlobalStorageEngine()->isDurable()) {
        return Status(ErrorCodes::BadValue,
                      "cannot use 'j' option when a host does not have journaling enabled");
    }

    // Remote callers of the config server (as in callers making network calls, not the
    // internal logic) should never be making non-majority writes against the config server,
    // because sharding is not resilient against rollbacks of metadata writes. The only
    // non-majority writes allowed come from within the config servers themselves (direct
    // client), because these wait for write concern explicitly.
    if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer &&
        dbName != NamespaceString::kLocalDb && !writeConcern.validForConfigServers() &&
        !txn->getClient()->isInDirectClient()) {
        return {ErrorCodes::BadValue,
                str::stream() << "w:'majority' is the only valid write concern when writing "
                                 "to config servers, got: "
                              << writeConcern.toBSON()};
    }

    const auto replicationMode = repl::ReplicationCoordinator::get(txn)->getReplicationMode();

    if (replicationMode == repl::ReplicationCoordinator::modeNone && writeConcern.wNumNodes > 1) {
        return Status(ErrorCodes::BadValue, "cannot use 'w' > 1 when a host is not replicated");
    }

    if (replicationMode != repl::ReplicationCoordinator::modeReplSet &&
        !writeConcern.wMode.empty() && writeConcern.wMode != WriteConcernOptions::kMajority) {
        return Status(ErrorCodes::BadValue,
                      string("cannot use non-majority 'w' mode ") + writeConcern.wMode +
                          " when a host is not a member of a replica set");
    }

    return Status::OK();
}
示例7: removeConfigDocuments
// Deletes every document matching 'query' from a config collection with the
// given write concern, returning the batch-write status.
Status ShardingCatalogClientImpl::removeConfigDocuments(OperationContext* opCtx,
                                                        const NamespaceString& nss,
                                                        const BSONObj& query,
                                                        const WriteConcernOptions& writeConcern) {
    invariant(nss.db() == NamespaceString::kConfigDb);

    // Assemble a single-entry multi-delete operation.
    auto buildDeleteOp = [&] {
        write_ops::DeleteOpEntry entry;
        entry.setQ(query);
        entry.setMulti(true);  // remove all matching documents, not just the first

        write_ops::Delete op(nss);
        op.setDeletes({entry});
        return op;
    };

    BatchedCommandRequest request(buildDeleteOp());
    request.setWriteConcern(writeConcern.toBSON());

    auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
    auto response = configShard->runBatchWriteCommand(
        opCtx, Shard::kDefaultConfigCommandTimeout, request, Shard::RetryPolicy::kIdempotent);

    return response.toStatus();
}
示例8: ChunkMoveWriteConcernOptions
// Parses the secondary-throttle write concern out of a moveChunk command object.
//
// If the command carries no write concern, the migration default is used;
// otherwise the parsed concern is validated against the replication coordinator
// (with a warning for master-slave, where enforcement cannot be checked). Any
// concern that waits for other nodes is given a bounded timeout so the
// migration cannot hang forever.
//
// Fix: the inner status variable used to shadow the outer 'status' declared at
// the top of the function; it is now named 'wcStatus'.
StatusWith<ChunkMoveWriteConcernOptions> ChunkMoveWriteConcernOptions::initFromCommand(
    const BSONObj& cmdObj) {
    BSONObj secThrottleObj;
    WriteConcernOptions writeConcernOptions;

    Status status = writeConcernOptions.parseSecondaryThrottle(cmdObj, &secThrottleObj);
    if (!status.isOK()) {
        if (status.code() != ErrorCodes::WriteConcernNotDefined) {
            return status;
        }
        // No write concern in the command: fall back to the migration default.
        writeConcernOptions = getDefaultWriteConcernForMigration();
    } else {
        repl::ReplicationCoordinator* replCoordinator = repl::getGlobalReplicationCoordinator();

        if (replCoordinator->getReplicationMode() ==
                repl::ReplicationCoordinator::modeMasterSlave &&
            writeConcernOptions.shouldWaitForOtherNodes()) {
            warning() << "moveChunk cannot check if secondary throttle setting "
                      << writeConcernOptions.toBSON()
                      << " can be enforced in a master slave configuration";
        }

        Status wcStatus = replCoordinator->checkIfWriteConcernCanBeSatisfied(writeConcernOptions);
        if (!wcStatus.isOK() && wcStatus != ErrorCodes::NoReplicationEnabled) {
            return wcStatus;
        }
    }

    if (writeConcernOptions.shouldWaitForOtherNodes() &&
        writeConcernOptions.wTimeout == WriteConcernOptions::kNoTimeout) {
        // Don't allow no timeout
        writeConcernOptions.wTimeout = kDefaultWriteTimeoutForMigrationMs;
    }

    return ChunkMoveWriteConcernOptions(secThrottleObj, writeConcernOptions);
}
示例9: warning
// Resolves the write concern a migration should use, given its secondary
// throttle options.
//
// Throttle off -> the local write concern. Otherwise the explicitly specified
// concern is validated against the replication coordinator (with a warning in
// master-slave mode, where enforcement cannot be checked); when none is
// specified the migration default applies. A concern that waits for other
// nodes always gets a bounded timeout.
StatusWith<WriteConcernOptions> ChunkMoveWriteConcernOptions::getEffectiveWriteConcern(
    const MigrationSecondaryThrottleOptions& options) {
    if (options.getSecondaryThrottle() == MigrationSecondaryThrottleOptions::kOff) {
        return kWriteConcernLocal;
    }

    WriteConcernOptions effectiveWC;

    if (!options.isWriteConcernSpecified()) {
        effectiveWC = getDefaultWriteConcernForMigration();
    } else {
        effectiveWC = options.getWriteConcern();

        repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator();
        if (replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeMasterSlave &&
            effectiveWC.shouldWaitForOtherNodes()) {
            warning() << "moveChunk cannot check if secondary throttle setting "
                      << effectiveWC.toBSON()
                      << " can be enforced in a master slave configuration";
        }

        Status wcStatus = replCoord->checkIfWriteConcernCanBeSatisfied(effectiveWC);
        if (!wcStatus.isOK() && wcStatus != ErrorCodes::NoReplicationEnabled) {
            return wcStatus;
        }
    }

    if (effectiveWC.shouldWaitForOtherNodes() &&
        effectiveWC.wTimeout == WriteConcernOptions::kNoTimeout) {
        // Never allow an unbounded wait.
        effectiveWC.wTimeout = durationCount<Milliseconds>(kDefaultWriteTimeoutForMigration);
    }

    return effectiveWC;
}
示例10: run
// cleanupOrphaned command entry point: extracts the namespace and resume key,
// resolves the secondary-throttle write concern, refreshes sharding metadata,
// then deletes one range of orphaned documents and reports where to resume.
//
// Fix: the inner status variable used to shadow the outer 'status', which is
// reused below for the metadata refresh; it is now named 'wcStatus'.
bool run(OperationContext* txn,
         string const& db,
         BSONObj& cmdObj,
         int,
         string& errmsg,
         BSONObjBuilder& result,
         bool) {
    string ns;
    if (!FieldParser::extract(cmdObj, nsField, &ns, &errmsg)) {
        return false;
    }

    if (ns == "") {
        errmsg = "no collection name specified";
        return false;
    }

    BSONObj startingFromKey;
    if (!FieldParser::extract(cmdObj, startingFromKeyField, &startingFromKey, &errmsg)) {
        return false;
    }

    WriteConcernOptions writeConcern;
    Status status = writeConcern.parseSecondaryThrottle(cmdObj, NULL);

    if (!status.isOK()) {
        if (status.code() != ErrorCodes::WriteConcernNotDefined) {
            return appendCommandStatus(result, status);
        }
        // No write concern supplied: use the default.
        writeConcern = DefaultWriteConcern;
    } else {
        repl::ReplicationCoordinator* replCoordinator = repl::getGlobalReplicationCoordinator();
        Status wcStatus = replCoordinator->checkIfWriteConcernCanBeSatisfied(writeConcern);

        if (replCoordinator->getReplicationMode() ==
                repl::ReplicationCoordinator::modeMasterSlave &&
            writeConcern.shouldWaitForOtherNodes()) {
            warning() << "cleanupOrphaned cannot check if write concern setting "
                      << writeConcern.toBSON()
                      << " can be enforced in a master slave configuration";
        }

        if (!wcStatus.isOK() && wcStatus != ErrorCodes::NoReplicationEnabled) {
            return appendCommandStatus(result, wcStatus);
        }
    }

    if (writeConcern.shouldWaitForOtherNodes() &&
        writeConcern.wTimeout == WriteConcernOptions::kNoTimeout) {
        // Don't allow no timeout.
        writeConcern.wTimeout = kDefaultWTimeoutMs;
    }

    if (!shardingState.enabled()) {
        errmsg = str::stream() << "server is not part of a sharded cluster or "
                               << "the sharding metadata is not yet initialized.";
        return false;
    }

    ChunkVersion shardVersion;
    status = shardingState.refreshMetadataNow(txn, ns, &shardVersion);
    if (!status.isOK()) {
        if (status.code() == ErrorCodes::RemoteChangeDetected) {
            // A shard-version transition (e.g. a concurrent migration) is tolerated;
            // proceed with the metadata we have.
            warning() << "Shard version in transition detected while refreshing "
                      << "metadata for " << ns << " at version " << shardVersion << endl;
        } else {
            errmsg = str::stream() << "failed to refresh shard metadata: " << status.reason();
            return false;
        }
    }

    BSONObj stoppedAtKey;
    CleanupResult cleanupResult = cleanupOrphanedData(
        txn, NamespaceString(ns), startingFromKey, writeConcern, &stoppedAtKey, &errmsg);

    if (cleanupResult == CleanupResult_Error) {
        return false;
    }

    if (cleanupResult == CleanupResult_Continue) {
        // More orphans may remain: tell the caller where to resume.
        result.append(stoppedAtKeyField(), stoppedAtKey);
    } else {
        dassert(cleanupResult == CleanupResult_Done);
    }

    return true;
}
示例11: write
void ClusterWriter::write(OperationContext* txn,
const BatchedCommandRequest& origRequest,
BatchedCommandResponse* response) {
// Add _ids to insert request if req'd
unique_ptr<BatchedCommandRequest> idRequest(BatchedCommandRequest::cloneWithIds(origRequest));
const BatchedCommandRequest* request = NULL != idRequest.get() ? idRequest.get() : &origRequest;
const NamespaceString& nss = request->getNS();
if (!nss.isValid()) {
toBatchError(Status(ErrorCodes::InvalidNamespace, nss.ns() + " is not a valid namespace"),
response);
return;
}
if (!NamespaceString::validCollectionName(nss.coll())) {
toBatchError(
Status(ErrorCodes::BadValue, str::stream() << "invalid collection name " << nss.coll()),
response);
return;
}
if (request->sizeWriteOps() == 0u) {
toBatchError(Status(ErrorCodes::InvalidLength, "no write ops were included in the batch"),
response);
return;
}
if (request->sizeWriteOps() > BatchedCommandRequest::kMaxWriteBatchSize) {
toBatchError(Status(ErrorCodes::InvalidLength,
str::stream() << "exceeded maximum write batch size of "
<< BatchedCommandRequest::kMaxWriteBatchSize),
response);
return;
}
string errMsg;
if (request->isInsertIndexRequest() && !request->isValidIndexRequest(&errMsg)) {
toBatchError(Status(ErrorCodes::InvalidOptions, errMsg), response);
return;
}
// Config writes and shard writes are done differently
const string dbName = nss.db().toString();
unique_ptr<BatchedCommandRequest> requestWithWriteConcern;
if (dbName == "config" || dbName == "admin") {
// w:majority is the only valid write concern for writes to the config servers.
// We also allow w:1 to come in on a user-initiated write, though we convert it here to
// w:majority before sending it to the config servers.
bool rewriteCmdWithWriteConcern = false;
WriteConcernOptions writeConcern;
if (request->isWriteConcernSet()) {
Status status = writeConcern.parse(request->getWriteConcern());
if (!status.isOK()) {
toBatchError(status, response);
return;
}
if (!writeConcern.validForConfigServers()) {
toBatchError(Status(ErrorCodes::InvalidOptions,
"Invalid replication write concern. Writes to config servers "
"must use w:'majority'"),
response);
return;
}
if (writeConcern.wMode == "") {
invariant(writeConcern.wNumNodes == 1);
rewriteCmdWithWriteConcern = true;
}
} else {
rewriteCmdWithWriteConcern = true;
}
if (rewriteCmdWithWriteConcern) {
requestWithWriteConcern.reset(new BatchedCommandRequest(request->getBatchType()));
request->cloneTo(requestWithWriteConcern.get());
writeConcern.wMode = WriteConcernOptions::kMajority;
writeConcern.wNumNodes = 0;
requestWithWriteConcern->setWriteConcern(writeConcern.toBSON());
request = requestWithWriteConcern.get();
}
grid.catalogClient(txn)->writeConfigServerDirect(txn, *request, response);
} else {
TargeterStats targeterStats;
{
ChunkManagerTargeter targeter(request->getTargetingNSS(), &targeterStats);
Status targetInitStatus = targeter.init(txn);
if (!targetInitStatus.isOK()) {
toBatchError(Status(targetInitStatus.code(),
str::stream()
<< "unable to target"
<< (request->isInsertIndexRequest() ? " index" : "")
<< " write op for collection "
<< request->getTargetingNS()
<< causedBy(targetInitStatus)),
response);
return;
}
//.........这里部分代码省略.........
示例12: insertConfigDocument
// Inserts a single document (which must carry an _id) into a config or admin
// collection, retrying up to kMaxWriteRetry times on retriable errors. Because
// a retry may re-send an insert whose first attempt actually succeeded (but
// whose response was lost), a DuplicateKey error seen on a retry is
// disambiguated by re-reading the document by _id and comparing contents.
Status ShardingCatalogClientImpl::insertConfigDocument(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& doc,
const WriteConcernOptions& writeConcern) {
invariant(nss.db() == NamespaceString::kAdminDb || nss.db() == NamespaceString::kConfigDb);
const BSONElement idField = doc.getField("_id");
invariant(!idField.eoo());
BatchedCommandRequest request([&] {
write_ops::Insert insertOp(nss);
insertOp.setDocuments({doc});
return insertOp;
}());
request.setWriteConcern(writeConcern.toBSON());
auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
for (int retry = 1; retry <= kMaxWriteRetry; retry++) {
// kNoRetry here: retries are handled manually by this loop so DuplicateKey
// can be treated specially below.
auto response = configShard->runBatchWriteCommand(
opCtx, Shard::kDefaultConfigCommandTimeout, request, Shard::RetryPolicy::kNoRetry);
Status status = response.toStatus();
if (retry < kMaxWriteRetry &&
configShard->isRetriableError(status.code(), Shard::RetryPolicy::kIdempotent)) {
// Pretend like the operation is idempotent because we're handling DuplicateKey errors
// specially
continue;
}
// If we get DuplicateKey error on the first attempt to insert, this definitively means that
// we are trying to insert the same entry a second time, so error out. If it happens on a
// retry attempt though, it is not clear whether we are actually inserting a duplicate key
// or it is because we failed to wait for write concern on the first attempt. In order to
// differentiate, fetch the entry and check.
if (retry > 1 && status == ErrorCodes::DuplicateKey) {
LOG(1) << "Insert retry failed because of duplicate key error, rechecking.";
auto fetchDuplicate =
_exhaustiveFindOnConfig(opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kMajorityReadConcern,
nss,
idField.wrap(),
BSONObj(),
boost::none);
if (!fetchDuplicate.isOK()) {
return fetchDuplicate.getStatus();
}
auto existingDocs = fetchDuplicate.getValue().value;
if (existingDocs.empty()) {
return {status.withContext(
stream() << "DuplicateKey error was returned after a retry attempt, but no "
"documents were found. This means a concurrent change occurred "
"together with the retries.")};
}
invariant(existingDocs.size() == 1);
BSONObj existing = std::move(existingDocs.front());
if (existing.woCompare(doc) == 0) {
// Documents match, so treat the operation as success
return Status::OK();
}
}
return status;
}
// The loop always returns or continues; falling out of it is a logic error.
MONGO_UNREACHABLE;
}
示例13: applyChunkOpsDeprecated
// Runs 'updateOps' against the config server via applyOps, guarded by
// 'preCondition'. If the command appears to fail, the failure may be a lost
// response for a commit that actually succeeded, so the chunks collection is
// re-queried for 'lastChunkVersion': if the version bump is present, the
// transaction is treated as committed and OK is returned.
Status ShardingCatalogClientImpl::applyChunkOpsDeprecated(OperationContext* opCtx,
const BSONArray& updateOps,
const BSONArray& preCondition,
const NamespaceString& nss,
const ChunkVersion& lastChunkVersion,
const WriteConcernOptions& writeConcern,
repl::ReadConcernLevel readConcern) {
// Callers outside the config server must use majority read and write concern,
// since metadata writes are not resilient against rollbacks.
invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer ||
(readConcern == repl::ReadConcernLevel::kMajorityReadConcern &&
writeConcern.wMode == WriteConcernOptions::kMajority));
BSONObj cmd = BSON("applyOps" << updateOps << "preCondition" << preCondition
<< WriteConcernOptions::kWriteConcernField
<< writeConcern.toBSON());
auto response =
Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"config",
cmd,
Shard::RetryPolicy::kIdempotent);
if (!response.isOK()) {
return response.getStatus();
}
// Prefer the command error; fall back to the write-concern error when the
// command itself succeeded.
Status status = response.getValue().commandStatus.isOK()
? std::move(response.getValue().writeConcernStatus)
: std::move(response.getValue().commandStatus);
// TODO (Dianna) This fail point needs to be reexamined when CommitChunkMigration is in:
// migrations will no longer be able to exercise it, so split or merge will need to do so.
// SERVER-22659.
if (MONGO_FAIL_POINT(failApplyChunkOps)) {
status = Status(ErrorCodes::InternalError, "Failpoint 'failApplyChunkOps' generated error");
}
if (!status.isOK()) {
string errMsg;
// This could be a blip in the network connectivity. Check if the commit request made it.
//
// If all the updates were successfully written to the chunks collection, the last
// document in the list of updates should be returned from a query to the chunks
// collection. The last chunk can be identified by namespace and version number.
warning() << "chunk operation commit failed and metadata will be revalidated"
<< causedBy(redact(status));
// Look for the chunk in this shard whose version got bumped. We assume that if that
// mod made it to the config server, then transaction was successful.
BSONObjBuilder query;
lastChunkVersion.appendLegacyWithField(&query, ChunkType::lastmod());
query.append(ChunkType::ns(), nss.ns());
auto chunkWithStatus = getChunks(opCtx, query.obj(), BSONObj(), 1, nullptr, readConcern);
if (!chunkWithStatus.isOK()) {
errMsg = str::stream()
<< "getChunks function failed, unable to validate chunk "
<< "operation metadata: " << chunkWithStatus.getStatus().toString()
<< ". applyChunkOpsDeprecated failed to get confirmation "
<< "of commit. Unable to save chunk ops. Command: " << cmd
<< ". Result: " << response.getValue().response;
return status.withContext(errMsg);
};
const auto& newestChunk = chunkWithStatus.getValue();
if (newestChunk.empty()) {
errMsg = str::stream() << "chunk operation commit failed: version "
<< lastChunkVersion.toString()
<< " doesn't exist in namespace: " << nss.ns()
<< ". Unable to save chunk ops. Command: " << cmd
<< ". Result: " << response.getValue().response;
return status.withContext(errMsg);
};
// Exactly one chunk carries the bumped version: the commit did land.
invariant(newestChunk.size() == 1);
return Status::OK();
}
return Status::OK();
}
示例14: runUserManagementWriteCommand
// Forwards a user-management write command to the config server primary,
// forcing its write concern to w:'majority'.
//
// An incoming write concern, if present, must be w:1 or w:'majority'; either
// way the command is rewritten to w:'majority' before being sent. The reply
// (or the first failing status among transport / command / write concern) is
// appended to 'result'.
bool ShardingCatalogClientImpl::runUserManagementWriteCommand(OperationContext* opCtx,
const std::string& commandName,
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder* result) {
BSONObj cmdToRun = cmdObj;
{
// Make sure that if the command has a write concern that it is w:1 or w:majority, and
// convert w:1 or no write concern to w:majority before sending.
WriteConcernOptions writeConcern;
writeConcern.reset();
BSONElement writeConcernElement = cmdObj[WriteConcernOptions::kWriteConcernField];
bool initialCmdHadWriteConcern = !writeConcernElement.eoo();
if (initialCmdHadWriteConcern) {
Status status = writeConcern.parse(writeConcernElement.Obj());
if (!status.isOK()) {
return CommandHelpers::appendCommandStatusNoThrow(*result, status);
}
if (!(writeConcern.wNumNodes == 1 ||
writeConcern.wMode == WriteConcernOptions::kMajority)) {
return CommandHelpers::appendCommandStatusNoThrow(
*result,
{ErrorCodes::InvalidOptions,
str::stream() << "Invalid replication write concern. User management write "
"commands may only use w:1 or w:'majority', got: "
<< writeConcern.toBSON()});
}
}
// Upgrade whatever was supplied (or defaulted) to w:'majority'.
writeConcern.wMode = WriteConcernOptions::kMajority;
writeConcern.wNumNodes = 0;
BSONObjBuilder modifiedCmd;
if (!initialCmdHadWriteConcern) {
modifiedCmd.appendElements(cmdObj);
} else {
// Copy every field except the original writeConcern, which is replaced below.
BSONObjIterator cmdObjIter(cmdObj);
while (cmdObjIter.more()) {
BSONElement e = cmdObjIter.next();
if (WriteConcernOptions::kWriteConcernField == e.fieldName()) {
continue;
}
modifiedCmd.append(e);
}
}
modifiedCmd.append(WriteConcernOptions::kWriteConcernField, writeConcern.toBSON());
cmdToRun = modifiedCmd.obj();
}
// kNotIdempotent: user management writes must not be blindly retried.
auto response =
Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
dbname,
cmdToRun,
Shard::kDefaultConfigCommandTimeout,
Shard::RetryPolicy::kNotIdempotent);
if (!response.isOK()) {
return CommandHelpers::appendCommandStatusNoThrow(*result, response.getStatus());
}
if (!response.getValue().commandStatus.isOK()) {
return CommandHelpers::appendCommandStatusNoThrow(*result,
response.getValue().commandStatus);
}
if (!response.getValue().writeConcernStatus.isOK()) {
return CommandHelpers::appendCommandStatusNoThrow(*result,
response.getValue().writeConcernStatus);
}
CommandHelpers::filterCommandReplyForPassthrough(response.getValue().response, result);
return true;
}
示例15: waitForWriteConcern
// Blocks until 'writeConcern' is satisfied for the write at 'replOpTime'.
//
// First handles the durability part (fsync/journal per the resolved sync mode),
// then waits for replication when the concern requires other nodes. Timing and
// outcome details are recorded into 'result'; the returned Status reflects the
// replication wait (OK when no replication wait was needed).
Status waitForWriteConcern(OperationContext* txn,
const OpTime& replOpTime,
const WriteConcernOptions& writeConcern,
WriteConcernResult* result) {
LOG(2) << "Waiting for write concern. OpTime: " << replOpTime
<< ", write concern: " << writeConcern.toBSON();
auto replCoord = repl::ReplicationCoordinator::get(txn);
MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangBeforeWaitingForWriteConcern);
// Next handle blocking on disk
Timer syncTimer;
// Resolve SyncMode::UNSET into a concrete mode before dispatching on it.
WriteConcernOptions writeConcernWithPopulatedSyncMode =
replCoord->populateUnsetWriteConcernOptionsSyncMode(writeConcern);
switch (writeConcernWithPopulatedSyncMode.syncMode) {
case WriteConcernOptions::SyncMode::UNSET:
severe() << "Attempting to wait on a WriteConcern with an unset sync option";
fassertFailed(34410);
case WriteConcernOptions::SyncMode::NONE:
break;
case WriteConcernOptions::SyncMode::FSYNC: {
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
if (!storageEngine->isDurable()) {
result->fsyncFiles = storageEngine->flushAllFiles(true);
} else {
// We only need to commit the journal if we're durable
txn->recoveryUnit()->waitUntilDurable();
}
break;
}
case WriteConcernOptions::SyncMode::JOURNAL:
if (replCoord->getReplicationMode() != repl::ReplicationCoordinator::Mode::modeNone) {
// Wait for ops to become durable then update replication system's
// knowledge of this.
OpTime appliedOpTime = replCoord->getMyLastAppliedOpTime();
txn->recoveryUnit()->waitUntilDurable();
replCoord->setMyLastDurableOpTimeForward(appliedOpTime);
} else {
txn->recoveryUnit()->waitUntilDurable();
}
break;
}
result->syncMillis = syncTimer.millis();
// Now wait for replication
if (replOpTime.isNull()) {
// no write happened for this client yet
return Status::OK();
}
// needed to avoid incrementing gleWtimeStats SERVER-9005
if (writeConcernWithPopulatedSyncMode.wNumNodes <= 1 &&
writeConcernWithPopulatedSyncMode.wMode.empty()) {
// no desired replication check
return Status::OK();
}
// Replica set stepdowns and gle mode changes are thrown as errors
repl::ReplicationCoordinator::StatusAndDuration replStatus =
replCoord->awaitReplication(txn, replOpTime, writeConcernWithPopulatedSyncMode);
if (replStatus.status == ErrorCodes::WriteConcernFailed) {
gleWtimeouts.increment();
result->err = "timeout";
result->wTimedOut = true;
}
// Add stats
result->writtenTo = repl::getGlobalReplicationCoordinator()->getHostsWrittenTo(
replOpTime,
writeConcernWithPopulatedSyncMode.syncMode == WriteConcernOptions::SyncMode::JOURNAL);
gleWtimeStats.recordMillis(durationCount<Milliseconds>(replStatus.duration));
result->wTime = durationCount<Milliseconds>(replStatus.duration);
return replStatus.status;
}