This article collects typical usage examples of the C++ method BatchedCommandRequest::getNS. If you are wondering what BatchedCommandRequest::getNS does and how to use it in practice, the hand-picked examples below should help. You can also explore further usage examples of the enclosing class, BatchedCommandRequest.
The following presents 15 code examples of BatchedCommandRequest::getNS, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
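Before diving in, note the pattern most of the examples below share: getNS() returns the namespace the batch targets, and callers branch on its database component to choose between the config-server write path and the sharded write path. The sketch below is only an orientation aid; routeBatchWrite, writeToConfigServers, and writeToShards are hypothetical names, not MongoDB APIs (the real routing lives in ClusterWriter::write, Examples 1 and 15).
// Hedged sketch: how getNS() typically drives routing (hypothetical helpers).
void routeBatchWrite(OperationContext* opCtx,
                     const BatchedCommandRequest& request,
                     BatchedCommandResponse* response) {
    const NamespaceString& nss = request.getNS();
    // Writes to the config/admin databases go straight to the config servers;
    // everything else is targeted at the shards that own the collection.
    if (nss.db() == "config" || nss.db() == "admin") {
        writeToConfigServers(opCtx, request, response);  // hypothetical helper
    } else {
        writeToShards(opCtx, request, response);         // hypothetical helper
    }
}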
Example 1: write
void ClusterWriter::write(OperationContext* opCtx,
                          const BatchedCommandRequest& request,
                          BatchWriteExecStats* stats,
                          BatchedCommandResponse* response) {
    const NamespaceString& nss = request.getNS();
    LastError::Disabled disableLastError(&LastError::get(opCtx->getClient()));
    // Config writes and shard writes are done differently
    if (nss.db() == NamespaceString::kConfigDb || nss.db() == NamespaceString::kAdminDb) {
        Grid::get(opCtx)->catalogClient()->writeConfigServerDirect(opCtx, request, response);
    } else {
        TargeterStats targeterStats;
        {
            ChunkManagerTargeter targeter(request.getTargetingNS(), &targeterStats);
            Status targetInitStatus = targeter.init(opCtx);
            if (!targetInitStatus.isOK()) {
                toBatchError({targetInitStatus.code(),
                              str::stream() << "unable to target"
                                            << (request.isInsertIndexRequest() ? " index" : "")
                                            << " write op for collection "
                                            << request.getTargetingNS().ns()
                                            << causedBy(targetInitStatus)},
                             response);
                return;
            }
            BatchWriteExec::executeBatch(opCtx, targeter, request, response, stats);
        }
        splitIfNeeded(opCtx, request.getNS(), targeterStats);
    }
}
Example 2: shardWrite
void ClusterWriter::shardWrite( const BatchedCommandRequest& request,
                                BatchedCommandResponse* response ) {
    ChunkManagerTargeter targeter;
    Status targetInitStatus = targeter.init( NamespaceString( request.getTargetingNS() ) );
    if ( !targetInitStatus.isOK() ) {
        warning() << "could not initialize targeter for"
                  << ( request.isInsertIndexRequest() ? " index" : "" )
                  << " write op in collection " << request.getTargetingNS() << endl;
        // Errors will be reported in response if we are unable to target
    }
    DBClientShardResolver resolver;
    DBClientMultiCommand dispatcher;
    BatchWriteExec exec( &targeter, &resolver, &dispatcher );
    exec.executeBatch( request, response );
    if ( _autoSplit )
        splitIfNeeded( request.getNS(), *targeter.getStats() );
    _stats->setShardStats( exec.releaseStats() );
}
Example 3: runBatchWriteCommandOnConfig
BatchedCommandResponse Shard::runBatchWriteCommandOnConfig(
    OperationContext* txn, const BatchedCommandRequest& batchRequest, RetryPolicy retryPolicy) {
    invariant(isConfig());
    const std::string dbname = batchRequest.getNS().db().toString();
    invariant(batchRequest.sizeWriteOps() == 1);
    const BSONObj cmdObj = batchRequest.toBSON();
    for (int retry = 1; retry <= kOnErrorNumRetries; ++retry) {
        auto response = _runCommand(txn,
                                    ReadPreferenceSetting{ReadPreference::PrimaryOnly},
                                    dbname,
                                    kDefaultConfigCommandTimeout,
                                    cmdObj);
        BatchedCommandResponse batchResponse;
        Status writeStatus =
            CommandResponse::processBatchWriteResponse(response.commandResponse, &batchResponse);
        if (!writeStatus.isOK() && response.host) {
            updateReplSetMonitor(response.host.get(), writeStatus);
        }
        if (retry < kOnErrorNumRetries && isRetriableError(writeStatus.code(), retryPolicy)) {
            LOG(2) << "Batch write command failed with retriable error and will be retried"
                   << causedBy(redact(writeStatus));
            continue;
        }
        return batchResponse;
    }
    MONGO_UNREACHABLE;
}
Example 4: runBatchWriteCommand
BatchedCommandResponse Shard::runBatchWriteCommand(OperationContext* opCtx,
                                                   const Milliseconds maxTimeMS,
                                                   const BatchedCommandRequest& batchRequest,
                                                   RetryPolicy retryPolicy) {
    const std::string dbname = batchRequest.getNS().db().toString();
    const BSONObj cmdObj = batchRequest.toBSON();
    for (int retry = 1; retry <= kOnErrorNumRetries; ++retry) {
        // Note: write commands can only be issued against a primary.
        auto swResponse = _runCommand(
            opCtx, ReadPreferenceSetting{ReadPreference::PrimaryOnly}, dbname, maxTimeMS, cmdObj);
        BatchedCommandResponse batchResponse;
        auto writeStatus = CommandResponse::processBatchWriteResponse(swResponse, &batchResponse);
        if (retry < kOnErrorNumRetries && isRetriableError(writeStatus.code(), retryPolicy)) {
            LOG(2) << "Batch write command to " << getId()
                   << " failed with retriable error and will be retried"
                   << causedBy(redact(writeStatus));
            continue;
        }
        return batchResponse;
    }
    MONGO_UNREACHABLE;
}
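Examples 3 and 4 share one retry idiom: issue the batch write, and if the resulting status is classified as retriable for the chosen RetryPolicy, try again until kOnErrorNumRetries attempts are exhausted. Below is a minimal, self-contained sketch of that shape; retryOnRetriableError, runOnce, and isRetriable are illustrative names, not MongoDB APIs.
#include <functional>
#include <stdexcept>

// Generic retry loop in the spirit of runBatchWriteCommand(OnConfig):
// retry while the failure is retriable, give up after maxAttempts tries.
template <typename Result>
Result retryOnRetriableError(int maxAttempts,
                             const std::function<Result()>& runOnce,
                             const std::function<bool(const Result&)>& isRetriable) {
    for (int attempt = 1; attempt <= maxAttempts; ++attempt) {
        Result result = runOnce();
        // Only retry if attempts remain and the error is retriable;
        // the real code also logs the retry at LOG(2).
        if (attempt < maxAttempts && isRetriable(result)) {
            continue;
        }
        return result;
    }
    // Mirrors MONGO_UNREACHABLE: the loop always returns before falling out.
    throw std::logic_error("unreachable");
}
A caller would wrap the single command dispatch in runOnce and map the returned write status to a bool in isRetriable.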
Example 5: writeOp
void Strategy::writeOp(OperationContext* txn, int op, Request& request) {
    // make sure we have a last error
    dassert(&LastError::get(cc()));
    OwnedPointerVector<BatchedCommandRequest> commandRequestsOwned;
    vector<BatchedCommandRequest*>& commandRequests = commandRequestsOwned.mutableVector();
    msgToBatchRequests(request.m(), &commandRequests);
    for (vector<BatchedCommandRequest*>::iterator it = commandRequests.begin();
         it != commandRequests.end();
         ++it) {
        // Multiple commands registered to last error as multiple requests
        if (it != commandRequests.begin())
            LastError::get(cc()).startRequest();
        BatchedCommandRequest* commandRequest = *it;
        // Adjust namespaces for command
        NamespaceString fullNS(commandRequest->getNS());
        string cmdNS = fullNS.getCommandNS();
        // We only pass in collection name to command
        commandRequest->setNS(fullNS);
        BSONObjBuilder builder;
        BSONObj requestBSON = commandRequest->toBSON();
        {
            // Disable the last error object for the duration of the write cmd
            LastError::Disabled disableLastError(&LastError::get(cc()));
            Command::runAgainstRegistered(txn, cmdNS.c_str(), requestBSON, builder, 0);
        }
        BatchedCommandResponse commandResponse;
        bool parsed = commandResponse.parseBSON(builder.done(), NULL);
        (void)parsed;  // for compile
        dassert(parsed && commandResponse.isValid(NULL));
        // Populate the lastError object based on the write response
        LastError::get(cc()).reset();
        bool hadError =
            batchErrorToLastError(*commandRequest, commandResponse, &LastError::get(cc()));
        // Check if this is an ordered batch and we had an error which should stop processing
        if (commandRequest->getOrdered() && hadError)
            break;
    }
}
Example 6: clusterWrite
void clusterWrite( const BatchedCommandRequest& request,
                   BatchedCommandResponse* response,
                   bool autoSplit ) {
    // App-level validation of a create index insert
    if ( request.isInsertIndexRequest() ) {
        if ( request.sizeWriteOps() != 1 || request.isWriteConcernSet() ) {
            // Invalid request to create index
            response->setOk( false );
            response->setErrCode( ErrorCodes::InvalidOptions );
            response->setErrMessage( "invalid batch request for index creation" );
            dassert( response->isValid( NULL ) );
            return;
        }
    }
    // Config writes and shard writes are done differently
    string dbName = NamespaceString( request.getNS() ).db().toString();
    if ( dbName == "config" || dbName == "admin" ) {
        bool verboseWC = request.isVerboseWC();
        // We only support batch sizes of one and {w:0} write concern for config writes
        if ( request.sizeWriteOps() != 1 || ( verboseWC && request.isWriteConcernSet() ) ) {
            // Invalid config server write
            response->setOk( false );
            response->setErrCode( ErrorCodes::InvalidOptions );
            response->setErrMessage( "invalid batch request for config write" );
            dassert( response->isValid( NULL ) );
            return;
        }
        // We need to support "best-effort" writes for pings to the config server.
        // {w:0} (!verbose) writes are interpreted as best-effort in this case - they may still
        // error, but do not do the initial fsync check.
        configWrite( request, response, verboseWC );
    }
    else {
        shardWrite( request, response, autoSplit );
    }
}
Example 7: writeConfigServerDirect
void CatalogManagerReplicaSet::writeConfigServerDirect(const BatchedCommandRequest& batchRequest,
                                                       BatchedCommandResponse* batchResponse) {
    std::string dbname = batchRequest.getNS().db().toString();
    invariant(dbname == "config" || dbname == "admin");
    const BSONObj cmdObj = batchRequest.toBSON();
    auto response = _runConfigServerCommandWithNotMasterRetries(dbname, cmdObj);
    if (!response.isOK()) {
        _toBatchError(response.getStatus(), batchResponse);
        return;
    }
    string errmsg;
    if (!batchResponse->parseBSON(response.getValue(), &errmsg)) {
        _toBatchError(Status(ErrorCodes::FailedToParse,
                             str::stream() << "Failed to parse config server response: " << errmsg),
                      batchResponse);
    }
}
Example 8: validateBatch
// static
Status WriteBatchExecutor::validateBatch( const BatchedCommandRequest& request ) {
    // Validate namespace
    const NamespaceString nss = NamespaceString( request.getNS() );
    if ( !nss.isValid() ) {
        return Status( ErrorCodes::InvalidNamespace,
                       nss.ns() + " is not a valid namespace" );
    }
    // Make sure we can write to the namespace
    Status allowedStatus = userAllowedWriteNS( nss );
    if ( !allowedStatus.isOK() ) {
        return allowedStatus;
    }
    // Validate insert index requests
    // TODO: Push insert index requests through createIndex once all upgrade paths support it
    string errMsg;
    if ( request.isInsertIndexRequest() && !request.isValidIndexRequest( &errMsg ) ) {
        return Status( ErrorCodes::InvalidOptions, errMsg );
    }
    return Status::OK();
}
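Since validateBatch is a static helper, a caller can run it up front and turn a failure into a batch-level error before doing any work. A plausible call site is sketched below; the surrounding executeIfValid function is illustrative, while the toBatchError usage mirrors Example 10.
// Hypothetical call site: bail out early if the batch fails validation.
void executeIfValid( const BatchedCommandRequest& request,
                     BatchedCommandResponse* response ) {
    Status validationStatus = WriteBatchExecutor::validateBatch( request );
    if ( !validationStatus.isOK() ) {
        toBatchError( validationStatus, response );  // report as a batch-level error
        return;
    }
    // ... proceed with the actual write ...
}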
Example 9: safeWriteBatch
void BatchSafeWriter::safeWriteBatch( DBClientBase* conn,
                                      const BatchedCommandRequest& request,
                                      BatchedCommandResponse* response ) {
    const NamespaceString nss( request.getNS() );
    // N starts at zero, and we add to it for each item
    response->setN( 0 );
    // GLE path always sets nModified to -1 (sentinel) to indicate we should omit it later.
    response->setNModified(-1);
    for ( size_t i = 0; i < request.sizeWriteOps(); ++i ) {
        // Break on first error if we're ordered
        if ( request.getOrdered() && response->isErrDetailsSet() )
            break;
        BatchItemRef itemRef( &request, static_cast<int>( i ) );
        BSONObj gleResult;
        GLEErrors errors;
        Status status = _safeWriter->safeWrite( conn,
                                                itemRef,
                                                WriteConcernOptions::Acknowledged,
                                                &gleResult );
        if ( status.isOK() ) {
            status = extractGLEErrors( gleResult, &errors );
        }
        if ( !status.isOK() ) {
            response->clear();
            response->setOk( false );
            response->setErrCode( ErrorCodes::RemoteResultsUnavailable );
            StringBuilder builder;
            builder << "could not get write error from safe write";
            builder << causedBy( status.toString() );
            response->setErrMessage( builder.str() );
            return;
        }
        if ( errors.wcError.get() ) {
            response->setWriteConcernError( errors.wcError.release() );
        }
        //
        // STATS HANDLING
        //
        GLEStats stats;
        extractGLEStats( gleResult, &stats );
        // Special case for making legacy "n" field result for insert match the write
        // command result.
        if ( request.getBatchType() == BatchedCommandRequest::BatchType_Insert
             && !errors.writeError.get() ) {
            // n is always 0 for legacy inserts.
            dassert( stats.n == 0 );
            stats.n = 1;
        }
        response->setN( response->getN() + stats.n );
        if ( !stats.upsertedId.isEmpty() ) {
            BatchedUpsertDetail* upsertedId = new BatchedUpsertDetail;
            upsertedId->setIndex( i );
            upsertedId->setUpsertedID( stats.upsertedId );
            response->addToUpsertDetails( upsertedId );
        }
        response->setLastOp( stats.lastOp );
        // Save write error
        if ( errors.writeError.get() ) {
            errors.writeError->setIndex( i );
            response->addToErrDetails( errors.writeError.release() );
        }
    }
    //
    // WRITE CONCERN ERROR HANDLING
    //
    // The last write is weird, since we enforce write concern and check the error through
    // the same GLE if possible. If the last GLE was an error, the write concern may not
    // have been enforced in that same GLE, so we need to send another after resetting the
    // error.
    BSONObj writeConcern;
    if ( request.isWriteConcernSet() ) {
        writeConcern = request.getWriteConcern();
        // Pre-2.4.2 mongods react badly to 'w' being set on config servers
        if ( nss.db() == "config" )
            writeConcern = fixWCForConfig( writeConcern );
    }
    bool needToEnforceWC = WriteConcernOptions::Acknowledged.woCompare(writeConcern) != 0 &&
        WriteConcernOptions::Unacknowledged.woCompare(writeConcern) != 0;
//......... (part of the code omitted here) .........
Example 10: executeBatch
void WriteBatchExecutor::executeBatch( const BatchedCommandRequest& request,
                                       BatchedCommandResponse* response ) {
    // Validate namespace
    const NamespaceString nss = NamespaceString( request.getNS() );
    if ( !nss.isValid() ) {
        toBatchError( Status( ErrorCodes::InvalidNamespace,
                              nss.ns() + " is not a valid namespace" ),
                      response );
        return;
    }
    // Make sure we can write to the namespace
    Status allowedStatus = userAllowedWriteNS( nss );
    if ( !allowedStatus.isOK() ) {
        toBatchError( allowedStatus, response );
        return;
    }
    // Validate insert index requests
    // TODO: Push insert index requests through createIndex once all upgrade paths support it
    string errMsg;
    if ( request.isInsertIndexRequest() && !request.isValidIndexRequest( &errMsg ) ) {
        toBatchError( Status( ErrorCodes::InvalidOptions, errMsg ), response );
        return;
    }
    // Validate write concern
    // TODO: Lift write concern parsing out of this entirely
    WriteConcernOptions writeConcern;
    BSONObj wcDoc;
    if ( request.isWriteConcernSet() ) {
        wcDoc = request.getWriteConcern();
    }
    Status wcStatus = Status::OK();
    if ( wcDoc.isEmpty() ) {
        // The default write concern if empty is w : 1
        // Specifying w : 0 is/was allowed, but is interpreted identically to w : 1
        wcStatus = writeConcern.parse(
            _defaultWriteConcern.isEmpty() ?
                WriteConcernOptions::Acknowledged : _defaultWriteConcern );
        if ( writeConcern.wNumNodes == 0 && writeConcern.wMode.empty() ) {
            writeConcern.wNumNodes = 1;
        }
    }
    else {
        wcStatus = writeConcern.parse( wcDoc );
    }
    if ( wcStatus.isOK() ) {
        wcStatus = validateWriteConcern( writeConcern );
    }
    if ( !wcStatus.isOK() ) {
        toBatchError( wcStatus, response );
        return;
    }
    if ( request.sizeWriteOps() == 0u ) {
        toBatchError( Status( ErrorCodes::InvalidLength,
                              "no write ops were included in the batch" ),
                      response );
        return;
    }
    // Validate batch size
    if ( request.sizeWriteOps() > BatchedCommandRequest::kMaxWriteBatchSize ) {
        toBatchError( Status( ErrorCodes::InvalidLength,
                              stream() << "exceeded maximum write batch size of "
                                       << BatchedCommandRequest::kMaxWriteBatchSize ),
                      response );
        return;
    }
    //
    // End validation
    //
    bool silentWC = writeConcern.wMode.empty() && writeConcern.wNumNodes == 0
        && writeConcern.syncMode == WriteConcernOptions::NONE;
    Timer commandTimer;
    OwnedPointerVector<WriteErrorDetail> writeErrorsOwned;
    vector<WriteErrorDetail*>& writeErrors = writeErrorsOwned.mutableVector();
    OwnedPointerVector<BatchedUpsertDetail> upsertedOwned;
    vector<BatchedUpsertDetail*>& upserted = upsertedOwned.mutableVector();
    //
    // Apply each batch item, possibly bulking some items together in the write lock.
    // Stops on error if batch is ordered.
    //
    bulkExecute( request, &upserted, &writeErrors );
//......... (part of the code omitted here) .........
Example 11: executeBatch
//......... (part of the code omitted here) .........
            Status resolveStatus = _resolver->chooseWriteHost( nextBatch->getEndpoint()
                                                                   .shardName,
                                                               &shardHost );
            if ( !resolveStatus.isOK() ) {
                ++_stats->numResolveErrors;
                // Record a resolve failure
                // TODO: It may be necessary to refresh the cache if stale, or maybe just
                // cancel and retarget the batch
                WriteErrorDetail error;
                buildErrorFrom( resolveStatus, &error );
                batchOp.noteBatchError( *nextBatch, error );
                // We're done with this batch
                *it = NULL;
                --numToSend;
                continue;
            }
            // If we already have a batch for this host, wait until the next time
            HostBatchMap::iterator pendingIt = pendingBatches.find( shardHost );
            if ( pendingIt != pendingBatches.end() ) continue;
            //
            // We now have all the info needed to dispatch the batch
            //
            BatchedCommandRequest request( clientRequest.getBatchType() );
            batchOp.buildBatchRequest( *nextBatch, &request );
            // Internally we use full namespaces for request/response, but we send the
            // command to a database with the collection name in the request.
            NamespaceString nss( request.getNS() );
            request.setNS( nss.coll() );
            _dispatcher->addCommand( shardHost, nss.db(), request );
            // Indicate we're done by setting the batch to NULL
            // We'll only get duplicate hostEndpoints if we have broadcast and non-broadcast
            // endpoints for the same host, so this should be pretty efficient without
            // moving stuff around.
            *it = NULL;
            // Recv-side is responsible for cleaning up the nextBatch when used
            pendingBatches.insert( make_pair( shardHost, nextBatch ) );
        }
        // Send them all out
        _dispatcher->sendAll();
        numSent += pendingBatches.size();
        //
        // Recv side
        //
        while ( _dispatcher->numPending() > 0 ) {
            // Get the response
            ConnectionString shardHost;
            BatchedCommandResponse response;
            Status dispatchStatus = _dispatcher->recvAny( &shardHost, &response );
            // Get the TargetedWriteBatch to find where to put the response
            dassert( pendingBatches.find( shardHost ) != pendingBatches.end() );
            TargetedWriteBatch* batchRaw = pendingBatches.find( shardHost )->second;
Example 12: executeBatch
/**
 * The core config write functionality.
 *
 * Config writes run in two passes - the first is a quick check to ensure the config servers
 * are all reachable, the second runs the actual write.
 *
 * TODO: Upgrade and move this logic to the config servers, a state machine implementation
 * is probably the next step.
 */
void ConfigCoordinator::executeBatch( const BatchedCommandRequest& clientRequest,
                                      BatchedCommandResponse* clientResponse,
                                      bool fsyncCheck ) {
    NamespaceString nss( clientRequest.getNS() );
    dassert( nss.db() == "config" || nss.db() == "admin" );
    dassert( clientRequest.sizeWriteOps() == 1u );
    if ( fsyncCheck ) {
        //
        // Sanity check that all configs are still reachable using fsync, preserving legacy
        // behavior
        //
        OwnedPointerVector<ConfigFsyncResponse> fsyncResponsesOwned;
        vector<ConfigFsyncResponse*>& fsyncResponses = fsyncResponsesOwned.mutableVector();
        //
        // Send side
        //
        for ( vector<ConnectionString>::iterator it = _configHosts.begin();
              it != _configHosts.end(); ++it ) {
            ConnectionString& configHost = *it;
            FsyncRequest fsyncRequest;
            _dispatcher->addCommand( configHost, "admin", fsyncRequest );
        }
        _dispatcher->sendAll();
        //
        // Recv side
        //
        bool fsyncError = false;
        while ( _dispatcher->numPending() > 0 ) {
            fsyncResponses.push_back( new ConfigFsyncResponse() );
            ConfigFsyncResponse& fsyncResponse = *fsyncResponses.back();
            Status dispatchStatus = _dispatcher->recvAny( &fsyncResponse.configHost,
                                                          &fsyncResponse.response );
            // We've got to recv everything, no matter what
            if ( !dispatchStatus.isOK() ) {
                fsyncError = true;
                buildFsyncErrorFrom( dispatchStatus, &fsyncResponse.response );
            }
            else if ( !fsyncResponse.response.getOk() ) {
                fsyncError = true;
            }
        }
        if ( fsyncError ) {
            combineFsyncErrors( fsyncResponses, clientResponse );
            return;
        }
        else {
            fsyncResponsesOwned.clear();
        }
    }
    //
    // Do the actual writes
    //
    BatchedCommandRequest configRequest( clientRequest.getBatchType() );
    clientRequest.cloneTo( &configRequest );
    configRequest.setNS( nss.coll() );
    OwnedPointerVector<ConfigResponse> responsesOwned;
    vector<ConfigResponse*>& responses = responsesOwned.mutableVector();
    //
    // Send the actual config writes
    //
    // Get as many batches as we can at once
    for ( vector<ConnectionString>::iterator it = _configHosts.begin();
          it != _configHosts.end(); ++it ) {
        ConnectionString& configHost = *it;
        _dispatcher->addCommand( configHost, nss.db(), configRequest );
    }
    // Send them all out
    _dispatcher->sendAll();
    //
    // Recv side
    //
//......... (part of the code omitted here) .........
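The doc comment at the top of Example 12 describes two passes: a cheap reachability check against every config host, then the actual write fanned out to the same hosts. Stripped of the dispatcher plumbing (the real code sends all commands asynchronously via sendAll() and drains them with recvAny()), the control flow can be sketched as follows; twoPassConfigWrite, checkHost, sendWrite, and HostResult are illustrative stand-ins, not the ConfigCoordinator API.
#include <string>
#include <vector>

// Illustrative response type for the sketch (not a MongoDB type).
struct HostResult { std::string host; bool ok; };

// Hedged sketch of the two-pass config write described in Example 12.
bool twoPassConfigWrite(const std::vector<std::string>& configHosts,
                        bool fsyncCheck,
                        bool (*checkHost)(const std::string&),          // pass 1: reachability probe
                        HostResult (*sendWrite)(const std::string&)) {  // pass 2: the real write
    // Pass 1: verify every config server is reachable before mutating anything.
    if (fsyncCheck) {
        for (const std::string& host : configHosts) {
            if (!checkHost(host)) {
                return false;  // abort without attempting the write
            }
        }
    }
    // Pass 2: send the actual write to every config host and require all to succeed.
    bool allOk = true;
    for (const std::string& host : configHosts) {
        HostResult result = sendWrite(host);
        if (!result.ok) {
            allOk = false;  // keep going: every host still receives the write
        }
    }
    return allOk;
}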
Example 13: applyWriteItem
bool WriteBatchExecutor::applyWriteItem( const BatchedCommandRequest& request,
                                         int index,
                                         WriteStats* stats,
                                         BatchedErrorDetail* error ) {
    const string& ns = request.getNS();
    // Clear operation's LastError before starting.
    _le->reset( true );
    //uint64_t itemTimeMicros = 0;
    bool opSuccess = true;
    // Each write operation executes in its own PageFaultRetryableSection. This means that
    // a single batch can throw multiple PageFaultException's, which is not the case for
    // other operations.
    PageFaultRetryableSection s;
    while ( true ) {
        try {
            // Execute the write item as a child operation of the current operation.
            CurOp childOp( _client, _client->curop() );
            // TODO Modify CurOp "wrapped" constructor to take an opcode, so calling .reset()
            // is unneeded
            childOp.reset( _client->getRemote(), getOpCode( request.getBatchType() ) );
            childOp.ensureStarted();
            OpDebug& opDebug = childOp.debug();
            opDebug.ns = ns;
            {
                Client::WriteContext ctx( ns );
                switch ( request.getBatchType() ) {
                case BatchedCommandRequest::BatchType_Insert:
                    opSuccess =
                        applyInsert( ns,
                                     request.getInsertRequest()->getDocumentsAt( index ),
                                     &childOp,
                                     stats,
                                     error );
                    break;
                case BatchedCommandRequest::BatchType_Update:
                    opSuccess = applyUpdate( ns,
                                             *request.getUpdateRequest()->getUpdatesAt( index ),
                                             &childOp,
                                             stats,
                                             error );
                    break;
                default:
                    dassert( request.getBatchType() ==
                             BatchedCommandRequest::BatchType_Delete );
                    opSuccess = applyDelete( ns,
                                             *request.getDeleteRequest()->getDeletesAt( index ),
                                             &childOp,
                                             stats,
                                             error );
                    break;
                }
            }
            childOp.done();
            //itemTimeMicros = childOp.totalTimeMicros();
            opDebug.executionTime = childOp.totalTimeMillis();
            opDebug.recordStats();
            // Log operation if running with at least "-v", or if exceeds slow threshold.
            if ( logger::globalLogDomain()->shouldLog( logger::LogSeverity::Debug( 1 ) )
                 || opDebug.executionTime > cmdLine.slowMS + childOp.getExpectedLatencyMs() ) {
                MONGO_TLOG(1) << opDebug.report( childOp ) << endl;
            }
            // TODO Log operation if logLevel >= 3 and assertion thrown (as assembleResponse()
            // does).
            // Save operation to system.profile if shouldDBProfile().
            if ( childOp.shouldDBProfile( opDebug.executionTime ) ) {
                profile( *_client, getOpCode( request.getBatchType() ), childOp );
            }
            break;
        }
        catch ( PageFaultException& e ) {
            e.touch();
        }
    }
    return opSuccess;
}
Example 14: executeBatch
/**
 * The core config write functionality.
 *
 * Config writes run in two passes - the first is a quick check to ensure the config servers
 * are all reachable, the second runs the actual write.
 *
 * TODO: Upgrade and move this logic to the config servers, a state machine implementation
 * is probably the next step.
 */
void ConfigCoordinator::executeBatch(const BatchedCommandRequest& clientRequest,
                                     BatchedCommandResponse* clientResponse) {
    const NamespaceString nss(clientRequest.getNS());
    // Should never use it for anything other than DBs residing on the config server
    dassert(nss.db() == "config" || nss.db() == "admin");
    dassert(clientRequest.sizeWriteOps() == 1u);
    // This is an opportunistic check that all config servers look healthy by calling
    // getLastError on each one of them. If there was some form of write/journaling error, get
    // last error would fail.
    {
        for (vector<ConnectionString>::iterator it = _configHosts.begin();
             it != _configHosts.end();
             ++it) {
            _dispatcher->addCommand(*it,
                                    "admin",
                                    RawBSONSerializable(BSON("getLastError" << true <<
                                                             "fsync" << true)));
        }
        _dispatcher->sendAll();
        bool error = false;
        while (_dispatcher->numPending()) {
            ConnectionString host;
            RawBSONSerializable response;
            Status status = _dispatcher->recvAny(&host, &response);
            if (status.isOK()) {
                BSONObj obj = response.toBSON();
                LOG(3) << "Response " << obj.toString();
                // If the ok field is anything other than 1, count it as error
                if (!obj["ok"].trueValue()) {
                    error = true;
                    log() << "Config server check for host " << host
                          << " returned error: " << response;
                }
            }
            else {
                error = true;
                log() << "Config server check for host " << host
                      << " failed with status: " << status;
            }
        }
        // All responses should have been gathered by this point
        if (error) {
            clientResponse->setOk(false);
            clientResponse->setErrCode(ErrorCodes::RemoteValidationError);
            clientResponse->setErrMessage("Could not verify that config servers were active"
                                          " and reachable before write");
            return;
        }
    }
    if (!_checkConfigString(clientResponse)) {
        return;
    }
    //
    // Do the actual writes
    //
    BatchedCommandRequest configRequest( clientRequest.getBatchType() );
    clientRequest.cloneTo( &configRequest );
    configRequest.setNS( nss.coll() );
    OwnedPointerVector<ConfigResponse> responsesOwned;
    vector<ConfigResponse*>& responses = responsesOwned.mutableVector();
    //
    // Send the actual config writes
    //
    // Get as many batches as we can at once
    for (vector<ConnectionString>::const_iterator it = _configHosts.begin();
         it != _configHosts.end();
         ++it) {
        const ConnectionString& configHost = *it;
        _dispatcher->addCommand(configHost, nss.db(), configRequest);
    }
    // Send them all out
    _dispatcher->sendAll();
//......... (part of the code omitted here) .........
Example 15: write
void ClusterWriter::write( const BatchedCommandRequest& request,
                           BatchedCommandResponse* response ) {
    const NamespaceString nss = NamespaceString( request.getNS() );
    if ( !nss.isValid() ) {
        toBatchError( Status( ErrorCodes::InvalidNamespace,
                              nss.ns() + " is not a valid namespace" ),
                      response );
        return;
    }
    if ( !NamespaceString::validCollectionName( nss.coll() ) ) {
        toBatchError( Status( ErrorCodes::BadValue,
                              str::stream() << "invalid collection name " << nss.coll() ),
                      response );
        return;
    }
    if ( request.sizeWriteOps() > BatchedCommandRequest::kMaxWriteBatchSize ) {
        toBatchError( Status( ErrorCodes::FailedToParse,
                              str::stream() << "exceeded maximum write batch size of "
                                            << BatchedCommandRequest::kMaxWriteBatchSize ),
                      response );
        return;
    }
    string errMsg;
    if ( request.isInsertIndexRequest() && !request.isValidIndexRequest( &errMsg ) ) {
        toBatchError( Status( ErrorCodes::InvalidOptions, errMsg ), response );
        return;
    }
    // Config writes and shard writes are done differently
    string dbName = nss.db().toString();
    if ( dbName == "config" || dbName == "admin" ) {
        bool verboseWC = request.isVerboseWC();
        // We only support batch sizes of one for config writes
        if ( request.sizeWriteOps() != 1 ) {
            toBatchError( Status( ErrorCodes::InvalidOptions,
                                  mongoutils::str::stream() << "Writes to config servers must "
                                                               "have batch size of 1, found "
                                                            << request.sizeWriteOps() ),
                          response );
            return;
        }
        // We only support {w: 0}, {w: 1}, and {w: 'majority'} write concern for config writes
        if ( request.isWriteConcernSet() && !validConfigWC( request.getWriteConcern() )) {
            toBatchError( Status( ErrorCodes::InvalidOptions,
                                  mongoutils::str::stream() << "Invalid write concern for write"
                                                               " to config servers: " << request.getWriteConcern() ),
                          response );
            return;
        }
        // We need to support "best-effort" writes for pings to the config server.
        // {w:0} (!verbose) writes are interpreted as best-effort in this case - they may still
        // error, but do not do the initial fsync check.
        configWrite( request, response, verboseWC );
    }
    else {
        shardWrite( request, response );
    }
}