This article collects typical usage examples of the C++ method ShardedConnectionInfo::getVersion. If you are wondering what ShardedConnectionInfo::getVersion does, how to call it, or what real-world uses look like, the curated code samples below may help. You can also read further about the enclosing class, ShardedConnectionInfo.
The following presents 12 code examples of the ShardedConnectionInfo::getVersion method, sorted by popularity by default.
Example 1: run
bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
string ns = cmdObj["getShardVersion"].valuestrsafe();
if ( ns.size() == 0 ) {
errmsg = "need to specify full namespace";
return false;
}
result.append( "configServer" , shardingState.getConfigServer() );
result.appendTimestamp( "global" , shardingState.getVersion(ns).toLong() );
ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );
result.appendBool( "inShardedMode" , info != 0 );
if ( info )
result.appendTimestamp( "mine" , info->getVersion(ns).toLong() );
else
result.appendTimestamp( "mine" , 0 );
if ( cmdObj["fullMetadata"].trueValue() ) {
CollectionMetadataPtr metadata = shardingState.getCollectionMetadata( ns );
if ( metadata ) result.append( "metadata", metadata->toBSON() );
else result.append( "metadata", BSONObj() );
}
return true;
}
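For context, the handler in Example 1 runs on a mongod shard when a client sends it the getShardVersion command directly. Below is a minimal, hypothetical sketch of such a call using the legacy C++ driver (mongo/client/dbclient.h); the host name and the test.foo namespace are placeholders, and the connect/runCommand signatures are assumed from that driver rather than taken from this page.
#include <iostream>
#include <string>

#include "mongo/client/dbclient.h"  // legacy C++ driver

int main() {
    std::string errmsg;
    mongo::DBClientConnection conn;

    // "shard1.example.net:27018" is a placeholder for a shard's mongod.
    if (!conn.connect("shard1.example.net:27018", errmsg)) {
        std::cout << "connect failed: " << errmsg << std::endl;
        return 1;
    }

    // The handler reads the namespace from the command's first element,
    // so the value of "getShardVersion" must be a full "db.collection" name.
    mongo::BSONObj res;
    if (!conn.runCommand("admin", BSON("getShardVersion" << "test.foo"), res)) {
        std::cout << "getShardVersion failed: " << res << std::endl;
        return 1;
    }

    // "global" is the shard's view of the collection version; "mine" is the
    // version attached to this connection (0 when not in sharded mode).
    std::cout << "configServer: " << res["configServer"].str() << std::endl;
    std::cout << "global: " << res["global"] << " mine: " << res["mine"] << std::endl;
    return 0;
}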
Example 2: shardVersionOk
/**
 * @return true if not in sharded mode
 *         or if the version for this client is ok
 */
bool shardVersionOk( const string& ns , bool isWriteOp , string& errmsg ) {
if ( ! shardingState.enabled() )
return true;
ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );
if ( ! info ) {
// this means the client has nothing sharded
// so this allows direct connections to do whatever they want
// which I think is the correct behavior
return true;
}
if ( info->inForceVersionOkMode() ) {
return true;
}
// TODO
// all collections, sharded or not, will at some point have a version (and a ShardChunkManager)
// for now, we remove the sharding state of dropped collections,
// so delayed requests may come in. This has to be fixed.
ConfigVersion clientVersion = info->getVersion(ns);
ConfigVersion version;
if ( ! shardingState.hasVersion( ns , version ) && clientVersion == 0 ) {
return true;
}
if ( version == 0 && clientVersion > 0 ) {
stringstream ss;
ss << "collection was dropped or this shard no longer valied version: " << version << " clientVersion: " << clientVersion;
errmsg = ss.str();
return false;
}
if ( clientVersion >= version )
return true;
if ( clientVersion == 0 ) {
stringstream ss;
ss << "client in sharded mode, but doesn't have version set for this collection: " << ns << " myVersion: " << version;
errmsg = ss.str();
return false;
}
if ( version.majorVersion() == clientVersion.majorVersion() ) {
// this means there was just a split
// since on a split w/o a migrate this server is ok
// going to accept
return true;
}
stringstream ss;
ss << "your version is too old ns: " + ns << " global: " << version << " client: " << clientVersion;
errmsg = ss.str();
return false;
}
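The acceptance rule in Example 2 relies on how chunk versions are structured: a chunk migration bumps the major version, while a split only bumps the minor version, so a client that merely missed a split can still be served safely. The sketch below is a simplified, self-compiling illustration of that rule; ToyVersion and toyShardVersionOk are made-up names for this illustration, not MongoDB types.
#include <cstdint>
#include <iostream>

// Illustrative stand-in for ConfigVersion: the major part changes on chunk
// migrations, the minor part on splits.
struct ToyVersion {
    uint32_t majorVersion;
    uint32_t minorVersion;

    bool isSet() const { return majorVersion != 0 || minorVersion != 0; }
    uint64_t combined() const { return (uint64_t(majorVersion) << 32) | minorVersion; }
};

// Mirrors the decision order in shardVersionOk(): equal-or-newer clients pass,
// and a stale client whose major version still matches (it only missed a split)
// also passes; anything else means a missed migration or a dropped collection.
bool toyShardVersionOk(const ToyVersion& client, const ToyVersion& shard) {
    if (!shard.isSet() && !client.isSet())
        return true;                                  // collection is not sharded
    if (client.combined() >= shard.combined())
        return true;                                  // client is up to date (or ahead)
    if (client.isSet() && client.majorVersion == shard.majorVersion)
        return true;                                  // client only missed a split
    return false;                                     // stale: client must refresh
}

int main() {
    std::cout << toyShardVersionOk({3, 1}, {3, 7}) << "\n";  // 1: missed only a split
    std::cout << toyShardVersionOk({2, 9}, {3, 0}) << "\n";  // 0: missed a migration
    std::cout << toyShardVersionOk({0, 0}, {3, 0}) << "\n";  // 0: no client version set
    return 0;
}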
Example 3: shardVersionOk
/**
 * @return true if not in sharded mode
 *         or if the version for this client is ok
 */
bool shardVersionOk( const string& ns , bool isWriteOp , string& errmsg ){
if ( ! shardingState.enabled() )
return true;
ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );
if ( ! info ){
// this means the client has nothing sharded
// so this allows direct connections to do whatever they want
// which I think is the correct behavior
return true;
}
if ( info->inForceVersionOkMode() ){
return true;
}
ConfigVersion version;
if ( ! shardingState.hasVersion( ns , version ) ){
return true;
}
ConfigVersion clientVersion = info->getVersion(ns);
if ( version == 0 && clientVersion > 0 ){
stringstream ss;
ss << "collection was dropped or this shard no longer valied version: " << version << " clientVersion: " << clientVersion;
errmsg = ss.str();
return false;
}
if ( clientVersion >= version )
return true;
if ( clientVersion == 0 ){
stringstream ss;
ss << "client in sharded mode, but doesn't have version set for this collection: " << ns << " myVersion: " << version;
errmsg = ss.str();
return false;
}
if ( isWriteOp && version.majorVersion() == clientVersion.majorVersion() ){
// this means there was just a split
// since on a split w/o a migrate this server is ok
// going to accept write
return true;
}
stringstream ss;
ss << "your version is too old ns: " + ns << " global: " << version << " client: " << clientVersion;
errmsg = ss.str();
return false;
}
Example 4: run
bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string ns = cmdObj["getShardVersion"].valuestrsafe();
if ( ns.size() == 0 ) {
errmsg = "need to speciy fully namespace";
return false;
}
result.append( "configServer" , shardingState.getConfigServer() );
result.appendTimestamp( "global" , shardingState.getVersion(ns) );
ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );
if ( info )
result.appendTimestamp( "mine" , info->getVersion(ns) );
else
result.appendTimestamp( "mine" , 0 );
return true;
}
Example 5: _checkShardVersionOk
bool CollectionShardingState::_checkShardVersionOk(OperationContext* txn,
string* errmsg,
ChunkVersion* expectedShardVersion,
ChunkVersion* actualShardVersion) {
Client* client = txn->getClient();
// Operations using the DBDirectClient are unversioned.
if (client->isInDirectClient()) {
return true;
}
if (!repl::ReplicationCoordinator::get(txn)->canAcceptWritesForDatabase(_nss.db())) {
// Right now connections to secondaries aren't versioned at all.
return true;
}
const auto& oss = OperationShardingState::get(txn);
// If there is a version attached to the OperationContext, use it as the received version.
// Otherwise, get the received version from the ShardedConnectionInfo.
if (oss.hasShardVersion()) {
*expectedShardVersion = oss.getShardVersion(_nss);
} else {
ShardedConnectionInfo* info = ShardedConnectionInfo::get(client, false);
if (!info) {
// There is no shard version information on either 'txn' or 'client'. This means that
// the operation represented by 'txn' is unversioned, and the shard version is always OK
// for unversioned operations.
return true;
}
*expectedShardVersion = info->getVersion(_nss.ns());
}
if (ChunkVersion::isIgnoredVersion(*expectedShardVersion)) {
return true;
}
// Set this for error messaging purposes before potentially returning false.
auto metadata = getMetadata();
*actualShardVersion = metadata ? metadata->getShardVersion() : ChunkVersion::UNSHARDED();
if (_sourceMgr && _sourceMgr->getMigrationCriticalSectionSignal()) {
*errmsg = str::stream() << "migration commit in progress for " << _nss.ns();
// Set migration critical section on operation sharding state: operation will wait for the
// migration to finish before returning failure and retrying.
OperationShardingState::get(txn).setMigrationCriticalSectionSignal(
_sourceMgr->getMigrationCriticalSectionSignal());
return false;
}
if (expectedShardVersion->isWriteCompatibleWith(*actualShardVersion)) {
return true;
}
//
// Figure out exactly why not compatible, send appropriate error message
// The versions themselves are returned in the error, so not needed in messages here
//
// Check epoch first, to send more meaningful message, since other parameters probably won't
// match either.
if (actualShardVersion->epoch() != expectedShardVersion->epoch()) {
*errmsg = str::stream() << "version epoch mismatch detected for " << _nss.ns() << ", "
<< "the collection may have been dropped and recreated";
return false;
}
if (!actualShardVersion->isSet() && expectedShardVersion->isSet()) {
*errmsg = str::stream() << "this shard no longer contains chunks for " << _nss.ns() << ", "
<< "the collection may have been dropped";
return false;
}
if (actualShardVersion->isSet() && !expectedShardVersion->isSet()) {
*errmsg = str::stream() << "this shard contains versioned chunks for " << _nss.ns() << ", "
<< "but no version set in request";
return false;
}
if (actualShardVersion->majorVersion() != expectedShardVersion->majorVersion()) {
// Could be > or < - wanted is > if this is the source of a migration, wanted < if this is
// the target of a migration
*errmsg = str::stream() << "version mismatch detected for " << _nss.ns();
return false;
}
// Those are all the reasons the versions can mismatch
MONGO_UNREACHABLE;
}
Example 6: _checkShardVersionOk
bool CollectionShardingState::_checkShardVersionOk(OperationContext* opCtx,
string* errmsg,
ChunkVersion* expectedShardVersion,
ChunkVersion* actualShardVersion) {
Client* client = opCtx->getClient();
auto& oss = OperationShardingState::get(opCtx);
// If there is a version attached to the OperationContext, use it as the received version.
// Otherwise, get the received version from the ShardedConnectionInfo.
if (oss.hasShardVersion()) {
*expectedShardVersion = oss.getShardVersion(_nss);
} else {
ShardedConnectionInfo* info = ShardedConnectionInfo::get(client, false);
if (!info) {
// There is no shard version information on either 'opCtx' or 'client'. This means that
// the operation represented by 'opCtx' is unversioned, and the shard version is always
// OK for unversioned operations.
return true;
}
*expectedShardVersion = info->getVersion(_nss.ns());
}
// An operation with read concern 'available' should never have shardVersion set.
invariant(repl::ReadConcernArgs::get(opCtx).getLevel() !=
repl::ReadConcernLevel::kAvailableReadConcern);
if (ChunkVersion::isIgnoredVersion(*expectedShardVersion)) {
return true;
}
// Set this for error messaging purposes before potentially returning false.
auto metadata = getMetadata();
*actualShardVersion = metadata ? metadata->getShardVersion() : ChunkVersion::UNSHARDED();
if (_sourceMgr) {
const bool isReader = !opCtx->lockState()->isWriteLocked();
auto criticalSectionSignal = _sourceMgr->getMigrationCriticalSectionSignal(isReader);
if (criticalSectionSignal) {
*errmsg = str::stream() << "migration commit in progress for " << _nss.ns();
// Set migration critical section on operation sharding state: operation will wait for
// the migration to finish before returning failure and retrying.
oss.setMigrationCriticalSectionSignal(criticalSectionSignal);
return false;
}
}
if (expectedShardVersion->isWriteCompatibleWith(*actualShardVersion)) {
return true;
}
//
// Figure out exactly why not compatible, send appropriate error message
// The versions themselves are returned in the error, so not needed in messages here
//
// Check epoch first, to send more meaningful message, since other parameters probably won't
// match either.
if (actualShardVersion->epoch() != expectedShardVersion->epoch()) {
*errmsg = str::stream() << "version epoch mismatch detected for " << _nss.ns() << ", "
<< "the collection may have been dropped and recreated";
return false;
}
if (!actualShardVersion->isSet() && expectedShardVersion->isSet()) {
*errmsg = str::stream() << "this shard no longer contains chunks for " << _nss.ns() << ", "
<< "the collection may have been dropped";
return false;
}
if (actualShardVersion->isSet() && !expectedShardVersion->isSet()) {
*errmsg = str::stream() << "this shard contains versioned chunks for " << _nss.ns() << ", "
<< "but no version set in request";
return false;
}
if (actualShardVersion->majorVersion() != expectedShardVersion->majorVersion()) {
// Could be > or < - wanted is > if this is the source of a migration, wanted < if this is
// the target of a migration
*errmsg = str::stream() << "version mismatch detected for " << _nss.ns();
return false;
}
// Those are all the reasons the versions can mismatch
MONGO_UNREACHABLE;
}
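Examples 5 and 6 also settle a diagnostic order for incompatible versions: the epoch is compared first (a different epoch means the collection was probably dropped and recreated), then whether either side has a version at all, and only then the major version (a migration the other side has not seen yet). The sketch below condenses that classification into one self-contained function; ToyChunkVersion and MismatchReason are illustrative names, not MongoDB types.
#include <cstdint>

// Illustrative stand-in for ChunkVersion: the epoch identifies one incarnation
// of the collection, and the major version changes on chunk migrations.
struct ToyChunkVersion {
    uint64_t epoch;          // 0 stands for "no epoch" in this sketch
    uint32_t majorVersion;

    bool isSet() const { return majorVersion != 0; }
};

enum class MismatchReason {
    EpochMismatch,       // collection may have been dropped and recreated
    ShardHasNoChunks,    // request is versioned, but this shard holds no chunks
    RequestUnversioned,  // shard holds versioned chunks, but the request has no version
    MajorMismatch        // one side missed a migration (source or target)
};

// Mirrors the order of checks that run after isWriteCompatibleWith() fails.
MismatchReason classifyMismatch(const ToyChunkVersion& actual,
                                const ToyChunkVersion& expected) {
    if (actual.epoch != expected.epoch)
        return MismatchReason::EpochMismatch;
    if (!actual.isSet() && expected.isSet())
        return MismatchReason::ShardHasNoChunks;
    if (actual.isSet() && !expected.isSet())
        return MismatchReason::RequestUnversioned;
    return MismatchReason::MajorMismatch;
}

int main() {
    ToyChunkVersion actual{7, 0};    // shard holds no chunks for this epoch
    ToyChunkVersion expected{7, 4};  // request carries a version
    return classifyMismatch(actual, expected) == MismatchReason::ShardHasNoChunks ? 0 : 1;
}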
Example 7: shardVersionOk
/**
 * @return true if not in sharded mode
 *         or if the version for this client is ok
 */
bool shardVersionOk( const string& ns , string& errmsg, ConfigVersion& received, ConfigVersion& wanted ) {
if ( ! shardingState.enabled() )
return true;
if ( ! isMasterNs( ns.c_str() ) ) {
// right now connections to secondaries aren't versioned at all
return true;
}
ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );
if ( ! info ) {
// this means the client has nothing sharded
// so this allows direct connections to do whatever they want
// which I think is the correct behavior
return true;
}
if ( info->inForceVersionOkMode() ) {
return true;
}
// TODO
// all collections, sharded or not, will at some point have a version (and a ShardChunkManager)
// for now, we remove the sharding state of dropped collections,
// so delayed requests may come in. This has to be fixed.
ConfigVersion clientVersion = info->getVersion(ns);
ConfigVersion version;
if ( ! shardingState.hasVersion( ns , version ) && ! clientVersion.isSet() ) {
return true;
}
// The versions we're going to compare, saved for future use
received = clientVersion;
wanted = version;
if ( ! version.isSet() && clientVersion.isSet() ) {
stringstream ss;
ss << "collection was dropped or this shard no longer valid version";
errmsg = ss.str();
return false;
}
if ( clientVersion >= version )
return true;
if ( ! clientVersion.isSet() ) {
stringstream ss;
ss << "client in sharded mode, but doesn't have version set for this collection";
errmsg = ss.str();
return false;
}
if ( version.majorVersion() == clientVersion.majorVersion() ) {
// this means there was just a split
// since on a split w/o a migrate this server is ok
// going to accept
return true;
}
stringstream ss;
ss << "your version is too old";
errmsg = ss.str();
return false;
}
Example 8: run
bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
// Steps
// 1. check basic config
// 2. extract params from command
// 3. fast check
// 4. slow check (LOCKS)
// step 1
lastError.disableForCommand();
ShardedConnectionInfo* info = ShardedConnectionInfo::get( true );
// make sure we have the mongos id for writebacks
if ( ! checkMongosID( info , cmdObj["serverID"] , errmsg ) )
return false;
bool authoritative = cmdObj.getBoolField( "authoritative" );
// check config server is ok or enable sharding
if ( ! checkConfigOrInit( cmdObj["configdb"].valuestrsafe() , authoritative , errmsg , result ) )
return false;
// check shard name/hosts are correct
if ( cmdObj["shard"].type() == String ) {
shardingState.gotShardName( cmdObj["shard"].String() );
shardingState.gotShardHost( cmdObj["shardHost"].String() );
}
// Handle initial shard connection
if( cmdObj["version"].eoo() && cmdObj["init"].trueValue() ) {
result.append( "initialized", true );
return true;
}
// we can run on a slave up to here
if ( ! isMaster( "admin" ) ) {
result.append( "errmsg" , "not master" );
result.append( "note" , "from post init in setShardVersion" );
return false;
}
// step 2
string ns = cmdObj["setShardVersion"].valuestrsafe();
if ( ns.size() == 0 ) {
errmsg = "need to specify namespace";
return false;
}
const ConfigVersion version = ConfigVersion( extractVersion( cmdObj["version"] , errmsg ), OID() );
if ( errmsg.size() )
return false;
// step 3
const ConfigVersion oldVersion = info->getVersion(ns);
const ConfigVersion globalVersion = shardingState.getVersion(ns);
oldVersion.addToBSON( result, "oldVersion" );
if ( globalVersion.isSet() && version.isSet() ) {
// this means there is no reset going on either side
// so it's safe to make some assumptions
if ( version.isEquivalentTo( globalVersion ) ) {
// mongos and mongod agree!
if ( ! oldVersion.isEquivalentTo( version ) ) {
if ( oldVersion < globalVersion ) {
info->setVersion( ns , version );
}
else if ( authoritative ) {
// this means there was a drop and our version is reset
info->setVersion( ns , version );
}
else {
result.append( "ns" , ns );
result.appendBool( "need_authoritative" , true );
errmsg = "verifying drop on '" + ns + "'";
return false;
}
}
return true;
}
}
// step 4
// this is because of a weird segfault I saw and I can't see why this should ever be set
massert( 13647 , str::stream() << "context should be empty here, is: " << cc().getContext()->ns() , cc().getContext() == 0 );
Lock::GlobalWrite setShardVersionLock; // TODO: can we get rid of this??
if ( oldVersion.isSet() && ! globalVersion.isSet() ) {
// this had been reset
info->setVersion( ns , ShardChunkVersion( 0, OID() ) );
}
//......... part of the code is omitted here .........
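Based on the fields the handler in Example 8 reads (setShardVersion, configdb, shard, shardHost, serverID, authoritative, init, version), a request coming from mongos would look roughly like the hypothetical sketch below. It reuses the legacy driver's BSON builder; all host names, the shard name, and the packed version value are placeholders, not values from a real deployment.
#include "mongo/client/dbclient.h"  // legacy C++ driver: BSONObjBuilder, BSONObj, OID

// Hypothetical construction of the command document parsed by the handler
// above; every value here is a placeholder.
mongo::BSONObj buildSetShardVersionCmd() {
    mongo::OID mongosId;
    mongosId.init();                                           // same pattern as writebackID in Example 9

    const unsigned long long packedVersion = 42;               // placeholder packed (major, minor) value

    mongo::BSONObjBuilder cmd;
    cmd.append( "setShardVersion", "test.foo" );               // namespace being versioned
    cmd.append( "configdb", "cfg1.example.net:27019" );        // must match shardingState.getConfigServer()
    cmd.append( "shard", "shard0000" );                        // shard name ...
    cmd.append( "shardHost", "shard0000.example.net:27018" );  // ... and its host
    cmd.append( "serverID", mongosId );                        // mongos id, used for writebacks
    cmd.appendBool( "authoritative", false );                  // true when forcing a drop/reset to be accepted
    cmd.appendTimestamp( "version", packedVersion );           // packed chunk version read by extractVersion()
    return cmd.obj();
}
When mongos first opens a connection it instead sends the command with init: true and no version field, which the handler above acknowledges with initialized: true before any version negotiation takes place.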
Example 9: _handlePossibleShardedMessage
bool _handlePossibleShardedMessage( Message &m, DbResponse* dbresponse ) {
DEV assert( shardingState.enabled() );
int op = m.operation();
if ( op < 2000
|| op >= 3000
|| op == dbGetMore // cursors are weird
)
return false;
DbMessage d(m);
const char *ns = d.getns();
string errmsg;
// We don't care about the version here, since we're returning it later in the writeback
ConfigVersion received, wanted;
if ( shardVersionOk( ns , errmsg, received, wanted ) ) {
return false;
}
LOG(1) << "connection meta data too old - will retry ns:(" << ns << ") op:(" << opToString(op) << ") " << errmsg << endl;
if ( doesOpGetAResponse( op ) ) {
assert( dbresponse );
BufBuilder b( 32768 );
b.skip( sizeof( QueryResult ) );
{
BSONObj obj = BSON( "$err" << errmsg << "ns" << ns );
b.appendBuf( obj.objdata() , obj.objsize() );
}
QueryResult *qr = (QueryResult*)b.buf();
qr->_resultFlags() = ResultFlag_ErrSet | ResultFlag_ShardConfigStale;
qr->len = b.len();
qr->setOperation( opReply );
qr->cursorId = 0;
qr->startingFrom = 0;
qr->nReturned = 1;
b.decouple();
Message * resp = new Message();
resp->setData( qr , true );
dbresponse->response = resp;
dbresponse->responseTo = m.header()->id;
return true;
}
uassert( 9517 , "writeback" , ( d.reservedField() & DbMessage::Reserved_FromWriteback ) == 0 );
OID writebackID;
writebackID.init();
lastError.getSafe()->writeback( writebackID );
const OID& clientID = ShardedConnectionInfo::get(false)->getID();
massert( 10422 , "write with bad shard config and no server id!" , clientID.isSet() );
LOG(1) << "got write with an old config - writing back ns: " << ns << endl;
LOG(1) << m.toString() << endl;
BSONObjBuilder b;
b.appendBool( "writeBack" , true );
b.append( "ns" , ns );
b.append( "id" , writebackID );
b.append( "connectionId" , cc().getConnectionId() );
b.append( "instanceIdent" , prettyHostName() );
b.appendTimestamp( "version" , shardingState.getVersion( ns ) );
ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );
b.appendTimestamp( "yourVersion" , info ? info->getVersion(ns) : (ConfigVersion)0 );
b.appendBinData( "msg" , m.header()->len , bdtCustom , (char*)(m.singleData()) );
LOG(2) << "writing back msg with len: " << m.header()->len << " op: " << m.operation() << endl;
writeBackManager.queueWriteBack( clientID.str() , b.obj() );
return true;
}
Example 10: shardVersionOk
/**
 * @return true if not in sharded mode
 *         or if the version for this client is ok
 */
bool shardVersionOk( const string& ns , string& errmsg, ChunkVersion& received, ChunkVersion& wanted ) {
if ( ! shardingState.enabled() )
return true;
if ( ! isMasterNs( ns.c_str() ) ) {
// right now connections to secondaries aren't versioned at all
return true;
}
ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );
if ( ! info ) {
// this means the client has nothing sharded
// so this allows direct connections to do whatever they want
// which I think is the correct behavior
return true;
}
if ( info->inForceVersionOkMode() ) {
return true;
}
// TODO : all collections, sharded or not, will at some point have a version
// (and a CollectionMetadata)
received = info->getVersion( ns );
wanted = shardingState.getVersion( ns );
if( received.isWriteCompatibleWith( wanted ) ) return true;
//
// Figure out exactly why not compatible, send appropriate error message
// The versions themselves are returned in the error, so not needed in messages here
//
// Check epoch first, to send more meaningful message, since other parameters probably
// won't match either
if( ! wanted.hasCompatibleEpoch( received ) ){
errmsg = str::stream() << "version epoch mismatch detected for " << ns << ", "
<< "the collection may have been dropped and recreated";
return false;
}
if( ! wanted.isSet() && received.isSet() ){
errmsg = str::stream() << "this shard no longer contains chunks for " << ns << ", "
<< "the collection may have been dropped";
return false;
}
if( wanted.isSet() && ! received.isSet() ){
errmsg = str::stream() << "this shard contains versioned chunks for " << ns << ", "
<< "but no version set in request";
return false;
}
if( wanted.majorVersion() != received.majorVersion() ){
//
// Could be > or < - wanted is > if this is the source of a migration,
// wanted < if this is the target of a migration
//
errmsg = str::stream() << "version mismatch detected for " << ns << ", "
<< "stored major version " << wanted.majorVersion()
<< " does not match received " << received.majorVersion();
return false;
}
// Those are all the reasons the versions can mismatch
verify( false );
return false;
}
Example 11: run
bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
// Debugging code for SERVER-1633. Commands already have a coarser timer for
// normal operation.
Timer timer;
vector<int> laps;
lastError.disableForCommand();
ShardedConnectionInfo* info = ShardedConnectionInfo::get( true );
bool authoritative = cmdObj.getBoolField( "authoritative" );
string configdb = cmdObj["configdb"].valuestrsafe();
{ // configdb checking
if ( configdb.size() == 0 ){
errmsg = "no configdb";
return false;
}
if ( shardingState.enabled() ){
if ( configdb != shardingState.getConfigServer() ){
errmsg = "specified a different configdb!";
return false;
}
}
else {
if ( ! authoritative ){
result.appendBool( "need_authoritative" , true );
errmsg = "first setShardVersion";
return false;
}
shardingState.enable( configdb );
configServer.init( configdb );
}
}
// SERVER-1633
laps.push_back( timer.millis() );
if ( cmdObj["shard"].type() == String ){
shardingState.gotShardName( cmdObj["shard"].String() );
shardingState.gotShardHost( cmdObj["shardHost"].String() );
}
{ // setting up ids
if ( cmdObj["serverID"].type() != jstOID ){
// TODO: fix this
//errmsg = "need serverID to be an OID";
//return 0;
}
else {
OID clientId = cmdObj["serverID"].__oid();
if ( ! info->hasID() ){
info->setID( clientId );
}
else if ( clientId != info->getID() ){
errmsg = "server id has changed!";
return 0;
}
}
}
// SERVER-1633
laps.push_back( timer.millis() );
unsigned long long version = extractVersion( cmdObj["version"] , errmsg );
if ( errmsg.size() ){
return false;
}
string ns = cmdObj["setShardVersion"].valuestrsafe();
if ( ns.size() == 0 ){
errmsg = "need to speciy fully namespace";
return false;
}
ConfigVersion& oldVersion = info->getVersion(ns);
ConfigVersion& globalVersion = shardingState.getVersion(ns);
if ( oldVersion > 0 && globalVersion == 0 ){
// this had been reset
oldVersion = 0;
}
if ( version == 0 && globalVersion == 0 ){
// this connection is cleaning itself
oldVersion = 0;
return 1;
}
// SERVER-1633
laps.push_back( timer.millis() );
if ( version == 0 && globalVersion > 0 ){
if ( ! authoritative ){
result.appendBool( "need_authoritative" , true );
result.appendTimestamp( "globalVersion" , globalVersion );
result.appendTimestamp( "oldVersion" , oldVersion );
errmsg = "dropping needs to be authoritative";
//......... part of the code is omitted here .........
Example 12: run
bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
// Steps
// 1. check basic config
// 2. extract params from command
// 3. fast check
// 4. slow check (LOCKS)
// step 1
lastError.disableForCommand();
ShardedConnectionInfo* info = ShardedConnectionInfo::get( true );
bool authoritative = cmdObj.getBoolField( "authoritative" );
// check config server is ok or enable sharding
if ( ! checkConfigOrInit( cmdObj["configdb"].valuestrsafe() , authoritative , errmsg , result ) )
return false;
// check shard name/hosts are correct
if ( cmdObj["shard"].type() == String ) {
shardingState.gotShardName( cmdObj["shard"].String() );
shardingState.gotShardHost( cmdObj["shardHost"].String() );
}
// make sure we have the mongos id for writebacks
if ( ! checkMongosID( info , cmdObj["serverID"] , errmsg ) )
return false;
// step 2
string ns = cmdObj["setShardVersion"].valuestrsafe();
if ( ns.size() == 0 ) {
errmsg = "need to speciy namespace";
return false;
}
const ConfigVersion version = extractVersion( cmdObj["version"] , errmsg );
if ( errmsg.size() )
return false;
// step 3
const ConfigVersion oldVersion = info->getVersion(ns);
const ConfigVersion globalVersion = shardingState.getVersion(ns);
result.appendTimestamp( "oldVersion" , oldVersion );
if ( globalVersion > 0 && version > 0 ) {
// this means there is no reset going on either side
// so it's safe to make some assumptions
if ( version == globalVersion ) {
// mongos and mongod agree!
if ( oldVersion != version ) {
assert( oldVersion < globalVersion );
info->setVersion( ns , version );
}
return true;
}
}
// step 4
// this is because of a weird segfault I saw and I can't see why this should ever be set
massert( 13647 , str::stream() << "context should be empty here, is: " << cc().getContext()->ns() , cc().getContext() == 0 );
dblock setShardVersionLock; // TODO: can we get rid of this??
if ( oldVersion > 0 && globalVersion == 0 ) {
// this had been reset
info->setVersion( ns , 0 );
}
if ( version == 0 && globalVersion == 0 ) {
// this connection is cleaning itself
info->setVersion( ns , 0 );
return true;
}
if ( version == 0 && globalVersion > 0 ) {
if ( ! authoritative ) {
result.appendBool( "need_authoritative" , true );
result.append( "ns" , ns );
result.appendTimestamp( "globalVersion" , globalVersion );
errmsg = "dropping needs to be authoritative";
return false;
}
log() << "wiping data for: " << ns << endl;
result.appendTimestamp( "beforeDrop" , globalVersion );
// only setting global version on purpose
// need clients to re-find meta-data
shardingState.resetVersion( ns );
info->setVersion( ns , 0 );
return true;
}
if ( version < oldVersion ) {
errmsg = "this connection already had a newer version of collection '" + ns + "'";
//......... part of the code is omitted here .........