This article collects typical usage examples of the C++ ShardingState class. If you are wondering what the ShardingState class does, how to use it, or want to see it in action, the curated class examples below may help.
The following 15 code examples of the ShardingState class are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
Example 1: logDeleteOpForSharding
void logDeleteOpForSharding(OperationContext* txn,
                            const char* ns,
                            const BSONObj& obj,
                            bool notInActiveChunk) {
    ShardingState* shardingState = ShardingState::get(txn);
    if (shardingState->enabled())
        shardingState->migrationSourceManager()->logDeleteOp(txn, ns, obj, notInActiveChunk);
}
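The pattern here is a simple guard-and-delegate: fetch the per-operation ShardingState, and forward the delete to the migration source manager only when sharding is enabled. Below is a minimal, self-contained sketch of that shape; every name in it (ShardState, MigrationLog, logDeleteOp) is an illustrative stand-in, not MongoDB's API.

#include <iostream>
#include <string>

class MigrationLog {
public:
    void logDelete(const std::string& ns) { std::cout << "logged delete in " << ns << "\n"; }
};

class ShardState {
public:
    void enable() { _enabled = true; }
    bool enabled() const { return _enabled; }
    MigrationLog* migrationLog() { return &_log; }
private:
    bool _enabled = false;
    MigrationLog _log;
};

void logDeleteOp(ShardState& state, const std::string& ns) {
    if (state.enabled())  // writes are only logged while sharding is active
        state.migrationLog()->logDelete(ns);
}

int main() {
    ShardState state;
    logDeleteOp(state, "test.coll");  // ignored: sharding not enabled
    state.enable();
    logDeleteOp(state, "test.coll");  // now logged
}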
Example 2: _enabled
namespace mongo {

    // ----- ShardingState START -----

    ShardingState::ShardingState()
        : _enabled(false) , _mutex( "ShardingState" ) {
    }

    void ShardingState::enable( const string& server ) {
        _enabled = true;

        assert( server.size() );

        if ( _configServer.size() == 0 )
            _configServer = server;
        else {
            assert( server == _configServer );
        }
    }

    void ShardingState::gotShardName( const string& name ) {
        if ( _shardName.size() == 0 ) {
            // TODO SERVER-2299 verify the name is sound w.r.t IPs
            _shardName = name;
            return;
        }

        if ( _shardName == name )
            return;

        stringstream ss;
        ss << "gotShardName different than what i had before "
           << " before [" << _shardName << "] "
           << " got [" << name << "] ";
        uasserted( 13298 , ss.str() );
    }

    void ShardingState::gotShardHost( string host ) {
        size_t slash = host.find( '/' );
        if ( slash != string::npos )
            host = host.substr( 0 , slash );

        if ( _shardHost.size() == 0 ) {
            _shardHost = host;
            return;
        }

        if ( _shardHost == host )
            return;

        stringstream ss;
        ss << "gotShardHost different than what i had before "
           << " before [" << _shardHost << "] "
           << " got [" << host << "] ";
        uasserted( 13299 , ss.str() );
    }

    void ShardingState::resetShardingState() {
        scoped_lock lk(_mutex);

        _enabled = false;
        _configServer.clear();
        _shardName.clear();
        _shardHost.clear();
        _chunks.clear();
    }

    // TODO we shouldn't need three ways for checking the version. Fix this.

    bool ShardingState::hasVersion( const string& ns ) {
        scoped_lock lk(_mutex);
        ChunkManagersMap::const_iterator it = _chunks.find(ns);
        return it != _chunks.end();
    }

    bool ShardingState::hasVersion( const string& ns , ConfigVersion& version ) {
        scoped_lock lk(_mutex);
        ChunkManagersMap::const_iterator it = _chunks.find(ns);
        if ( it == _chunks.end() )
            return false;

        ShardChunkManagerPtr p = it->second;
        version = p->getVersion();
        return true;
    }

    const ConfigVersion ShardingState::getVersion( const string& ns ) const {
        scoped_lock lk(_mutex);

        ChunkManagersMap::const_iterator it = _chunks.find( ns );
        if ( it != _chunks.end() ) {
            ShardChunkManagerPtr p = it->second;
            return p->getVersion();
        }
        else {
            return 0;
        }
    }
//......... part of the code omitted here .........
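Every accessor in this example takes _mutex before touching the chunk map. Here is a self-contained sketch of that lock-protected registry pattern, using std::mutex and a plain integer version in place of mongo's scoped_lock and ConfigVersion; all names are assumed for illustration.

#include <map>
#include <mutex>
#include <string>

class VersionRegistry {
public:
    bool hasVersion(const std::string& ns) const {
        std::lock_guard<std::mutex> lk(_mutex);
        return _versions.count(ns) != 0;
    }

    // Returns false (and leaves 'out' untouched) when 'ns' is unknown.
    bool getVersion(const std::string& ns, unsigned long long* out) const {
        std::lock_guard<std::mutex> lk(_mutex);
        std::map<std::string, unsigned long long>::const_iterator it = _versions.find(ns);
        if (it == _versions.end())
            return false;
        *out = it->second;
        return true;
    }

    void setVersion(const std::string& ns, unsigned long long v) {
        std::lock_guard<std::mutex> lk(_mutex);
        _versions[ns] = v;
    }

private:
    mutable std::mutex _mutex;  // guards every access to _versions
    std::map<std::string, unsigned long long> _versions;
};

int main() {
    VersionRegistry reg;
    reg.setVersion("test.coll", 3);
    unsigned long long v = 0;
    if (reg.getVersion("test.coll", &v)) { /* v == 3 */ }
}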
Example 3: _enabled
namespace mongo {

    // ----- ShardingState START -----

    ShardingState::ShardingState()
        : _enabled(false) , _mutex( "ShardingState" ) {
    }

    void ShardingState::enable( const string& server ) {
        _enabled = true;

        assert( server.size() );

        if ( _configServer.size() == 0 )
            _configServer = server;
        else {
            assert( server == _configServer );
        }
    }

    void ShardingState::gotShardName( const string& name ) {
        if ( _shardName.size() == 0 ) {
            _shardName = name;
            return;
        }

        if ( _shardName == name )
            return;

        stringstream ss;
        ss << "gotShardName different than what i had before "
           << " before [" << _shardName << "] "
           << " got [" << name << "] ";
        uasserted( 13298 , ss.str() );
    }

    void ShardingState::gotShardHost( string host ) {
        size_t slash = host.find( '/' );
        if ( slash != string::npos )
            host = host.substr( 0 , slash );

        if ( _shardHost.size() == 0 ) {
            _shardHost = host;
            return;
        }

        if ( _shardHost == host )
            return;

        stringstream ss;
        ss << "gotShardHost different than what i had before "
           << " before [" << _shardHost << "] "
           << " got [" << host << "] ";
        uasserted( 13299 , ss.str() );
    }

    bool ShardingState::hasVersion( const string& ns ) {
        scoped_lock lk(_mutex);
        NSVersionMap::const_iterator i = _versions.find(ns);
        return i != _versions.end();
    }

    bool ShardingState::hasVersion( const string& ns , ConfigVersion& version ) {
        scoped_lock lk(_mutex);
        NSVersionMap::const_iterator i = _versions.find(ns);
        if ( i == _versions.end() )
            return false;

        version = i->second;
        return true;
    }

    const ConfigVersion ShardingState::getVersion( const string& ns ) const {
        scoped_lock lk(_mutex);
        NSVersionMap::const_iterator it = _versions.find( ns );
        if ( it != _versions.end() ) {
            return it->second;
        } else {
            return 0;
        }
    }

    void ShardingState::setVersion( const string& ns , const ConfigVersion& version ) {
        scoped_lock lk(_mutex);

        if ( version != 0 ) {
            NSVersionMap::const_iterator it = _versions.find( ns );
            // TODO 11-18-2010: as we bring chunk boundary information to mongod, it may
            // happen that we're setting a version for an ns the shard already knows about
            // (e.g. because it set it itself during a chunk migration). Eventually, the
            // only cases that should issue a setVersion are:
            // 1) the first chunk of a collection, for version 1|0
            // 2) a drop of a collection, for version 0|0
            // 3) a load of the shard's chunk state, on a primary-secondary failover
            assert( it == _versions.end() || version >= it->second );
        }
//......... part of the code omitted here .........
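The assert at the end of setVersion() encodes the interesting invariant: once a namespace has a version, it may only move forward, with version 0 reserved as the reset path. A tiny illustrative sketch of that invariant, with versions as plain integers:

#include <cassert>
#include <map>
#include <string>

void setVersion(std::map<std::string, unsigned long long>& versions,
                const std::string& ns, unsigned long long v) {
    if (v != 0) {
        std::map<std::string, unsigned long long>::const_iterator it = versions.find(ns);
        assert(it == versions.end() || v >= it->second);  // versions never move backwards
    }
    versions[ns] = v;
}

int main() {
    std::map<std::string, unsigned long long> versions;
    setVersion(versions, "test.coll", 1);
    setVersion(versions, "test.coll", 2);  // ok: moving forward
    setVersion(versions, "test.coll", 0);  // ok: version 0 is the reset path
}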
Example 4: mergeChunks
bool mergeChunks(OperationContext* txn,
                 const NamespaceString& nss,
                 const BSONObj& minKey,
                 const BSONObj& maxKey,
                 const OID& epoch,
                 string* errMsg) {
    // Get the distributed lock
    string whyMessage = stream() << "merging chunks in " << nss.ns() << " from " << minKey
                                 << " to " << maxKey;
    auto scopedDistLock = grid.catalogManager(txn)->distLock(
        txn, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout);

    if (!scopedDistLock.isOK()) {
        *errMsg = stream() << "could not acquire collection lock for " << nss.ns()
                           << " to merge chunks in [" << minKey << "," << maxKey << ")"
                           << causedBy(scopedDistLock.getStatus());
        warning() << *errMsg;
        return false;
    }

    ShardingState* shardingState = ShardingState::get(txn);

    //
    // We now have the collection lock, refresh metadata to latest version and sanity check
    //

    ChunkVersion shardVersion;
    Status status = shardingState->refreshMetadataNow(txn, nss.ns(), &shardVersion);

    if (!status.isOK()) {
        *errMsg = str::stream() << "could not merge chunks, failed to refresh metadata for "
                                << nss.ns() << causedBy(status.reason());
        warning() << *errMsg;
        return false;
    }

    if (epoch.isSet() && shardVersion.epoch() != epoch) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns() << " has changed"
                           << " since merge was sent"
                           << "(sent epoch : " << epoch.toString()
                           << ", current epoch : " << shardVersion.epoch().toString() << ")";
        warning() << *errMsg;
        return false;
    }

    shared_ptr<CollectionMetadata> metadata = shardingState->getCollectionMetadata(nss.ns());

    if (!metadata || metadata->getKeyPattern().isEmpty()) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " is not sharded";
        warning() << *errMsg;
        return false;
    }

    dassert(metadata->getShardVersion().equals(shardVersion));

    if (!metadata->isValidKey(minKey) || !metadata->isValidKey(maxKey)) {
        *errMsg = stream() << "could not merge chunks, the range " << rangeToString(minKey, maxKey)
                           << " is not valid"
                           << " for collection " << nss.ns() << " with key pattern "
                           << metadata->getKeyPattern();
        warning() << *errMsg;
        return false;
    }

    //
    // Get merged chunk information
    //

    ChunkVersion mergeVersion = metadata->getCollVersion();
    mergeVersion.incMinor();

    std::vector<ChunkType> chunksToMerge;

    ChunkType itChunk;
    itChunk.setMin(minKey);
    itChunk.setMax(minKey);
    itChunk.setNS(nss.ns());
    itChunk.setShard(shardingState->getShardName());

    while (itChunk.getMax().woCompare(maxKey) < 0 &&
           metadata->getNextChunk(itChunk.getMax(), &itChunk)) {
        chunksToMerge.push_back(itChunk);
    }

    if (chunksToMerge.empty()) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " range starting at " << minKey << " and ending at " << maxKey
                           << " does not belong to shard " << shardingState->getShardName();
        warning() << *errMsg;
        return false;
    }

    //
//......... part of the code omitted here .........
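The while loop is the heart of the function: starting from minKey, it repeatedly asks the metadata for the next contiguous chunk until maxKey is covered, and an empty result means the range does not belong to this shard. Below is a self-contained sketch of that walk, modeling chunk bounds as plain integers instead of BSON keys; all names are illustrative.

#include <iostream>
#include <map>
#include <vector>

struct Chunk { int min; int max; };

// Maps each chunk's min bound to the chunk; assumes ranges are contiguous.
typedef std::map<int, Chunk> ChunkMap;

std::vector<Chunk> chunksToMerge(const ChunkMap& chunks, int minKey, int maxKey) {
    std::vector<Chunk> out;
    int cursor = minKey;
    while (cursor < maxKey) {
        ChunkMap::const_iterator it = chunks.find(cursor);
        if (it == chunks.end())
            break;                    // gap: part of the range is owned elsewhere
        out.push_back(it->second);
        cursor = it->second.max;      // advance to the next chunk's min bound
    }
    return out;
}

int main() {
    ChunkMap chunks;
    chunks[0]  = {0, 10};
    chunks[10] = {10, 20};
    chunks[20] = {20, 30};
    std::vector<Chunk> merge = chunksToMerge(chunks, 0, 30);
    std::cout << merge.size() << " chunks to merge\n";  // prints: 3 chunks to merge
}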
Example 5: _enabled
namespace mongo {

    // ----- ShardingState START -----

    ShardingState::ShardingState()
        : _enabled(false) , _mutex( "ShardingState" ),
          _configServerTickets( 3 /* max number of concurrent config server refresh threads */ ) {
    }

    void ShardingState::enable( const string& server ) {
        _enabled = true;

        verify( server.size() );

        if ( _configServer.size() == 0 )
            _configServer = server;
        else {
            verify( server == _configServer );
        }
    }

    void ShardingState::gotShardName( const string& name ) {
        scoped_lock lk(_mutex);
        if ( _shardName.size() == 0 ) {
            // TODO SERVER-2299 verify the name is sound w.r.t IPs
            _shardName = name;
            return;
        }

        if ( _shardName == name )
            return;

        stringstream ss;
        ss << "gotShardName different than what i had before "
           << " before [" << _shardName << "] "
           << " got [" << name << "] ";
        msgasserted( 13298 , ss.str() );
    }

    void ShardingState::gotShardHost( string host ) {
        scoped_lock lk(_mutex);

        size_t slash = host.find( '/' );
        if ( slash != string::npos )
            host = host.substr( 0 , slash );

        if ( _shardHost.size() == 0 ) {
            _shardHost = host;
            return;
        }

        if ( _shardHost == host )
            return;

        stringstream ss;
        ss << "gotShardHost different than what i had before "
           << " before [" << _shardHost << "] "
           << " got [" << host << "] ";
        msgasserted( 13299 , ss.str() );
    }

    void ShardingState::resetShardingState() {
        scoped_lock lk(_mutex);

        _enabled = false;
        _configServer.clear();
        _shardName.clear();
        _shardHost.clear();
        _chunks.clear();
    }

    // TODO we shouldn't need three ways for checking the version. Fix this.

    bool ShardingState::hasVersion( const string& ns ) {
        scoped_lock lk(_mutex);
        ChunkManagersMap::const_iterator it = _chunks.find(ns);
        return it != _chunks.end();
    }

    bool ShardingState::hasVersion( const string& ns , ConfigVersion& version ) {
        scoped_lock lk(_mutex);
        ChunkManagersMap::const_iterator it = _chunks.find(ns);
        if ( it == _chunks.end() )
            return false;

        ShardChunkManagerPtr p = it->second;
        version = p->getVersion();
        return true;
    }

    const ConfigVersion ShardingState::getVersion( const string& ns ) const {
        scoped_lock lk(_mutex);

        ChunkManagersMap::const_iterator it = _chunks.find( ns );
        if ( it != _chunks.end() ) {
            ShardChunkManagerPtr p = it->second;
            return p->getVersion();
        }
        else {
            return ConfigVersion( 0, OID() );
//......... part of the code omitted here .........
Example 6: _enabled
namespace mongo {

    // ----- ShardingState START -----

    ShardingState::ShardingState()
        : _enabled(false) , _mutex( "ShardingState" ),
          _configServerTickets( 3 /* max number of concurrent config server refresh threads */ ) {
    }

    void ShardingState::enable( const string& server ) {
        scoped_lock lk(_mutex);

        _enabled = true;

        verify( server.size() );

        if ( _configServer.size() == 0 )
            _configServer = server;
        else {
            verify( server == _configServer );
        }
    }

    void ShardingState::initialize(const string& server) {
        ShardedConnectionInfo::addHook();
        shardingState.enable(server);
        configServer.init(server);
    }

    bool ShardingState::setShardName( const string& name ) {
        scoped_lock lk(_mutex);

        if ( _shardName.size() == 0 ) {
            // TODO SERVER-2299 remotely verify the name is sound w.r.t IPs
            _shardName = name;

            string clientAddr = cc().clientAddress(true);
            log() << "remote client " << clientAddr << " initialized this host as shard " << name;
            return true;
        }

        if ( _shardName == name )
            return true;

        string clientAddr = cc().clientAddress(true);
        warning() << "remote client " << clientAddr << " tried to initialize this host as shard "
                  << name << ", but shard name was previously initialized as " << _shardName;
        return false;
    }

    void ShardingState::gotShardName( const string& name ) {
        if ( setShardName( name ) )
            return;

        string clientAddr = cc().clientAddress(true);
        stringstream ss;
        // Same error as above, to match for reporting
        ss << "remote client " << clientAddr << " tried to initialize this host as shard " << name
           << ", but shard name was previously initialized as " << _shardName;
        msgasserted( 13298 , ss.str() );
    }

    void ShardingState::resetShardingState() {
        scoped_lock lk(_mutex);

        _enabled = false;
        _configServer.clear();
        _shardName.clear();
        _collMetadata.clear();
    }

    // TODO we shouldn't need three ways for checking the version. Fix this.

    bool ShardingState::hasVersion( const string& ns ) {
        scoped_lock lk(_mutex);
        CollectionMetadataMap::const_iterator it = _collMetadata.find(ns);
        return it != _collMetadata.end();
    }

    bool ShardingState::hasVersion( const string& ns , ChunkVersion& version ) {
        scoped_lock lk(_mutex);
        CollectionMetadataMap::const_iterator it = _collMetadata.find(ns);
        if ( it == _collMetadata.end() )
            return false;

        CollectionMetadataPtr p = it->second;
        version = p->getShardVersion();
        return true;
    }

    const ChunkVersion ShardingState::getVersion( const string& ns ) const {
        scoped_lock lk(_mutex);

        CollectionMetadataMap::const_iterator it = _collMetadata.find( ns );
        if ( it != _collMetadata.end() ) {
            CollectionMetadataPtr p = it->second;
            return p->getShardVersion();
        }
        else {
            return ChunkVersion( 0, OID() );
//......... part of the code omitted here .........
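setShardName() implements set-once semantics: the first name wins, repeating the same name succeeds, and a conflicting name is rejected (gotShardName() then escalates the rejection to an assertion). A compact illustrative sketch of that idiom, with assumed names:

#include <mutex>
#include <string>

class ShardIdentity {
public:
    // Returns true if 'name' is now (or already was) the shard name.
    bool setShardName(const std::string& name) {
        std::lock_guard<std::mutex> lk(_mutex);
        if (_shardName.empty()) {
            _shardName = name;      // first initialization wins
            return true;
        }
        return _shardName == name;  // idempotent only for the same value
    }

private:
    std::mutex _mutex;
    std::string _shardName;
};

int main() {
    ShardIdentity id;
    bool a = id.setShardName("shard0000");  // true: first caller wins
    bool b = id.setShardName("shard0000");  // true: same name is a no-op
    bool c = id.setShardName("shard0001");  // false: conflicting name rejected
    (void)a; (void)b; (void)c;
}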
Example 7: run
bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
    string ns = cmdObj["getShardVersion"].valuestrsafe();
    if ( ns.size() == 0 ) {
        errmsg = "need to specify full namespace";
        return false;
    }

    result.append( "configServer" , shardingState.getConfigServer() );
    result.appendTimestamp( "global" , shardingState.getVersion(ns).toLong() );

    ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );
    result.appendBool( "inShardedMode" , info != 0 );
    if ( info )
        result.appendTimestamp( "mine" , info->getVersion(ns).toLong() );
    else
        result.appendTimestamp( "mine" , 0 );

    if ( cmdObj["fullMetadata"].trueValue() ) {
        CollectionMetadataPtr metadata = shardingState.getCollectionMetadata( ns );
        if ( metadata ) result.append( "metadata", metadata->toBSON() );
        else result.append( "metadata", BSONObj() );
    }

    return true;
}
Example 8: checkConfigOrInit
bool checkConfigOrInit( const string& configdb , bool authoritative , string& errmsg ,
                        BSONObjBuilder& result , bool locked=false ) const {
    if ( configdb.size() == 0 ) {
        errmsg = "no configdb";
        return false;
    }

    if ( shardingState.enabled() ) {
        if ( configdb == shardingState.getConfigServer() )
            return true;

        result.append( "configdb" , BSON( "stored" << shardingState.getConfigServer() <<
                                          "given" << configdb ) );

        errmsg = str::stream() << "mongos specified a different config database string : "
                               << "stored : " << shardingState.getConfigServer()
                               << " vs given : " << configdb;
        return false;
    }

    if ( ! authoritative ) {
        result.appendBool( "need_authoritative" , true );
        errmsg = "first setShardVersion";
        return false;
    }

    if ( locked ) {
        ShardedConnectionInfo::addHook();
        shardingState.enable( configdb );
        configServer.init( configdb );
        return true;
    }

    Lock::GlobalWrite lk;
    return checkConfigOrInit( configdb , authoritative , errmsg , result , true );
}
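Note the tail of the function: if initialization is actually needed and we are not yet locked, it takes the global write lock and calls itself with locked=true, so every check is repeated under mutual exclusion. A distilled, self-contained sketch of that check-then-retry-under-lock idiom, with std::mutex and an atomic flag standing in for mongo's locking; all names are assumed:

#include <atomic>
#include <mutex>
#include <string>

class ConfigInit {
public:
    bool checkOrInit(const std::string& configdb, bool locked = false) {
        if (_initialized)
            return _configdb == configdb;   // fast path: already set up
        if (!locked) {
            std::lock_guard<std::mutex> lk(_mutex);
            return checkOrInit(configdb, /*locked=*/true);  // re-run checks under the lock
        }
        _configdb = configdb;               // safe: we hold _mutex here
        _initialized = true;
        return true;
    }

private:
    std::mutex _mutex;
    std::atomic<bool> _initialized{false};
    std::string _configdb;
};

int main() {
    ConfigInit init;
    init.checkOrInit("cfg1:27019");             // first call initializes
    bool ok  = init.checkOrInit("cfg1:27019");  // true: matches stored value
    bool bad = init.checkOrInit("cfg2:27019");  // false: different configdb
    (void)ok; (void)bad;
}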
Example 9: checkConfigOrInit
bool checkConfigOrInit( const string& configdb , bool authoritative , string& errmsg ,
                        BSONObjBuilder& result , bool locked=false ) const {
    if ( configdb.size() == 0 ) {
        errmsg = "no configdb";
        return false;
    }

    if ( shardingState.enabled() ) {
        if ( configdb == shardingState.getConfigServer() )
            return true;

        result.append( "configdb" , BSON( "stored" << shardingState.getConfigServer() <<
                                          "given" << configdb ) );
        errmsg = "specified a different configdb!";
        return false;
    }

    if ( ! authoritative ) {
        result.appendBool( "need_authoritative" , true );
        errmsg = "first setShardVersion";
        return false;
    }

    if ( locked ) {
        shardingState.enable( configdb );
        configServer.init( configdb );
        return true;
    }

    dblock lk;
    return checkConfigOrInit( configdb , authoritative , errmsg , result , true );
}
Example 10: shardVersionOk
/**
 * @return true if not in sharded mode
 *         or if the version for this client is ok
 */
bool shardVersionOk( const string& ns , bool isWriteOp , string& errmsg ) {
    if ( ! shardingState.enabled() )
        return true;

    ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );
    if ( ! info ) {
        // this means the client has nothing sharded,
        // so this allows direct connections to do whatever they want,
        // which i think is the correct behavior
        return true;
    }

    if ( info->inForceVersionOkMode() ) {
        return true;
    }

    // TODO
    // all collections, sharded or not, will at some point have a version (and a
    // ShardChunkManager). For now we remove the sharding state of dropped collections,
    // so delayed requests may still come in. This has to be fixed.
    ConfigVersion clientVersion = info->getVersion(ns);
    ConfigVersion version;
    if ( ! shardingState.hasVersion( ns , version ) && clientVersion == 0 ) {
        return true;
    }

    if ( version == 0 && clientVersion > 0 ) {
        stringstream ss;
        ss << "collection was dropped or this shard is no longer valid. version: " << version
           << " clientVersion: " << clientVersion;
        errmsg = ss.str();
        return false;
    }

    if ( clientVersion >= version )
        return true;

    if ( clientVersion == 0 ) {
        stringstream ss;
        ss << "client in sharded mode, but doesn't have version set for this collection: "
           << ns << " myVersion: " << version;
        errmsg = ss.str();
        return false;
    }

    if ( version.majorVersion() == clientVersion.majorVersion() ) {
        // this means there was just a split;
        // since on a split w/o a migrate this server is ok,
        // we're going to accept
        return true;
    }

    stringstream ss;
    ss << "your version is too old. ns: " + ns << " global: " << version
       << " client: " << clientVersion;
    errmsg = ss.str();
    return false;
}
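The ladder of comparisons above is easier to see with the BSON plumbing stripped away. A boiled-down sketch, with versions modeled as (major, minor) pairs; purely illustrative:

#include <string>
#include <utility>

typedef std::pair<int, int> Version;   // (major, minor); (0,0) means unset

bool versionOk(const Version& wanted, const Version& received, std::string* errmsg) {
    if (wanted == Version(0, 0) && received > Version(0, 0)) {
        *errmsg = "collection was dropped or shard no longer valid";
        return false;
    }
    if (received >= wanted)
        return true;                       // client is up to date (or ahead)
    if (received == Version(0, 0)) {
        *errmsg = "client has no version set for this collection";
        return false;
    }
    if (wanted.first == received.first)
        return true;                       // same major version: only a split happened
    *errmsg = "client version is too old";
    return false;
}

int main() {
    std::string err;
    versionOk(Version(2, 0), Version(1, 3), &err);  // false: major version behind
    versionOk(Version(2, 1), Version(2, 0), &err);  // true: same major, just a split
}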
Example 11: haveLocalShardingInfo
bool haveLocalShardingInfo( const string& ns ) {
    if ( ! shardingState.enabled() )
        return false;

    if ( ! shardingState.hasVersion( ns ) )
        return false;

    // a non-null ShardedConnectionInfo means this connection is versioned
    return ShardedConnectionInfo::get(false) != 0;
}
Example 12: shardVersionOk
/**
 * @return true if not in sharded mode
 *         or if the version for this client is ok
 */
bool shardVersionOk( const string& ns , bool isWriteOp , string& errmsg ) {
    if ( ! shardingState.enabled() )
        return true;

    ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );
    if ( ! info ) {
        // this means the client has nothing sharded,
        // so this allows direct connections to do whatever they want,
        // which i think is the correct behavior
        return true;
    }

    if ( info->inForceVersionOkMode() ) {
        return true;
    }

    ConfigVersion version;
    if ( ! shardingState.hasVersion( ns , version ) ) {
        return true;
    }

    ConfigVersion clientVersion = info->getVersion(ns);

    if ( version == 0 && clientVersion > 0 ) {
        stringstream ss;
        ss << "collection was dropped or this shard is no longer valid. version: " << version
           << " clientVersion: " << clientVersion;
        errmsg = ss.str();
        return false;
    }

    if ( clientVersion >= version )
        return true;

    if ( clientVersion == 0 ) {
        stringstream ss;
        ss << "client in sharded mode, but doesn't have version set for this collection: "
           << ns << " myVersion: " << version;
        errmsg = ss.str();
        return false;
    }

    if ( isWriteOp && version.majorVersion() == clientVersion.majorVersion() ) {
        // this means there was just a split;
        // since on a split w/o a migrate this server is ok,
        // we're going to accept the write
        return true;
    }

    stringstream ss;
    ss << "your version is too old. ns: " + ns << " global: " << version
       << " client: " << clientVersion;
    errmsg = ss.str();
    return false;
}
Example 13: run
bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
    string ns = cmdObj["getShardVersion"].valuestrsafe();
    if ( ns.size() == 0 ) {
        errmsg = "need to specify full namespace";
        return false;
    }

    result.append( "configServer" , shardingState.getConfigServer() );
    result.appendTimestamp( "global" , shardingState.getVersion(ns) );

    ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );
    if ( info )
        result.appendTimestamp( "mine" , info->getVersion(ns) );
    else
        result.appendTimestamp( "mine" , 0 );

    return true;
}
Example 14: run
bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
    // Debugging code for SERVER-1633. Commands already have a coarser timer for
    // normal operation.
    Timer timer;
    vector<int> laps;

    lastError.disableForCommand();
    ShardedConnectionInfo* info = ShardedConnectionInfo::get( true );

    bool authoritative = cmdObj.getBoolField( "authoritative" );

    string configdb = cmdObj["configdb"].valuestrsafe();
    { // configdb checking
        if ( configdb.size() == 0 ) {
            errmsg = "no configdb";
            return false;
        }

        if ( shardingState.enabled() ) {
            if ( configdb != shardingState.getConfigServer() ) {
                errmsg = "specified a different configdb!";
                return false;
            }
        }
        else {
            if ( ! authoritative ) {
                result.appendBool( "need_authoritative" , true );
                errmsg = "first setShardVersion";
                return false;
            }
            shardingState.enable( configdb );
            configServer.init( configdb );
        }
    }

    // SERVER-1633
    laps.push_back( timer.millis() );

    if ( cmdObj["shard"].type() == String ) {
        shardingState.gotShardName( cmdObj["shard"].String() );
        shardingState.gotShardHost( cmdObj["shardHost"].String() );
    }

    { // setting up ids
        if ( cmdObj["serverID"].type() != jstOID ) {
            // TODO: fix this
            //errmsg = "need serverID to be an OID";
            //return 0;
        }
        else {
            OID clientId = cmdObj["serverID"].__oid();
            if ( ! info->hasID() ) {
                info->setID( clientId );
            }
            else if ( clientId != info->getID() ) {
                errmsg = "server id has changed!";
                return 0;
            }
        }
    }

    // SERVER-1633
    laps.push_back( timer.millis() );

    unsigned long long version = extractVersion( cmdObj["version"] , errmsg );

    if ( errmsg.size() ) {
        return false;
    }

    string ns = cmdObj["setShardVersion"].valuestrsafe();
    if ( ns.size() == 0 ) {
        errmsg = "need to specify full namespace";
        return false;
    }

    ConfigVersion& oldVersion = info->getVersion(ns);
    ConfigVersion& globalVersion = shardingState.getVersion(ns);

    if ( oldVersion > 0 && globalVersion == 0 ) {
        // this had been reset
        oldVersion = 0;
    }

    if ( version == 0 && globalVersion == 0 ) {
        // this connection is cleaning itself
        oldVersion = 0;
        return 1;
    }

    // SERVER-1633
    laps.push_back( timer.millis() );

    if ( version == 0 && globalVersion > 0 ) {
        if ( ! authoritative ) {
            result.appendBool( "need_authoritative" , true );
            result.appendTimestamp( "globalVersion" , globalVersion );
            result.appendTimestamp( "oldVersion" , oldVersion );
            errmsg = "dropping needs to be authoritative";
//......... part of the code omitted here .........
Example 15: shardVersionOk
/**
 * @return true if not in sharded mode
 *         or if the version for this client is ok
 */
bool shardVersionOk( const string& ns , string& errmsg, ConfigVersion& received, ConfigVersion& wanted ) {
    if ( ! shardingState.enabled() )
        return true;

    if ( ! isMasterNs( ns.c_str() ) ) {
        // right now connections to secondaries aren't versioned at all
        return true;
    }

    ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );
    if ( ! info ) {
        // this means the client has nothing sharded,
        // so this allows direct connections to do whatever they want,
        // which i think is the correct behavior
        return true;
    }

    if ( info->inForceVersionOkMode() ) {
        return true;
    }

    // TODO
    // all collections, sharded or not, will at some point have a version (and a
    // ShardChunkManager). For now we remove the sharding state of dropped collections,
    // so delayed requests may still come in. This has to be fixed.
    ConfigVersion clientVersion = info->getVersion(ns);
    ConfigVersion version;
    if ( ! shardingState.hasVersion( ns , version ) && ! clientVersion.isSet() ) {
        return true;
    }

    // The versions we're going to compare, saved for future use
    received = clientVersion;
    wanted = version;

    if ( ! version.isSet() && clientVersion.isSet() ) {
        stringstream ss;
        ss << "collection was dropped or this shard is no longer valid";
        errmsg = ss.str();
        return false;
    }

    if ( clientVersion >= version )
        return true;

    if ( ! clientVersion.isSet() ) {
        stringstream ss;
        ss << "client in sharded mode, but doesn't have version set for this collection";
        errmsg = ss.str();
        return false;
    }

    if ( version.majorVersion() == clientVersion.majorVersion() ) {
        // this means there was just a split;
        // since on a split w/o a migrate this server is ok,
        // we're going to accept
        return true;
    }

    stringstream ss;
    ss << "your version is too old";
    errmsg = ss.str();
    return false;
}
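Unlike the earlier variants, this overload reports the compared versions through the received/wanted out-parameters instead of formatting them into errmsg. A hypothetical caller (all names assumed) showing why that is convenient:

#include <iostream>
#include <string>

typedef unsigned long long ConfigVersion;

// Stand-in for the check above: always fails with fixed versions, purely for illustration.
bool versionOkStub(ConfigVersion* received, ConfigVersion* wanted, std::string* errmsg) {
    *received = 3;
    *wanted = 7;
    *errmsg = "your version is too old";
    return false;
}

int main() {
    ConfigVersion received = 0, wanted = 0;
    std::string errmsg;
    if (!versionOkStub(&received, &wanted, &errmsg)) {
        // the caller can attach both versions to the error it reports back
        std::cout << errmsg << " (received: " << received
                  << ", wanted: " << wanted << ")\n";
    }
}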