This article collects typical usage examples of the C++ method DBConfigPtr::getChunkManagerIfExists. If you are wondering how DBConfigPtr::getChunkManagerIfExists is used in real code, the curated examples here may help. You can also browse further usage examples of the containing class, DBConfigPtr.
Below, 11 code examples of DBConfigPtr::getChunkManagerIfExists are shown, sorted by popularity by default.
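Judging from the call sites collected below, the method takes a namespace plus one or two boolean flags (roughly "refresh now" and "force a full reload") and returns an empty ChunkManagerPtr rather than throwing when the collection is not sharded. The signature shown in the comment is an assumption inferred from these examples, not copied from the MongoDB headers; exampleLookup and the namespace "demo.coll" are hypothetical. A minimal sketch:

// Minimal usage sketch. Assumed signature, inferred from the examples below:
//   ChunkManagerPtr DBConfig::getChunkManagerIfExists( const string& ns,
//                                                      bool shouldReload = false,
//                                                      bool forceReload = false );
// exampleLookup and "demo.coll" are illustrative names only.
void exampleLookup() {
    const string ns = "demo.coll";
    DBConfigPtr conf = grid.getDBConfig( ns );
    if ( !conf || !conf->isSharded( ns ) )
        return;   // unknown database or unsharded collection: nothing to refresh
    // Passing true asks for a refresh of the cached collection metadata; unlike
    // getChunkManager, failing to find a sharded collection yields an empty pointer
    // instead of an exception.
    ChunkManagerPtr manager = conf->getChunkManagerIfExists( ns, true );
    if ( manager )
        log() << "current chunk manager sequence: " << manager->getSequenceNumber() << endl;
}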
Example 1: refreshChunkCache
// TODO: This refresh logic should be consolidated
void refreshChunkCache( const NamespaceString& nss ) {
    DBConfigPtr config = grid.getDBConfig( nss.ns() );
    if ( !config->isSharded( nss ) ) return;
    // Refreshes chunks as a side-effect
    config->getChunkManagerIfExists( nss, true );
}
Example 2: forceRemoteCheckShardVersion
bool forceRemoteCheckShardVersion( const string& ns ){
    DBConfigPtr conf = grid.getDBConfig( ns );
    if ( ! conf ) return false;
    conf->reload();
    ChunkManagerPtr manager = conf->getChunkManagerIfExists( ns, true, true );
    if( ! manager ) return false;
    return true;
}
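One way such a helper tends to be used is in a stale-config retry loop; the sketch below is an assumption about the caller, not taken from the MongoDB sources (runQueryOnce, kMaxRetries, and the loop itself are illustrative):

// Hypothetical retry wrapper around an operation that may hit a stale shard version.
bool runWithStaleConfigRetry( const string& ns ) {
    const int kMaxRetries = 3;                 // hypothetical retry budget
    for ( int attempt = 0; attempt < kMaxRetries; ++attempt ) {
        try {
            return runQueryOnce( ns );         // hypothetical operation
        }
        catch ( const SendStaleConfigException& ) {
            // The locally cached chunk manager is out of date: force a reload from the
            // config servers and retry. A false return means the database is unknown or
            // the collection is no longer sharded, so retrying is pointless.
            if ( !forceRemoteCheckShardVersion( ns ) )
                return false;
        }
    }
    return false;
}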
Example 3: refreshNow
Status ChunkManagerTargeter::refreshNow( RefreshType refreshType ) {
    DBConfigPtr config;
    string errMsg;
    if ( !getDBConfigSafe( _nss.db(), config, &errMsg ) ) {
        return Status( ErrorCodes::DatabaseNotFound, errMsg );
    }
    // Try not to spam the configs
    refreshBackoff();
    // TODO: Improve synchronization and make more explicit
    if ( refreshType == RefreshType_RefreshChunkManager ) {
        try {
            // Forces a remote check of the collection info, synchronization between threads
            // happens internally.
            config->getChunkManagerIfExists( _nss.ns(), true );
        }
        catch ( const DBException& ex ) {
            return Status( ErrorCodes::UnknownError, ex.toString() );
        }
        config->getChunkManagerOrPrimary( _nss.ns(), _manager, _primary );
    }
    else if ( refreshType == RefreshType_ReloadDatabase ) {
        try {
            // Dumps the db info, reloads it all, synchronization between threads happens
            // internally.
            config->reload();
            config->getChunkManagerIfExists( _nss.ns(), true, true );
        }
        catch ( const DBException& ex ) {
            return Status( ErrorCodes::UnknownError, ex.toString() );
        }
        config->getChunkManagerOrPrimary( _nss.ns(), _manager, _primary );
    }
    return Status::OK();
}
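The two refresh types differ only in scope: RefreshType_RefreshChunkManager calls getChunkManagerIfExists( ns, true ) to force a remote check of a single collection's metadata, while RefreshType_ReloadDatabase first calls reload() to re-fetch the whole database entry and then getChunkManagerIfExists( ns, true, true ) to force-reload the collection on top of that. Both branches finish by re-reading the now-current manager or primary via getChunkManagerOrPrimary.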
Example 4: forceRemoteCheckShardVersionCB
bool VersionManager::forceRemoteCheckShardVersionCB( const string& ns ){
    DBConfigPtr conf = grid.getDBConfig( ns );
    if ( ! conf ) return false;
    conf->reload();
    // If we don't have a collection, don't refresh the chunk manager
    if( nsGetCollection( ns ).size() == 0 ) return false;
    ChunkManagerPtr manager = conf->getChunkManagerIfExists( ns, true, true );
    if( ! manager ) return false;
    return true;
}
Example 5: checkShardVersion
/**
 * @return true if had to do something
 */
bool checkShardVersion( DBClientBase * conn_in , const string& ns , ChunkManagerPtr refManager, bool authoritative , int tryNumber ) {
    // TODO: cache, optimize, etc...
    WriteBackListener::init( *conn_in );
    DBConfigPtr conf = grid.getDBConfig( ns );
    if ( ! conf )
        return false;
    DBClientBase* conn = getVersionable( conn_in );
    verify(conn); // errors thrown above
    unsigned long long officialSequenceNumber = 0;
    ChunkManagerPtr manager;
    const bool isSharded = conf->isSharded( ns );
    if ( isSharded ) {
        manager = conf->getChunkManagerIfExists( ns , authoritative );
        // It's possible the chunk manager was reset since we checked whether sharded was true,
        // so must check this here.
        if( manager ) officialSequenceNumber = manager->getSequenceNumber();
    }
    // Check this manager against the reference manager
    if( isSharded && manager ){
        Shard shard = Shard::make( conn->getServerAddress() );
        if( refManager && ! refManager->compatibleWith( manager, shard ) ){
            throw SendStaleConfigException( ns, str::stream() << "manager (" << manager->getVersion( shard ).toString() << " : " << manager->getSequenceNumber() << ") "
                                                << "not compatible with reference manager (" << refManager->getVersion( shard ).toString() << " : " << refManager->getSequenceNumber() << ") "
                                                << "on shard " << shard.getName() << " (" << shard.getAddress().toString() << ")",
                                            refManager->getVersion( shard ), manager->getVersion( shard ) );
        }
    }
    else if( refManager ){
        Shard shard = Shard::make( conn->getServerAddress() );
        string msg( str::stream() << "not sharded ("
                    << ( (manager.get() == 0) ? string( "<none>" ) :
                         str::stream() << manager->getSequenceNumber() )
                    << ") but has reference manager ("
                    << refManager->getSequenceNumber() << ") "
                    << "on conn " << conn->getServerAddress() << " ("
                    << conn_in->getServerAddress() << ")" );
        throw SendStaleConfigException( ns, msg,
                                        refManager->getVersion( shard ), ShardChunkVersion( 0, OID() ));
    }
    // has the ChunkManager been reloaded since the last time we updated the connection-level version?
    // (ie., last time we issued the setShardVersions below)
    unsigned long long sequenceNumber = connectionShardStatus.getSequence(conn,ns);
    if ( sequenceNumber == officialSequenceNumber ) {
        return false;
    }
    ShardChunkVersion version = ShardChunkVersion( 0, OID() );
    if ( isSharded && manager ) {
        version = manager->getVersion( Shard::make( conn->getServerAddress() ) );
    }
    if( ! version.isSet() ){
        LOG(0) << "resetting shard version of " << ns << " on " << conn->getServerAddress() << ", " <<
                  ( ! isSharded ? "no longer sharded" :
                    ( ! manager ? "no chunk manager found" :
                      "version is zero" ) ) << endl;
    }
    LOG(2) << " have to set shard version for conn: " << conn->getServerAddress() << " ns:" << ns
           << " my last seq: " << sequenceNumber << " current: " << officialSequenceNumber
           << " version: " << version << " manager: " << manager.get()
           << endl;
    const string versionableServerAddress(conn->getServerAddress());
    BSONObj result;
    if ( setShardVersion( *conn , ns , version , authoritative , result ) ) {
        // success!
        LOG(1) << " setShardVersion success: " << result << endl;
        connectionShardStatus.setSequence( conn , ns , officialSequenceNumber );
        return true;
    }
    LOG(1) << " setShardVersion failed!\n" << result << endl;
    if ( result["need_authoritative"].trueValue() )
        massert( 10428 , "need_authoritative set but in authoritative mode already" , ! authoritative );
    if ( ! authoritative ) {
        // use the original connection and get a fresh versionable connection
        // since conn can be invalidated (or worse, freed) after the failure
        checkShardVersion(conn_in, ns, refManager, 1, tryNumber + 1);
        return true;
    }
    if ( result["reloadConfig"].trueValue() ) {
        if( result["version"].timestampTime() == 0 ){
//......... part of the code omitted here .........
Example 6: checkShardVersion
/**
 * @return true if had to do something
 */
bool checkShardVersion( DBClientBase& conn_in , const string& ns , bool authoritative , int tryNumber ) {
    // TODO: cache, optimize, etc...
    WriteBackListener::init( conn_in );
    DBConfigPtr conf = grid.getDBConfig( ns );
    if ( ! conf )
        return false;
    DBClientBase* conn = 0;
    switch ( conn_in.type() ) {
    case ConnectionString::INVALID:
        assert(0);
        break;
    case ConnectionString::MASTER:
        // great
        conn = &conn_in;
        break;
    case ConnectionString::PAIR:
        assert( ! "pair not support for sharding" );
        break;
    case ConnectionString::SYNC:
        // TODO: we should check later that we aren't actually sharded on this
        conn = &conn_in;
        break;
    case ConnectionString::SET:
        DBClientReplicaSet* set = (DBClientReplicaSet*)&conn_in;
        conn = &(set->masterConn());
        break;
    }
    assert(conn);
    unsigned long long officialSequenceNumber = 0;
    ChunkManagerPtr manager;
    const bool isSharded = conf->isSharded( ns );
    if ( isSharded ) {
        manager = conf->getChunkManagerIfExists( ns , authoritative );
        // It's possible the chunk manager was reset since we checked whether sharded was true,
        // so must check this here.
        if( manager ) officialSequenceNumber = manager->getSequenceNumber();
    }
    // has the ChunkManager been reloaded since the last time we updated the connection-level version?
    // (ie., last time we issued the setShardVersions below)
    unsigned long long sequenceNumber = connectionShardStatus.getSequence(conn,ns);
    if ( sequenceNumber == officialSequenceNumber ) {
        return false;
    }
    ShardChunkVersion version = 0;
    if ( isSharded && manager ) {
        version = manager->getVersion( Shard::make( conn->getServerAddress() ) );
    }
    LOG(2) << " have to set shard version for conn: " << conn << " ns:" << ns
           << " my last seq: " << sequenceNumber << " current: " << officialSequenceNumber
           << " version: " << version << " manager: " << manager.get()
           << endl;
    BSONObj result;
    if ( setShardVersion( *conn , ns , version , authoritative , result ) ) {
        // success!
        LOG(1) << " setShardVersion success: " << result << endl;
        connectionShardStatus.setSequence( conn , ns , officialSequenceNumber );
        return true;
    }
    LOG(1) << " setShardVersion failed!\n" << result << endl;
    if ( result["need_authoritative"].trueValue() )
        massert( 10428 , "need_authoritative set but in authoritative mode already" , ! authoritative );
    if ( ! authoritative ) {
        checkShardVersion( *conn , ns , 1 , tryNumber + 1 );
        return true;
    }
    if ( result["reloadConfig"].trueValue() ) {
        if( result["version"].timestampTime() == 0 ){
            // reload db
            conf->reload();
        }
        else {
            // reload config
            conf->getChunkManager( ns , true );
        }
    }
    const int maxNumTries = 7;
    if ( tryNumber < maxNumTries ) {
        LOG( tryNumber < ( maxNumTries / 2 ) ? 1 : 0 )
            << "going to retry checkShardVersion host: " << conn->getServerAddress() << " " << result << endl;
        sleepmillis( 10 * tryNumber );
//......... part of the code omitted here .........
Example 7: _doBalanceRound
//......... part of the code omitted here .........
        if (shardToChunksMap.map().empty()) {
            LOG(1) << "skipping empty collection (" << ns << ")";
            continue;
        }
        for (ShardInfoMap::const_iterator i = shardInfo.begin(); i != shardInfo.end(); ++i) {
            // this just makes sure there is an entry in shardToChunksMap for every shard
            OwnedPointerVector<ChunkType>*& chunkList =
                shardToChunksMap.mutableMap()[i->first];
            if (chunkList == NULL) {
                chunkList = new OwnedPointerVector<ChunkType>();
            }
        }
        DistributionStatus status(shardInfo, shardToChunksMap.map());
        // load tags
        Status result = clusterCreateIndex(TagsType::ConfigNS,
                                           BSON(TagsType::ns() << 1 << TagsType::min() << 1),
                                           true, // unique
                                           WriteConcernOptions::AllConfigs,
                                           NULL);
        if ( !result.isOK() ) {
            warning() << "could not create index tags_1_min_1: " << result.reason() << endl;
            continue;
        }
        cursor = conn.query(TagsType::ConfigNS,
                            QUERY(TagsType::ns(ns)).sort(TagsType::min()));
        vector<TagRange> ranges;
        while ( cursor->more() ) {
            BSONObj tag = cursor->nextSafe();
            TagRange tr(tag[TagsType::min()].Obj().getOwned(),
                        tag[TagsType::max()].Obj().getOwned(),
                        tag[TagsType::tag()].String());
            ranges.push_back(tr);
            uassert(16356,
                    str::stream() << "tag ranges not valid for: " << ns,
                    status.addTagRange(tr) );
        }
        cursor.reset();
        DBConfigPtr cfg = grid.getDBConfig( ns );
        if ( !cfg ) {
            warning() << "could not load db config to balance " << ns << " collection" << endl;
            continue;
        }
        // This line reloads the chunk manager once if this process doesn't know the collection
        // is sharded yet.
        ChunkManagerPtr cm = cfg->getChunkManagerIfExists( ns, true );
        if ( !cm ) {
            warning() << "could not load chunks to balance " << ns << " collection" << endl;
            continue;
        }
        // loop through tags to make sure no chunk spans tags; splits on tag min. for all chunks
        bool didAnySplits = false;
        for ( unsigned i = 0; i < ranges.size(); i++ ) {
            BSONObj min = ranges[i].min;
            min = cm->getShardKey().extendRangeBound( min, false );
            if ( allChunkMinimums.count( min ) > 0 )
                continue;
            didAnySplits = true;
            log() << "ns: " << ns << " need to split on "
                  << min << " because there is a range there" << endl;
            ChunkPtr c = cm->findIntersectingChunk( min );
            vector<BSONObj> splitPoints;
            splitPoints.push_back( min );
            BSONObj res;
            if ( !c->multiSplit( splitPoints, res ) ) {
                error() << "split failed: " << res << endl;
            }
            else {
                LOG(1) << "split worked: " << res << endl;
            }
            break;
        }
        if ( didAnySplits ) {
            // state change, just wait till next round
            continue;
        }
        CandidateChunk* p = _policy->balance( ns, status, _balancedLastTime );
        if ( p ) candidateChunks->push_back( CandidateChunkPtr( p ) );
    }
}
Example 8: checkShardVersion
/**
 * Updates the remote cached version on the remote shard host (primary, in the case of replica
 * sets) if needed with a fully-qualified shard version for the given namespace:
 *   config server(s) + shard name + shard version
 *
 * If no remote cached version has ever been set, an initial shard version is sent.
 *
 * If the namespace is empty and no version has ever been sent, the config server + shard name
 * is sent to the remote shard host to initialize the connection as coming from mongos.
 * NOTE: This initialization is *best-effort only*. Operations which wish to correctly version
 * must send the namespace.
 *
 * Config servers are special and are not (unless otherwise a shard) kept up to date with this
 * protocol. This is safe so long as config servers only contain unversioned collections.
 *
 * It is an error to call checkShardVersion with an unversionable connection (isVersionableCB).
 *
 * @return true if we contacted the remote host
 */
bool checkShardVersion( DBClientBase * conn_in , const string& ns , ChunkManagerPtr refManager, bool authoritative , int tryNumber ) {
    // TODO: cache, optimize, etc...
    // Empty namespaces are special - we require initialization but not versioning
    if (ns.size() == 0) {
        return initShardVersionEmptyNS(conn_in);
    }
    DBConfigPtr conf = grid.getDBConfig( ns );
    if ( ! conf )
        return false;
    DBClientBase* conn = getVersionable( conn_in );
    verify(conn); // errors thrown above
    unsigned long long officialSequenceNumber = 0;
    ShardPtr primary;
    ChunkManagerPtr manager;
    if (authoritative)
        conf->getChunkManagerIfExists(ns, true);
    conf->getChunkManagerOrPrimary(ns, manager, primary);
    if (manager)
        officialSequenceNumber = manager->getSequenceNumber();
    // Check this manager against the reference manager
    if( manager ){
        Shard shard = Shard::make( conn->getServerAddress() );
        if (refManager && !refManager->compatibleWith(*manager, shard.getName())) {
            const ChunkVersion refVersion(refManager->getVersion(shard.getName()));
            const ChunkVersion currentVersion(manager->getVersion(shard.getName()));
            string msg(str::stream() << "manager ("
                       << currentVersion.toString()
                       << " : " << manager->getSequenceNumber() << ") "
                       << "not compatible with reference manager ("
                       << refVersion.toString()
                       << " : " << refManager->getSequenceNumber() << ") "
                       << "on shard " << shard.getName()
                       << " (" << shard.getAddress().toString() << ")");
            throw SendStaleConfigException(ns,
                                           msg,
                                           refVersion,
                                           currentVersion);
        }
    }
    else if( refManager ){
        Shard shard = Shard::make(conn->getServerAddress());
        string msg( str::stream() << "not sharded ("
                    << ( (manager.get() == 0) ? string( "<none>" ) :
                         str::stream() << manager->getSequenceNumber() )
                    << ") but has reference manager ("
                    << refManager->getSequenceNumber() << ") "
                    << "on conn " << conn->getServerAddress() << " ("
                    << conn_in->getServerAddress() << ")" );
        throw SendStaleConfigException(ns,
                                       msg,
                                       refManager->getVersion(shard.getName()),
                                       ChunkVersion::UNSHARDED());
    }
    // Do not send setShardVersion to collections on the config servers - this causes problems
    // when config servers are also shards and get SSV with conflicting names.
    // TODO: Make config servers regular shards
    if (primary && primary->getName() == "config") {
        return false;
    }
    // Has the ChunkManager been reloaded since the last time we updated the shard version over
    // this connection? If we've never updated the shard version, do so now.
    unsigned long long sequenceNumber = 0;
    if (connectionShardStatus.getSequence(conn, ns, &sequenceNumber)) {
        if (sequenceNumber == officialSequenceNumber) {
            return false;
        }
    }
//......... part of the code omitted here .........
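Compared with Examples 5 and 6, this revision of checkShardVersion resolves routing state through getChunkManagerOrPrimary (so a database primary is known even for unsharded collections), skips setShardVersion entirely when the primary is the config "shard", and reports ChunkVersion::UNSHARDED() rather than a zero ShardChunkVersion when only the reference manager believes the collection is sharded.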
Example 9: run
//......... part of the code omitted here .........
        }
        else if( lastNeededVersion ) {
            log() << "new version change detected to " << needVersion.toString()
                  << ", " << lastNeededCount << " writebacks processed at "
                  << lastNeededVersion->toString() << endl;
            lastNeededCount = 0;
        }
        //
        // Set our lastNeededVersion for next time
        //
        lastNeededVersion.reset( new ChunkVersion( needVersion ) );
        lastNeededCount++;
        //
        // Determine if we should reload, if so, reload
        //
        bool shouldReload = ! needVersion.isWriteCompatibleWith( currVersion ) &&
                            ! alreadyReloaded;
        if( shouldReload && currVersion.isSet()
                         && needVersion.isSet()
                         && currVersion.hasCompatibleEpoch( needVersion ) )
        {
            //
            // If we disagree about versions only, reload the chunk manager
            //
            db->getChunkManagerIfExists( ns, true );
        }
        else if( shouldReload ){
            //
            // If we disagree about anything else, reload the full db
            //
            warning() << "reloading config data for " << db->getName() << ", "
                      << "wanted version " << needVersion.toString()
                      << " but currently have version " << currVersion.toString() << endl;
            db->reload();
        }
        // do request and then call getLastError
        // we have to call getLastError so we can return the right fields to the user if they decide to call getLastError
        BSONObj gle;
        int attempts = 0;
        while ( true ) {
            attempts++;
            try {
                Request r( msg , 0 );
                r.init();
                r.d().reservedField() |= Reserved_FromWriteback;
                ClientInfo * ci = r.getClientInfo();
                if (!noauth) {
                    ci->getAuthorizationManager()->grantInternalAuthorization(
Example 10: checkShardVersion
/**
 * @return true if had to do something
 */
bool checkShardVersion( DBClientBase& conn_in , const string& ns , bool authoritative , int tryNumber ) {
    // TODO: cache, optimize, etc...
    WriteBackListener::init( conn_in );
    DBConfigPtr conf = grid.getDBConfig( ns );
    if ( ! conf )
        return false;
    DBClientBase* conn = getVersionable( &conn_in );
    assert(conn); // errors thrown above
    unsigned long long officialSequenceNumber = 0;
    ChunkManagerPtr manager;
    const bool isSharded = conf->isSharded( ns );
    if ( isSharded ) {
        manager = conf->getChunkManagerIfExists( ns , authoritative );
        // It's possible the chunk manager was reset since we checked whether sharded was true,
        // so must check this here.
        if( manager ) officialSequenceNumber = manager->getSequenceNumber();
    }
    // has the ChunkManager been reloaded since the last time we updated the connection-level version?
    // (ie., last time we issued the setShardVersions below)
    unsigned long long sequenceNumber = connectionShardStatus.getSequence(conn,ns);
    if ( sequenceNumber == officialSequenceNumber ) {
        return false;
    }
    ShardChunkVersion version = 0;
    if ( isSharded && manager ) {
        version = manager->getVersion( Shard::make( conn->getServerAddress() ) );
    }
    if( version == 0 ){
        LOG(0) << "resetting shard version of " << ns << " on " << conn->getServerAddress() << ", " <<
                  ( ! isSharded ? "no longer sharded" :
                    ( ! manager ? "no chunk manager found" :
                      "version is zero" ) ) << endl;
    }
    LOG(2) << " have to set shard version for conn: " << conn << " ns:" << ns
           << " my last seq: " << sequenceNumber << " current: " << officialSequenceNumber
           << " version: " << version << " manager: " << manager.get()
           << endl;
    BSONObj result;
    if ( setShardVersion( *conn , ns , version , authoritative , result ) ) {
        // success!
        LOG(1) << " setShardVersion success: " << result << endl;
        connectionShardStatus.setSequence( conn , ns , officialSequenceNumber );
        return true;
    }
    LOG(1) << " setShardVersion failed!\n" << result << endl;
    if ( result["need_authoritative"].trueValue() )
        massert( 10428 , "need_authoritative set but in authoritative mode already" , ! authoritative );
    if ( ! authoritative ) {
        checkShardVersion( *conn , ns , 1 , tryNumber + 1 );
        return true;
    }
    if ( result["reloadConfig"].trueValue() ) {
        if( result["version"].timestampTime() == 0 ){
            // reload db
            conf->reload();
        }
        else {
            // reload config
            conf->getChunkManager( ns , true );
        }
    }
    const int maxNumTries = 7;
    if ( tryNumber < maxNumTries ) {
        LOG( tryNumber < ( maxNumTries / 2 ) ? 1 : 0 )
            << "going to retry checkShardVersion host: " << conn->getServerAddress() << " " << result << endl;
        sleepmillis( 10 * tryNumber );
        checkShardVersion( *conn , ns , true , tryNumber + 1 );
        return true;
    }
    string errmsg = str::stream() << "setShardVersion failed host: " << conn->getServerAddress() << " " << result;
    log() << " " << errmsg << endl;
    massert( 10429 , errmsg , 0 );
    return true;
}
Example 11: _doBalanceRound
//......... part of the code omitted here .........
        set<BSONObj> allChunkMinimums;
        while ( cursor->more() ) {
            BSONObj chunk = cursor->nextSafe().getOwned();
            vector<BSONObj>& chunks = shardToChunksMap[chunk[ChunkType::shard()].String()];
            allChunkMinimums.insert( chunk[ChunkType::min()].Obj() );
            chunks.push_back( chunk );
        }
        cursor.reset();
        if (shardToChunksMap.empty()) {
            LOG(1) << "skipping empty collection (" << ns << ")";
            continue;
        }
        for ( vector<Shard>::iterator i=allShards.begin(); i!=allShards.end(); ++i ) {
            // this just makes sure there is an entry in shardToChunksMap for every shard
            Shard s = *i;
            shardToChunksMap[s.getName()].size();
        }
        DistributionStatus status( shardInfo, shardToChunksMap );
        // load tags
        conn.ensureIndex(TagsType::ConfigNS,
                         BSON(TagsType::ns() << 1 << TagsType::min() << 1),
                         true);
        cursor = conn.query(TagsType::ConfigNS,
                            QUERY(TagsType::ns(ns)).sort(TagsType::min()));
        vector<TagRange> ranges;
        while ( cursor->more() ) {
            BSONObj tag = cursor->nextSafe();
            TagRange tr(tag[TagsType::min()].Obj().getOwned(),
                        tag[TagsType::max()].Obj().getOwned(),
                        tag[TagsType::tag()].String());
            ranges.push_back(tr);
            uassert(16356,
                    str::stream() << "tag ranges not valid for: " << ns,
                    status.addTagRange(tr) );
        }
        cursor.reset();
        DBConfigPtr cfg = grid.getDBConfig( ns );
        if ( !cfg ) {
            warning() << "could not load db config to balance " << ns << " collection" << endl;
            continue;
        }
        // This line reloads the chunk manager once if this process doesn't know the collection
        // is sharded yet.
        ChunkManagerPtr cm = cfg->getChunkManagerIfExists( ns, true );
        if ( !cm ) {
            warning() << "could not load chunks to balance " << ns << " collection" << endl;
            continue;
        }
        // loop through tags to make sure no chunk spans tags; splits on tag min. for all chunks
        bool didAnySplits = false;
        for ( unsigned i = 0; i < ranges.size(); i++ ) {
            BSONObj min = ranges[i].min;
            min = cm->getShardKey().extendRangeBound( min, false );
            if ( allChunkMinimums.count( min ) > 0 )
                continue;
            didAnySplits = true;
            log() << "ns: " << ns << " need to split on "
                  << min << " because there is a range there" << endl;
            ChunkPtr c = cm->findIntersectingChunk( min );
            vector<BSONObj> splitPoints;
            splitPoints.push_back( min );
            BSONObj res;
            if ( !c->multiSplit( splitPoints, res ) ) {
                error() << "split failed: " << res << endl;
            }
            else {
                LOG(1) << "split worked: " << res << endl;
            }
            break;
        }
        if ( didAnySplits ) {
            // state change, just wait till next round
            continue;
        }
        CandidateChunk* p = _policy->balance( ns, status, _balancedLastTime );
        if ( p ) candidateChunks->push_back( CandidateChunkPtr( p ) );
    }
}