本文整理汇总了C++中DBConfigPtr::reload方法的典型用法代码示例。如果您正苦于以下问题:C++ DBConfigPtr::reload方法的具体用法?C++ DBConfigPtr::reload怎么用?C++ DBConfigPtr::reload使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类DBConfigPtr
的用法示例。
在下文中一共展示了DBConfigPtr::reload方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: forceRemoteCheckShardVersion
bool forceRemoteCheckShardVersion( const string& ns ){
DBConfigPtr conf = grid.getDBConfig( ns );
if ( ! conf ) return false;
conf->reload();
ChunkManagerPtr manager = conf->getChunkManagerIfExists( ns, true, true );
if( ! manager ) return false;
return true;
}
示例2: forceRemoteCheckShardVersionCB
bool VersionManager::forceRemoteCheckShardVersionCB( const string& ns ){
DBConfigPtr conf = grid.getDBConfig( ns );
if ( ! conf ) return false;
conf->reload();
// If we don't have a collection, don't refresh the chunk manager
if( nsGetCollection( ns ).size() == 0 ) return false;
ChunkManagerPtr manager = conf->getChunkManagerIfExists( ns, true, true );
if( ! manager ) return false;
return true;
}
示例3: guessMergeShard
// TODO: Same limitations as other mongos metadata commands, sometimes we'll be stale here
// and fail. Need to better integrate targeting with commands.
ShardPtr guessMergeShard( const NamespaceString& nss, const BSONObj& minKey ) {
    DBConfigPtr config = grid.getDBConfig( nss.ns() );

    // Guard against a missing database config before dereferencing --
    // sibling helpers in this file perform the same null check after
    // grid.getDBConfig(); return "no shard" rather than crash.
    if ( !config ) {
        return ShardPtr();
    }

    if ( !config->isSharded( nss ) ) {
        // Our cached view may be stale; reload once and re-check.
        config->reload();
        if ( !config->isSharded( nss ) ) {
            return ShardPtr();
        }
    }

    ChunkManagerPtr manager = config->getChunkManager( nss );
    if ( !manager ) return ShardPtr();

    // Target the chunk that would contain minKey and report its owning shard.
    ChunkPtr chunk = manager->findChunkForDoc( minKey );
    if ( !chunk ) return ShardPtr();

    return ShardPtr( new Shard( chunk->getShard() ) );
}
示例4: refreshNow
Status ChunkManagerTargeter::refreshNow( RefreshType refreshType ) {
    string errMsg;
    DBConfigPtr config;
    if ( !getDBConfigSafe( _nss.db(), config, &errMsg ) ) {
        return Status( ErrorCodes::DatabaseNotFound, errMsg );
    }

    // Try not to spam the configs
    refreshBackoff();

    // TODO: Improve synchronization and make more explicit
    try {
        if ( refreshType == RefreshType_RefreshChunkManager ) {
            // Forces a remote check of the collection info, synchronization
            // between threads happens internally.
            config->getChunkManagerIfExists( _nss.ns(), true );
        }
        else if ( refreshType == RefreshType_ReloadDatabase ) {
            // Dumps the db info, reloads it all, synchronization between
            // threads happens internally.
            config->reload();
            config->getChunkManagerIfExists( _nss.ns(), true, true );
        }
        else {
            // Unrecognized refresh type: nothing to refresh.
            return Status::OK();
        }
    }
    catch ( const DBException& ex ) {
        return Status( ErrorCodes::UnknownError, ex.toString() );
    }

    // Re-resolve our targeting state after the refresh above.
    config->getChunkManagerOrPrimary( _nss.ns(), _manager, _primary );
    return Status::OK();
}
示例5: checkShardVersion
/**
 * @return true if had to do something
 */
// NOTE(review): this example is truncated by the aggregation site (see the
// "omitted" marker at the end) -- the function body is incomplete as shown.
bool checkShardVersion( DBClientBase * conn_in , const string& ns , ChunkManagerPtr refManager, bool authoritative , int tryNumber ) {
// TODO: cache, optimize, etc...
WriteBackListener::init( *conn_in );
DBConfigPtr conf = grid.getDBConfig( ns );
// Unknown database -> nothing to version-check.
if ( ! conf )
return false;
DBClientBase* conn = getVersionable( conn_in );
verify(conn); // errors thrown above
unsigned long long officialSequenceNumber = 0;
ChunkManagerPtr manager;
const bool isSharded = conf->isSharded( ns );
if ( isSharded ) {
manager = conf->getChunkManagerIfExists( ns , authoritative );
// It's possible the chunk manager was reset since we checked whether sharded was true,
// so must check this here.
if( manager ) officialSequenceNumber = manager->getSequenceNumber();
}
// Check this manager against the reference manager
if( isSharded && manager ){
Shard shard = Shard::make( conn->getServerAddress() );
// Incompatible with the caller-supplied reference manager -> signal staleness.
if( refManager && ! refManager->compatibleWith( manager, shard ) ){
throw SendStaleConfigException( ns, str::stream() << "manager (" << manager->getVersion( shard ).toString() << " : " << manager->getSequenceNumber() << ") "
<< "not compatible with reference manager (" << refManager->getVersion( shard ).toString() << " : " << refManager->getSequenceNumber() << ") "
<< "on shard " << shard.getName() << " (" << shard.getAddress().toString() << ")",
refManager->getVersion( shard ), manager->getVersion( shard ) );
}
}
else if( refManager ){
// Local view says "not sharded" yet the caller holds a reference manager:
// also a staleness signal.
Shard shard = Shard::make( conn->getServerAddress() );
string msg( str::stream() << "not sharded ("
<< ( (manager.get() == 0) ? string( "<none>" ) :
str::stream() << manager->getSequenceNumber() )
<< ") but has reference manager ("
<< refManager->getSequenceNumber() << ") "
<< "on conn " << conn->getServerAddress() << " ("
<< conn_in->getServerAddress() << ")" );
throw SendStaleConfigException( ns, msg,
refManager->getVersion( shard ), ShardChunkVersion( 0, OID() ));
}
// has the ChunkManager been reloaded since the last time we updated the connection-level version?
// (ie., last time we issued the setShardVersions below)
unsigned long long sequenceNumber = connectionShardStatus.getSequence(conn,ns);
if ( sequenceNumber == officialSequenceNumber ) {
return false;
}
ShardChunkVersion version = ShardChunkVersion( 0, OID() );
if ( isSharded && manager ) {
version = manager->getVersion( Shard::make( conn->getServerAddress() ) );
}
if( ! version.isSet() ){
LOG(0) << "resetting shard version of " << ns << " on " << conn->getServerAddress() << ", " <<
( ! isSharded ? "no longer sharded" :
( ! manager ? "no chunk manager found" :
"version is zero" ) ) << endl;
}
LOG(2) << " have to set shard version for conn: " << conn->getServerAddress() << " ns:" << ns
<< " my last seq: " << sequenceNumber << " current: " << officialSequenceNumber
<< " version: " << version << " manager: " << manager.get()
<< endl;
const string versionableServerAddress(conn->getServerAddress());
BSONObj result;
if ( setShardVersion( *conn , ns , version , authoritative , result ) ) {
// success!
LOG(1) << " setShardVersion success: " << result << endl;
connectionShardStatus.setSequence( conn , ns , officialSequenceNumber );
return true;
}
LOG(1) << " setShardVersion failed!\n" << result << endl;
if ( result["need_authoritative"].trueValue() )
massert( 10428 , "need_authoritative set but in authoritative mode already" , ! authoritative );
if ( ! authoritative ) {
// use the original connection and get a fresh versionable connection
// since conn can be invalidated (or worse, freed) after the failure
checkShardVersion(conn_in, ns, refManager, 1, tryNumber + 1);
return true;
}
if ( result["reloadConfig"].trueValue() ) {
if( result["version"].timestampTime() == 0 ){
//......... part of the code is omitted here .........
示例6: checkShardVersion
/**
 * @return true if had to do something
 */
// NOTE(review): this example is truncated by the aggregation site (see the
// "omitted" marker at the end) -- the function body is incomplete as shown.
bool checkShardVersion( DBClientBase& conn_in , const string& ns , bool authoritative , int tryNumber ) {
// TODO: cache, optimize, etc...
WriteBackListener::init( conn_in );
DBConfigPtr conf = grid.getDBConfig( ns );
// Unknown database -> nothing to version-check.
if ( ! conf )
return false;
// Resolve the connection that can actually carry setShardVersion,
// depending on the connection-string type.
DBClientBase* conn = 0;
switch ( conn_in.type() ) {
case ConnectionString::INVALID:
assert(0);
break;
case ConnectionString::MASTER:
// great
conn = &conn_in;
break;
case ConnectionString::PAIR:
assert( ! "pair not support for sharding" );
break;
case ConnectionString::SYNC:
// TODO: we should check later that we aren't actually sharded on this
conn = &conn_in;
break;
case ConnectionString::SET:
// For replica sets, version the master connection.
DBClientReplicaSet* set = (DBClientReplicaSet*)&conn_in;
conn = &(set->masterConn());
break;
}
assert(conn);
unsigned long long officialSequenceNumber = 0;
ChunkManagerPtr manager;
const bool isSharded = conf->isSharded( ns );
if ( isSharded ) {
manager = conf->getChunkManagerIfExists( ns , authoritative );
// It's possible the chunk manager was reset since we checked whether sharded was true,
// so must check this here.
if( manager ) officialSequenceNumber = manager->getSequenceNumber();
}
// has the ChunkManager been reloaded since the last time we updated the connection-level version?
// (ie., last time we issued the setShardVersions below)
unsigned long long sequenceNumber = connectionShardStatus.getSequence(conn,ns);
if ( sequenceNumber == officialSequenceNumber ) {
return false;
}
ShardChunkVersion version = 0;
if ( isSharded && manager ) {
version = manager->getVersion( Shard::make( conn->getServerAddress() ) );
}
LOG(2) << " have to set shard version for conn: " << conn << " ns:" << ns
<< " my last seq: " << sequenceNumber << " current: " << officialSequenceNumber
<< " version: " << version << " manager: " << manager.get()
<< endl;
BSONObj result;
if ( setShardVersion( *conn , ns , version , authoritative , result ) ) {
// success!
LOG(1) << " setShardVersion success: " << result << endl;
connectionShardStatus.setSequence( conn , ns , officialSequenceNumber );
return true;
}
LOG(1) << " setShardVersion failed!\n" << result << endl;
if ( result["need_authoritative"].trueValue() )
massert( 10428 , "need_authoritative set but in authoritative mode already" , ! authoritative );
if ( ! authoritative ) {
// Retry once in authoritative mode before giving up.
checkShardVersion( *conn , ns , 1 , tryNumber + 1 );
return true;
}
if ( result["reloadConfig"].trueValue() ) {
if( result["version"].timestampTime() == 0 ){
// reload db
conf->reload();
}
else {
// reload config
conf->getChunkManager( ns , true );
}
}
const int maxNumTries = 7;
if ( tryNumber < maxNumTries ) {
LOG( tryNumber < ( maxNumTries / 2 ) ? 1 : 0 )
<< "going to retry checkShardVersion host: " << conn->getServerAddress() << " " << result << endl;
sleepmillis( 10 * tryNumber );
//......... part of the code is omitted here .........
示例7: checkShardVersion
//......... part of the code is omitted here .........
// NOTE(review): the beginning of this example was truncated by the
// aggregation site; the lines below are the tail of a checkShardVersion
// overload, starting mid-way through building a staleness message.
<< ( (manager.get() == 0) ? string( "<none>" ) :
str::stream() << manager->getSequenceNumber() )
<< ") but has reference manager ("
<< refManager->getSequenceNumber() << ") "
<< "on conn " << conn->getServerAddress() << " ("
<< conn_in->getServerAddress() << ")" );
throw SendStaleConfigException(ns,
msg,
refManager->getVersion(shard.getName()),
ChunkVersion::UNSHARDED());
}
// Do not send setShardVersion to collections on the config servers - this causes problems
// when config servers are also shards and get SSV with conflicting names.
// TODO: Make config servers regular shards
if (primary && primary->getName() == "config") {
return false;
}
// Has the ChunkManager been reloaded since the last time we updated the shard version over
// this connection? If we've never updated the shard version, do so now.
unsigned long long sequenceNumber = 0;
if (connectionShardStatus.getSequence(conn, ns, &sequenceNumber)) {
if (sequenceNumber == officialSequenceNumber) {
return false;
}
}
// Now that we're sure we're sending SSV and not to a single config server, get the shard
Shard shard = Shard::make(conn->getServerAddress());
ChunkVersion version = ChunkVersion(0, 0, OID());
if (manager)
version = manager->getVersion(shard.getName());
LOG(1) << "setting shard version of " << version << " for " << ns << " on shard "
<< shard.toString();
LOG(3) << "last version sent with chunk manager iteration " << sequenceNumber
<< ", current chunk manager iteration is " << officialSequenceNumber;
BSONObj result;
if (setShardVersion(*conn,
ns,
configServer.modelServer(),
version,
manager.get(),
authoritative,
result)) {
// Success: remember the sequence number we synced to on this connection.
LOG(1) << " setShardVersion success: " << result;
connectionShardStatus.setSequence( conn , ns , officialSequenceNumber );
return true;
}
LOG(1) << " setShardVersion failed!\n" << result << endl;
if ( result["need_authoritative"].trueValue() )
massert( 10428 , "need_authoritative set but in authoritative mode already" , ! authoritative );
if ( ! authoritative ) {
// use the original connection and get a fresh versionable connection
// since conn can be invalidated (or worse, freed) after the failure
checkShardVersion(conn_in, ns, refManager, 1, tryNumber + 1);
return true;
}
if ( result["reloadConfig"].trueValue() ) {
if( result["version"].timestampTime() == 0 ){
warning() << "reloading full configuration for " << conf->name()
<< ", connection state indicates significant version changes";
// reload db
conf->reload();
}
else {
// reload config
conf->getChunkManager( ns , true );
}
}
const int maxNumTries = 7;
if ( tryNumber < maxNumTries ) {
LOG( tryNumber < ( maxNumTries / 2 ) ? 1 : 0 )
<< "going to retry checkShardVersion shard: " << shard.toString() << " " << result;
sleepmillis( 10 * tryNumber );
// use the original connection and get a fresh versionable connection
// since conn can be invalidated (or worse, freed) after the failure
checkShardVersion(conn_in, ns, refManager, true, tryNumber + 1);
return true;
}
// Out of retries: log and abort the operation via massert.
string errmsg = str::stream() << "setShardVersion failed shard: " << shard.toString()
<< " " << result;
log() << " " << errmsg << endl;
massert( 10429 , errmsg , 0 );
return true;
}
示例8: run
// Listener loop that blocks on the shard's "writebacklisten" command and
// processes returned writeback entries.
// NOTE(review): this example is truncated by the aggregation site (see the
// "omitted" marker at the end) -- the function body is incomplete as shown.
void WriteBackListener::run() {
int secsToSleep = 0;
scoped_ptr<ChunkVersion> lastNeededVersion;
int lastNeededCount = 0;
bool needsToReloadShardInfo = false;
// Loop until process shutdown is requested.
while ( ! inShutdown() ) {
if ( ! Shard::isAShardNode( _addr ) ) {
LOG(1) << _addr << " is not a shard node" << endl;
sleepsecs( 60 );
continue;
}
try {
if (needsToReloadShardInfo) {
// It's possible this shard was removed
Shard::reloadShardInfo();
needsToReloadShardInfo = false;
}
scoped_ptr<ScopedDbConnection> conn(
ScopedDbConnection::getInternalScopedDbConnection( _addr ) );
BSONObj result;
{
BSONObjBuilder cmd;
cmd.appendOID( "writebacklisten" , &serverID ); // Command will block for data
if ( ! conn->get()->runCommand( "admin" , cmd.obj() , result ) ) {
result = result.getOwned();
log() << "writebacklisten command failed! " << result << endl;
conn->done();
continue;
}
}
conn->done();
LOG(1) << "writebacklisten result: " << result << endl;
BSONObj data = result.getObjectField( "data" );
if ( data.getBoolField( "writeBack" ) ) {
string ns = data["ns"].valuestrsafe();
// Identify the originating connection and writeback id, when present.
ConnectionIdent cid( "" , 0 );
OID wid;
if ( data["connectionId"].isNumber() && data["id"].type() == jstOID ) {
string s = "";
if ( data["instanceIdent"].type() == String )
s = data["instanceIdent"].String();
cid = ConnectionIdent( s , data["connectionId"].numberLong() );
wid = data["id"].OID();
}
else {
warning() << "mongos/mongod version mismatch (1.7.5 is the split)" << endl;
}
int len; // not used, but needed for next call
Message msg( (void*)data["msg"].binData( len ) , false );
massert( 10427 , "invalid writeback message" , msg.header()->valid() );
DBConfigPtr db = grid.getDBConfig( ns );
ChunkVersion needVersion = ChunkVersion::fromBSON( data, "version" );
//
// TODO: Refactor the sharded strategy to correctly handle all sharding state changes itself,
// we can't rely on WBL to do this for us b/c anything could reset our state in-between.
// We should always reload here for efficiency when possible, but staleness is also caught in the
// loop below.
//
ChunkManagerPtr manager;
ShardPtr primary;
db->getChunkManagerOrPrimary( ns, manager, primary );
ChunkVersion currVersion;
if( manager ) currVersion = manager->getVersion();
LOG(1) << "connectionId: " << cid << " writebackId: " << wid << " needVersion : " << needVersion.toString()
<< " mine : " << currVersion.toString() << endl;
LOG(1) << msg.toString() << endl;
//
// We should reload only if we need to update our version to be compatible *and* we
// haven't already done so. This avoids lots of reloading when we remove/add a sharded collection
//
bool alreadyReloaded = lastNeededVersion &&
lastNeededVersion->isEquivalentTo( needVersion );
if( alreadyReloaded ){
LOG(1) << "wbl already reloaded config information for version "
<< needVersion << ", at version " << currVersion << endl;
}
else if( lastNeededVersion ) {
//......... part of the code is omitted here .........
示例9: checkShardVersion
/**
 * @return true if had to do something
 */
bool checkShardVersion( DBClientBase& conn_in , const string& ns , bool authoritative , int tryNumber ) {
    // TODO: cache, optimize, etc...

    // Make sure the writeback listener watches this connection before we
    // start versioning it.
    WriteBackListener::init( conn_in );

    DBConfigPtr dbConfig = grid.getDBConfig( ns );
    if ( !dbConfig )
        return false;

    DBClientBase* vconn = getVersionable( &conn_in );
    assert(vconn); // errors thrown above

    // Fetch the current chunk manager (if sharded) and its sequence number;
    // the sequence number tells us whether the manager has changed since the
    // last setShardVersion we sent over this connection.
    unsigned long long currentSeq = 0;
    ChunkManagerPtr chunkMgr;
    const bool sharded = dbConfig->isSharded( ns );
    if ( sharded ) {
        chunkMgr = dbConfig->getChunkManagerIfExists( ns , authoritative );
        // It's possible the chunk manager was reset since we checked whether
        // sharded was true, so must check this here.
        if ( chunkMgr )
            currentSeq = chunkMgr->getSequenceNumber();
    }

    // Nothing to do if the connection already saw this chunk manager
    // iteration (i.e. no reload happened since the last setShardVersion).
    unsigned long long lastSeenSeq = connectionShardStatus.getSequence(vconn,ns);
    if ( lastSeenSeq == currentSeq )
        return false;

    ShardChunkVersion wantedVersion = 0;
    if ( sharded && chunkMgr )
        wantedVersion = chunkMgr->getVersion( Shard::make( vconn->getServerAddress() ) );

    if ( wantedVersion == 0 ) {
        LOG(0) << "resetting shard version of " << ns << " on " << vconn->getServerAddress() << ", " <<
               ( ! sharded ? "no longer sharded" :
               ( ! chunkMgr ? "no chunk manager found" :
                              "version is zero" ) ) << endl;
    }

    LOG(2) << " have to set shard version for conn: " << vconn << " ns:" << ns
           << " my last seq: " << lastSeenSeq << " current: " << currentSeq
           << " version: " << wantedVersion << " manager: " << chunkMgr.get()
           << endl;

    BSONObj cmdResult;
    if ( setShardVersion( *vconn , ns , wantedVersion , authoritative , cmdResult ) ) {
        // success!
        LOG(1) << " setShardVersion success: " << cmdResult << endl;
        connectionShardStatus.setSequence( vconn , ns , currentSeq );
        return true;
    }

    LOG(1) << " setShardVersion failed!\n" << cmdResult << endl;

    if ( cmdResult["need_authoritative"].trueValue() )
        massert( 10428 , "need_authoritative set but in authoritative mode already" , ! authoritative );

    if ( ! authoritative ) {
        // Retry once authoritatively.
        checkShardVersion( *vconn , ns , 1 , tryNumber + 1 );
        return true;
    }

    if ( cmdResult["reloadConfig"].trueValue() ) {
        if ( cmdResult["version"].timestampTime() == 0 ) {
            // reload db
            dbConfig->reload();
        }
        else {
            // reload config
            dbConfig->getChunkManager( ns , true );
        }
    }

    // Retry with backoff until the attempt budget is exhausted.
    const int maxNumTries = 7;
    if ( tryNumber < maxNumTries ) {
        LOG( tryNumber < ( maxNumTries / 2 ) ? 1 : 0 )
            << "going to retry checkShardVersion host: " << vconn->getServerAddress() << " " << cmdResult << endl;
        sleepmillis( 10 * tryNumber );
        checkShardVersion( *vconn , ns , true , tryNumber + 1 );
        return true;
    }

    // Out of retries: log and abort the operation via massert.
    string errmsg = str::stream() << "setShardVersion failed host: " << vconn->getServerAddress() << " " << cmdResult;
    log() << " " << errmsg << endl;
    massert( 10429 , errmsg , 0 );
    return true;
}