This article collects typical usage examples of the C++ method ConnectionString::getServers: what the method does, how to call it, and how it is used in real code. You can also explore further usage examples of the ConnectionString class it belongs to.
The 15 code examples of ConnectionString::getServers shown below are sorted by popularity by default.
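Before the examples, here is a minimal standalone sketch of the pattern they all share: parse a connection string, then iterate the HostAndPort entries returned by getServers(). The header paths are assumptions (they vary across MongoDB versions); the two-argument parse overload and the getServers()/toString() calls mirror how the examples below use them.

#include <iostream>
#include <string>

#include "mongo/client/connection_string.h"  // assumed path; varies by MongoDB version
#include "mongo/util/net/hostandport.h"      // assumed path; varies by MongoDB version

int main() {
    std::string errMsg;
    mongo::ConnectionString cs = mongo::ConnectionString::parse(
        "rs0/db1.example.net:27017,db2.example.net:27017", errMsg);
    if (!errMsg.empty()) {
        std::cerr << "failed to parse connection string: " << errMsg << std::endl;
        return 1;
    }

    // getServers() returns a vector<HostAndPort> with every host named in the string.
    for (const mongo::HostAndPort& hp : cs.getServers()) {
        std::cout << hp.toString() << std::endl;
    }
    return 0;
}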
Example 1: invariant
shared_ptr<ReplicaSetMonitor> ReplicaSetMonitorManager::getOrCreateMonitor(
    const ConnectionString& connStr) {
    invariant(connStr.type() == ConnectionString::SET);

    stdx::lock_guard<stdx::mutex> lk(_mutex);

    // Do not restart the task executor if it is in shutdown.
    if (!_taskExecutor && !_isShutdown) {
        // Construct the task executor.
        auto net = executor::makeNetworkInterface("ReplicaSetMonitor-TaskExecutor");
        auto netPtr = net.get();
        _taskExecutor = stdx::make_unique<ThreadPoolTaskExecutor>(
            stdx::make_unique<NetworkInterfaceThreadPool>(netPtr), std::move(net));

        LOG(1) << "Starting up task executor for monitoring replica sets in response to request to "
                  "monitor set: "
               << connStr.toString();
        _taskExecutor->startup();
    }

    auto setName = connStr.getSetName();
    auto monitor = _monitors[setName].lock();
    if (monitor) {
        return monitor;
    }

    const std::set<HostAndPort> servers(connStr.getServers().begin(), connStr.getServers().end());

    log() << "Starting new replica set monitor for " << connStr.toString();

    auto newMonitor = std::make_shared<ReplicaSetMonitor>(setName, servers);
    _monitors[setName] = newMonitor;
    newMonitor->init();
    return newMonitor;
}
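The _monitors map in Example 1 holds weak_ptr values, so _monitors[setName].lock() returns the existing monitor only while some caller still owns it; otherwise a fresh one is created and cached. A self-contained sketch of that caching pattern in standard C++ (Monitor is a stand-in for ReplicaSetMonitor, which lives in the MongoDB code base):

#include <iostream>
#include <map>
#include <memory>
#include <mutex>
#include <string>

// Minimal stand-in for ReplicaSetMonitor.
struct Monitor {
    explicit Monitor(std::string n) : name(std::move(n)) {}
    std::string name;
};

class MonitorCache {
public:
    std::shared_ptr<Monitor> getOrCreate(const std::string& setName) {
        std::lock_guard<std::mutex> lk(_mutex);
        // lock() yields the live shared_ptr, or an empty one if all owners released it.
        if (auto existing = _monitors[setName].lock()) {
            return existing;
        }
        auto fresh = std::make_shared<Monitor>(setName);
        _monitors[setName] = fresh;  // cache a weak reference only
        return fresh;
    }

private:
    std::mutex _mutex;
    std::map<std::string, std::weak_ptr<Monitor>> _monitors;
};

int main() {
    MonitorCache cache;
    auto a = cache.getOrCreate("rs0");
    auto b = cache.getOrCreate("rs0");
    std::cout << (a == b ? "same monitor reused" : "new monitor created") << std::endl;
}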
Example 2: findMaster
Status DBClientShardResolver::findMaster( const std::string connString,
                                          ConnectionString* resolvedHost ) {
    std::string errMsg;

    ConnectionString rawHost = ConnectionString::parse( connString, errMsg );
    dassert( errMsg == "" );
    dassert( rawHost.type() == ConnectionString::SET
             || rawHost.type() == ConnectionString::MASTER );

    if ( rawHost.type() == ConnectionString::MASTER ) {
        *resolvedHost = rawHost;
        return Status::OK();
    }

    //
    // If we need to, then get the particular node we're targeting in the replica set
    //

    // Don't create the monitor unless we need to - fast path
    ReplicaSetMonitorPtr replMonitor = ReplicaSetMonitor::get(rawHost.getSetName());

    if (!replMonitor) {
        // Slow path
        std::set<HostAndPort> seedServers(rawHost.getServers().begin(),
                                          rawHost.getServers().end());
        ReplicaSetMonitor::createIfNeeded(rawHost.getSetName(), seedServers);
        replMonitor = ReplicaSetMonitor::get(rawHost.getSetName());
    }

    if (!replMonitor) {
        return Status( ErrorCodes::ReplicaSetNotFound,
                       string("unknown replica set ") + rawHost.getSetName() );
    }

    try {
        // This can throw when we don't find a master!
        HostAndPort masterHostAndPort = replMonitor->getMasterOrUassert();
        *resolvedHost = ConnectionString::parse( masterHostAndPort.toString(), errMsg );
        dassert( errMsg == "" );
        return Status::OK();
    }
    catch ( const DBException& ) {
        return Status( ErrorCodes::HostNotFound,
                       string("could not contact primary for replica set ")
                       + replMonitor->getName() );
    }

    // Unreachable
    dassert( false );
    return Status( ErrorCodes::UnknownError, "" );
}
Example 3: invariant
shared_ptr<ReplicaSetMonitor>
ReplicaSetMonitorManager::getOrCreateMonitor(const ConnectionString& connStr) {
    invariant(connStr.type() == ConnectionString::SET);

    stdx::lock_guard<stdx::mutex> lk(_mutex);

    shared_ptr<ReplicaSetMonitor>& monitor = _monitors[connStr.getSetName()];
    if (!monitor) {
        const std::set<HostAndPort> servers(connStr.getServers().begin(),
                                            connStr.getServers().end());
        monitor = std::make_shared<ReplicaSetMonitor>(connStr.getSetName(), servers);
    }

    return monitor;
}
Example 4: remove
void ShardRegistry::remove(const ShardId& id) {
    stdx::lock_guard<stdx::mutex> lk(_mutex);

    set<string> entriesToRemove;
    for (const auto& i : _lookup) {
        shared_ptr<Shard> s = i.second;
        if (s->getId() == id) {
            entriesToRemove.insert(i.first);
            ConnectionString connStr = s->getConnString();
            for (const auto& host : connStr.getServers()) {
                entriesToRemove.insert(host.toString());
            }
        }
    }
    for (const auto& entry : entriesToRemove) {
        _lookup.erase(entry);
    }

    for (ShardMap::iterator i = _rsLookup.begin(); i != _rsLookup.end();) {
        shared_ptr<Shard> s = i->second;
        if (s->getId() == id) {
            _rsLookup.erase(i++);
        } else {
            ++i;
        }
    }

    shardConnectionPool.removeHost(id);
    ReplicaSetMonitor::remove(id);
}
Example 5: _updateLookupMapsForShard_inlock
void ShardRegistry::_updateLookupMapsForShard_inlock(shared_ptr<Shard> shard,
                                                     const ConnectionString& newConnString) {
    auto oldConnString = shard->getConnString();
    for (const auto& host : oldConnString.getServers()) {
        _lookup.erase(host.toString());
    }

    _lookup[shard->getId()] = shard;

    if (newConnString.type() == ConnectionString::SET) {
        _rsLookup[newConnString.getSetName()] = shard;
    } else if (newConnString.type() == ConnectionString::CUSTOM) {
        // CUSTOM connection strings (i.e. "$dummy:10000") become DBDirectClient connections
        // which always return "localhost" as their response to getServerAddress(). This is
        // just for making dbtest work.
        _lookup["localhost"] = shard;
    }

    // TODO: The only reason to have the shard host names in the lookup table is for the
    // setShardVersion call, which resolves the shard id from the shard address. This is
    // error-prone and will go away eventually when we switch all communications to go through
    // the remote command runner and all nodes are sharding aware by default.
    _lookup[newConnString.toString()] = shard;
    for (const HostAndPort& hostAndPort : newConnString.getServers()) {
        _lookup[hostAndPort.toString()] = shard;
    }
}
Example 6: switch
std::unique_ptr<RemoteCommandTargeter> RemoteCommandTargeterFactoryImpl::create(
    const ConnectionString& connStr) {
    switch (connStr.type()) {
        case ConnectionString::MASTER:
        case ConnectionString::CUSTOM:
            invariant(connStr.getServers().size() == 1);
            return stdx::make_unique<RemoteCommandTargeterStandalone>(
                connStr.getServers().front());
        case ConnectionString::SET:
            return stdx::make_unique<RemoteCommandTargeterRS>(connStr.getSetName(),
                                                              connStr.getServers());
        case ConnectionString::INVALID:
            // These connections should never be seen
            break;
    }

    MONGO_UNREACHABLE;
}
Example 7: removeHost
void MongoConnectionPool::removeHost(const ConnectionString& host) {
    lock_guard<mutex> lock(m_Mutex);
    cout << "Removing connections from all pools for host "
         << host.getServers()[0].toString() << endl;
    // Iterate by reference; iterating by value would clear a copy of each pool.
    for (auto& i : m_Pools) {
        if (BuildHostString(host).compare(i.first) == 0)
            i.second.clear();
    }
}
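Note that host.getServers()[0] assumes the server list is non-empty; for an INVALID or empty connection string that indexing is undefined behavior. A defensive variant of the logging line, sketched with a stub type so it compiles on its own (the real HostAndPort comes from the MongoDB headers):

#include <iostream>
#include <string>
#include <vector>

// Stand-in for mongo's HostAndPort so this sketch is self-contained.
struct HostAndPort {
    std::string hp;
    std::string toString() const { return hp; }
};

// Guarded version of the logging in Example 7: check before indexing.
void logRemoval(const std::vector<HostAndPort>& servers) {
    if (servers.empty()) {
        std::cout << "Removing connections (connection string has no hosts)" << std::endl;
        return;
    }
    std::cout << "Removing connections from all pools for host "
              << servers.front().toString() << std::endl;
}

int main() {
    logRemoval({{"db1.example.net:27017"}});
    logRemoval({});
}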
Example 8: invariant
shared_ptr<ReplicaSetMonitor> ReplicaSetMonitorManager::getOrCreateMonitor(
    const ConnectionString& connStr) {
    invariant(connStr.type() == ConnectionString::SET);

    stdx::lock_guard<stdx::mutex> lk(_mutex);

    _setupTaskExecutorInLock(connStr.toString());

    auto setName = connStr.getSetName();
    auto monitor = _monitors[setName].lock();
    if (monitor) {
        return monitor;
    }

    const std::set<HostAndPort> servers(connStr.getServers().begin(), connStr.getServers().end());

    log() << "Starting new replica set monitor for " << connStr.toString();

    auto newMonitor = std::make_shared<ReplicaSetMonitor>(setName, servers);
    _monitors[setName] = newMonitor;
    newMonitor->init();
    return newMonitor;
}
Example 9: remoteTime
/**
 * Returns the remote time as reported by the cluster or server. The maximum difference between
 * the reported time and the actual time on the remote server (at the completion of the function)
 * is the maxNetSkew.
 */
Date_t DistributedLock::remoteTime(const ConnectionString& cluster, unsigned long long maxNetSkew) {
    ConnectionString server(*cluster.getServers().begin());

    // Get result and delay if successful, errMsg if not
    bool success = false;
    BSONObj result;
    string errMsg;
    Milliseconds delay{0};

    unique_ptr<ScopedDbConnection> connPtr;
    try {
        connPtr.reset(new ScopedDbConnection(server.toString()));
        ScopedDbConnection& conn = *connPtr;

        Date_t then = jsTime();
        success = conn->runCommand(string("admin"), BSON("serverStatus" << 1), result);
        delay = jsTime() - then;

        if (!success)
            errMsg = result.toString();

        conn.done();
    } catch (const DBException& ex) {
        if (connPtr && connPtr->get()->isFailed()) {
            // Return to the pool so the pool knows about the failure
            connPtr->done();
        }

        success = false;
        errMsg = ex.toString();
    }

    if (!success) {
        throw TimeNotFoundException(str::stream() << "could not get status from server "
                                                  << server.toString() << " in cluster "
                                                  << cluster.toString() << " to check time"
                                                  << causedBy(errMsg),
                                    13647);
    }

    // Make sure that our delay is not more than 2x our maximum network skew, since this is the
    // max our remote time value can be off by if we assume a response in the middle of the delay.
    if (delay > Milliseconds(maxNetSkew * 2)) {
        throw TimeNotFoundException(
            str::stream() << "server " << server.toString() << " in cluster " << cluster.toString()
                          << " did not respond within max network delay of " << maxNetSkew << "ms",
            13648);
    }

    return result["localTime"].Date() - (delay / 2);
}
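The return value assumes the remote clock was sampled at the midpoint of the round trip, so subtracting delay/2 bounds the error by half the measured delay. A standalone sketch of that arithmetic with std::chrono (stub values in place of the real serverStatus call):

#include <chrono>
#include <iostream>

int main() {
    using namespace std::chrono;

    // Measure the round trip around the (stubbed) remote call, as remoteTime() does.
    system_clock::time_point then = system_clock::now();
    // ... runCommand("admin", {serverStatus: 1}) would happen here ...
    system_clock::time_point now = system_clock::now();
    milliseconds delay = duration_cast<milliseconds>(now - then);

    // The server's reported localTime; stubbed here, read from the command result in Example 9.
    system_clock::time_point reportedRemote = system_clock::now();

    // Midpoint assumption: the sample was taken delay/2 before the response arrived.
    system_clock::time_point estimate = reportedRemote - delay / 2;

    std::cout << "round trip: " << delay.count() << "ms" << std::endl;
    (void)estimate;  // the caller would compare this against the local clock
    return 0;
}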
Example 10: _addShard
void ShardRegistryData::_addShard(WithLock lk,
                                  std::shared_ptr<Shard> const& shard,
                                  bool useOriginalCS) {
    const ShardId shardId = shard->getId();

    const ConnectionString connString =
        useOriginalCS ? shard->originalConnString() : shard->getConnString();

    auto currentShard = _findByShardId(lk, shardId);
    if (currentShard) {
        auto oldConnString = currentShard->originalConnString();

        if (oldConnString.toString() != connString.toString()) {
            log() << "Updating ShardRegistry connection string for shard " << currentShard->getId()
                  << " from: " << oldConnString.toString() << " to: " << connString.toString();
        }

        for (const auto& host : oldConnString.getServers()) {
            _lookup.erase(host.toString());
            _hostLookup.erase(host);
        }
        _lookup.erase(oldConnString.toString());
    }

    _lookup[shard->getId()] = shard;

    LOG(3) << "Adding shard " << shard->getId() << ", with CS " << connString.toString();
    if (connString.type() == ConnectionString::SET) {
        _rsLookup[connString.getSetName()] = shard;
    } else if (connString.type() == ConnectionString::CUSTOM) {
        // CUSTOM connection strings (i.e. "$dummy:10000") become DBDirectClient connections
        // which always return "localhost" as their response to getServerAddress(). This is
        // just for making dbtest work.
        _lookup[ShardId("localhost")] = shard;
        _hostLookup[HostAndPort("localhost")] = shard;
    }

    // TODO: The only reason to have the shard host names in the lookup table is for the
    // setShardVersion call, which resolves the shard id from the shard address. This is
    // error-prone and will go away eventually when we switch all communications to go through
    // the remote command runner and all nodes are sharding aware by default.
    _lookup[connString.toString()] = shard;
    for (const HostAndPort& hostAndPort : connString.getServers()) {
        _lookup[hostAndPort.toString()] = shard;
        _hostLookup[hostAndPort] = shard;
    }
}
Example 11: getConfigHosts
/**
 * Returns the currently-set config hosts for a cluster
 */
static vector<ConnectionString> getConfigHosts() {
    vector<ConnectionString> configHosts;
    ConnectionString configHostOrHosts = configServer.getConnectionString();
    if (configHostOrHosts.type() == ConnectionString::MASTER) {
        configHosts.push_back(configHostOrHosts);
    } else if (configHostOrHosts.type() == ConnectionString::SYNC) {
        vector<HostAndPort> configHPs = configHostOrHosts.getServers();
        for (vector<HostAndPort>::iterator it = configHPs.begin(); it != configHPs.end(); ++it) {
            configHosts.push_back(ConnectionString(*it));
        }
    } else {
        // This is only for tests.
        dassert(configHostOrHosts.type() == ConnectionString::CUSTOM);
        configHosts.push_back(configHostOrHosts);
    }

    return configHosts;
}
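The SYNC branch is the one that exercises getServers() here, expanding a multi-host config string into one single-host ConnectionString per member. The same expansion with a range-for, as a fragment reusing configHostOrHosts and the single-host ConnectionString constructor from Example 11:

    // Fragment only: types and configHostOrHosts as in Example 11.
    for (const HostAndPort& hp : configHostOrHosts.getServers()) {
        configHosts.push_back(ConnectionString(hp));
    }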
Example 12: skewClocks
/**
 * Skews the clocks of a remote cluster by a particular amount, specified by
 * the "skewHosts" element in a BSONObj.
 */
static void skewClocks( ConnectionString& cluster, BSONObj& cmdObj ) {
    vector<long long> skew;
    if (cmdObj.hasField("skewHosts")) {
        bsonArrToNumVector<long long>(cmdObj["skewHosts"], skew);
    }
    else {
        LOG( logLvl ) << "No host clocks to skew." << endl;
        return;
    }

    LOG( logLvl ) << "Skewing clocks of hosts " << cluster << endl;

    unsigned s = 0;
    for (vector<long long>::iterator i = skew.begin(); i != skew.end(); ++i, ++s) {
        ConnectionString server( cluster.getServers()[s] );
        scoped_ptr<ScopedDbConnection> conn(
            ScopedDbConnection::getInternalScopedDbConnection( server.toString() ) );

        BSONObj result;
        try {
            bool success = conn->get()->runCommand( string("admin"),
                                                    BSON( "_skewClockCommand" << 1
                                                          << "skew" << *i ),
                                                    result );

            uassert( 13678,
                     str::stream() << "Could not communicate with server " << server.toString()
                                   << " in cluster " << cluster.toString()
                                   << " to change skew by " << *i,
                     success );

            LOG( logLvl + 1 ) << " Skewed host " << server << " clock by " << *i << endl;
        }
        catch (...) {
            conn->done();
            throw;
        }

        conn->done();
    }
}
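One caveat in Example 12: the index s walks cluster.getServers() but is driven by the length of the skew vector, so a "skewHosts" array with more entries than the cluster has hosts would read past the end. A hedged guard, sketched as a fragment in the same style (std::min from <algorithm>):

    // Fragment: clamp the loop to the shorter of the two lists before indexing.
    const vector<HostAndPort>& hosts = cluster.getServers();
    const size_t n = std::min(skew.size(), hosts.size());
    for (size_t s = 0; s < n; ++s) {
        ConnectionString server(hosts[s]);
        // ... send _skewClockCommand with skew[s], as in Example 12 ...
    }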
Example 13: _updateLookupMapsForShard_inlock
void ShardRegistry::_updateLookupMapsForShard_inlock(shared_ptr<Shard> shard,
                                                     const ConnectionString& newConnString) {
    auto oldConnString = shard->getConnString();
    for (const auto& host : oldConnString.getServers()) {
        _lookup.erase(host.toString());
    }

    _lookup[shard->getId()] = shard;

    if (newConnString.type() == ConnectionString::SET) {
        _rsLookup[newConnString.getSetName()] = shard;
    }

    // TODO: The only reason to have the shard host names in the lookup table is for the
    // setShardVersion call, which resolves the shard id from the shard address. This is
    // error-prone and will go away eventually when we switch all communications to go through
    // the remote command runner and all nodes are sharding aware by default.
    _lookup[newConnString.toString()] = shard;
    for (const HostAndPort& hostAndPort : newConnString.getServers()) {
        _lookup[hostAndPort.toString()] = shard;
    }
}
Example 14: _discover
bool _discover( StateMap& threads , const string& host , const shared_ptr<ServerState>& ss ) {
    BSONObj info = ss->now;

    bool found = false;

    if ( info["repl"].isABSONObj() ) {
        BSONObj x = info["repl"].Obj();
        if ( x["hosts"].isABSONObj() )
            if ( _addAll( threads , x["hosts"].Obj() ) )
                found = true;

        if ( x["passives"].isABSONObj() )
            if ( _addAll( threads , x["passives"].Obj() ) )
                found = true;
    }

    if ( ss->mongos ) {
        for ( unsigned i = 0; i < ss->shards.size(); i++ ) {
            BSONObj x = ss->shards[i];

            string errmsg;
            ConnectionString cs = ConnectionString::parse( x["host"].String() , errmsg );
            if ( errmsg.size() ) {
                cerr << errmsg << endl;
                continue;
            }

            vector<HostAndPort> v = cs.getServers();
            for ( unsigned j = 0; j < v.size(); j++ ) {
                if ( _add( threads , v[j].toString() ) )
                    found = true;
            }
        }
    }

    return found;
}
Example 15: addShard
//......... part of the code omitted here .........
        BSONObj res;
        bool ok = newShardConn->runCommand("admin", BSON("replSetGetStatus" << 1), res);
        ostringstream ss;
        if ( !ok && res["info"].type() == String && res["info"].String() == "configsvr" ) {
            errMsg = "the specified mongod is a --configsvr and should thus not be a shard server";
            newShardConn.done();
            return false;
        }
    }

    // If the shard is part of a replica set, make sure all the hosts mentioned in 'servers' are
    // part of the set. It is fine if not all members of the set are present in 'servers'.
    bool foundAll = true;
    string offendingHost;
    if ( ! commandSetName.empty() ) {
        set<string> hostSet;

        BSONObjIterator iter( resIsMaster["hosts"].Obj() );
        while ( iter.more() ) {
            hostSet.insert( iter.next().String() ); // host:port
        }

        if ( resIsMaster["passives"].isABSONObj() ) {
            BSONObjIterator piter( resIsMaster["passives"].Obj() );
            while ( piter.more() ) {
                hostSet.insert( piter.next().String() ); // host:port
            }
        }

        if ( resIsMaster["arbiters"].isABSONObj() ) {
            BSONObjIterator piter( resIsMaster["arbiters"].Obj() );
            while ( piter.more() ) {
                hostSet.insert( piter.next().String() ); // host:port
            }
        }

        vector<HostAndPort> hosts = servers.getServers();
        for ( size_t i = 0 ; i < hosts.size() ; i++ ) {
            if (!hosts[i].hasPort()) {
                hosts[i].setPort(ServerGlobalParams::DefaultDBPort);
            }
            string host = hosts[i].toString(); // host:port
            if ( hostSet.find( host ) == hostSet.end() ) {
                offendingHost = host;
                foundAll = false;
                break;
            }
        }
    }

    if ( ! foundAll ) {
        ostringstream ss;
        ss << "in seed list " << servers.toString() << ", host " << offendingHost
           << " does not belong to replica set " << setName;
        errMsg = ss.str();
        newShardConn.done();
        return false;
    }

    // Shard name defaults to the name of the replica set
    if ( name->empty() && ! setName.empty() )
        *name = setName;

    // In order to be accepted as a new shard, that mongod must not have any database name that
    // exists already in any other shards. If that test passes, the new shard's databases are
    // going to be entered as non-sharded db's whose primary is the newly added shard.
    BSONObj resListDB;
    ok = newShardConn->runCommand( "admin" , BSON( "listDatabases" << 1 ) , resListDB );
    if ( !ok ) {