This article collects typical usage examples of the C++ str::stream method. If you are looking for answers to questions such as "what exactly does C++ str::stream do?", "how is str::stream used?", or "what do real str::stream calls look like?", the curated code samples below may help. You can also explore further usage examples of the enclosing str class.
The following shows 15 code examples of the str::stream method, drawn from the MongoDB server source tree and sorted by popularity by default.
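Before the examples, here is the idiom they all share: str::stream() creates a temporary string builder, operator<< appends values of arbitrary streamable types, and the result converts implicitly to std::string, so an error message can be composed inline wherever a std::string (for example a Status reason) is expected. The sketch below is a minimal, self-contained stand-in written for illustration under that assumption; it is not MongoDB's actual implementation, only a compilable approximation of the same interface.

#include <iostream>
#include <sstream>
#include <string>

// Minimal stand-in for a str::stream-like helper (illustrative only; not the
// MongoDB implementation). Values are appended with operator<< and the result
// converts implicitly to std::string, so a message can be built in one expression.
namespace str {
class stream {
public:
    template <typename T>
    stream& operator<<(const T& value) {
        _os << value;  // append any streamable value
        return *this;
    }

    operator std::string() const {
        return _os.str();  // implicit conversion used at the call site
    }

private:
    std::ostringstream _os;
};
}  // namespace str

int main() {
    const std::string ns = "test.users";

    // Same shape as the examples below: build the message inline where a
    // std::string (for example a Status reason) is expected.
    std::string msg = str::stream() << "collection " << ns << " not found";
    std::cout << msg << std::endl;
    return 0;
}

In the real code base, this is what allows expressions such as stream() << "collection " << nss.ns() << " not found" to be passed directly where a std::string is required.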
Example 1: Status
StatusWith<repl::OpTimeWith<CollectionType>> ShardingCatalogClientImpl::getCollection(
    OperationContext* opCtx, const NamespaceString& nss, repl::ReadConcernLevel readConcernLevel) {
    auto statusFind = _exhaustiveFindOnConfig(opCtx,
                                              kConfigReadSelector,
                                              readConcernLevel,
                                              CollectionType::ConfigNS,
                                              BSON(CollectionType::fullNs(nss.ns())),
                                              BSONObj(),
                                              1);
    if (!statusFind.isOK()) {
        return statusFind.getStatus();
    }

    const auto& retOpTimePair = statusFind.getValue();
    const auto& retVal = retOpTimePair.value;
    if (retVal.empty()) {
        return Status(ErrorCodes::NamespaceNotFound,
                      stream() << "collection " << nss.ns() << " not found");
    }

    invariant(retVal.size() == 1);

    auto parseStatus = CollectionType::fromBSON(retVal.front());
    if (!parseStatus.isOK()) {
        return parseStatus.getStatus();
    }

    auto collType = parseStatus.getValue();
    if (collType.getDropped()) {
        return Status(ErrorCodes::NamespaceNotFound,
                      stream() << "collection " << nss.ns() << " was dropped");
    }

    return repl::OpTimeWith<CollectionType>(collType, retOpTimePair.opTime);
}
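Example 1 also shows the StatusWith error-propagation idiom that recurs throughout this page: check isOK(), return getStatus() on failure, otherwise continue with getValue(). The following hypothetical sketch isolates that calling pattern with simplified stand-in types (Status, StatusWith, and findCollectionName are placeholders defined here for illustration, not the real MongoDB classes).

#include <iostream>
#include <string>
#include <utility>

// Hypothetical, simplified stand-ins for MongoDB's Status/StatusWith so the
// calling pattern from Example 1 can be shown in isolation.
struct Status {
    bool ok;
    std::string reason;
    static Status OK() { return {true, ""}; }
};

template <typename T>
class StatusWith {
public:
    StatusWith(Status s) : _status(std::move(s)) {}
    StatusWith(T value) : _status(Status::OK()), _value(std::move(value)) {}

    bool isOK() const { return _status.ok; }
    const Status& getStatus() const { return _status; }
    const T& getValue() const { return _value; }

private:
    Status _status;
    T _value{};
};

// Stand-in for a lookup that can fail, mirroring getCollection() above.
StatusWith<std::string> findCollectionName(bool found) {
    if (!found) {
        return Status{false, "collection not found"};
    }
    return std::string("test.users");
}

int main() {
    auto result = findCollectionName(false);
    if (!result.isOK()) {
        // On failure, propagate or report the error, just as the examples do
        // with `return statusFind.getStatus();`.
        std::cout << "error: " << result.getStatus().reason << std::endl;
        return 1;
    }
    std::cout << "found: " << result.getValue() << std::endl;
    return 0;
}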
Example 2: BSONObj
StatusWith<repl::OpTimeWith<std::vector<ShardType>>> ShardingCatalogClientImpl::getAllShards(
    OperationContext* opCtx, repl::ReadConcernLevel readConcern) {
    std::vector<ShardType> shards;
    auto findStatus = _exhaustiveFindOnConfig(opCtx,
                                              kConfigReadSelector,
                                              readConcern,
                                              ShardType::ConfigNS,
                                              BSONObj(),     // no query filter
                                              BSONObj(),     // no sort
                                              boost::none);  // no limit
    if (!findStatus.isOK()) {
        return findStatus.getStatus();
    }

    for (const BSONObj& doc : findStatus.getValue().value) {
        auto shardRes = ShardType::fromBSON(doc);
        if (!shardRes.isOK()) {
            return shardRes.getStatus().withContext(stream() << "Failed to parse shard document "
                                                             << doc);
        }

        Status validateStatus = shardRes.getValue().validate();
        if (!validateStatus.isOK()) {
            return validateStatus.withContext(stream() << "Failed to validate shard document "
                                                       << doc);
        }

        shards.push_back(shardRes.getValue());
    }

    return repl::OpTimeWith<std::vector<ShardType>>{std::move(shards),
                                                    findStatus.getValue().opTime};
}
Example 3: _schedule_inlock
void MigrationManager::_schedule_inlock(OperationContext* txn,
                                        const HostAndPort& targetHost,
                                        Migration migration) {
    executor::TaskExecutor* const executor = Grid::get(txn)->getExecutorPool()->getFixedExecutor();

    const NamespaceString nss(migration.nss);

    auto it = _activeMigrations.find(nss);
    if (it == _activeMigrations.end()) {
        const std::string whyMessage(stream() << "Migrating chunk(s) in collection " << nss.ns());

        // Acquire the collection distributed lock (blocking call)
        auto statusWithDistLockHandle =
            Grid::get(txn)->catalogClient(txn)->getDistLockManager()->lockWithSessionID(
                txn,
                nss.ns(),
                whyMessage,
                _lockSessionID,
                DistLockManager::kSingleLockAttemptTimeout);
        if (!statusWithDistLockHandle.isOK()) {
            migration.completionNotification->set(
                Status(statusWithDistLockHandle.getStatus().code(),
                       stream() << "Could not acquire collection lock for " << nss.ns()
                                << " to migrate chunks, due to "
                                << statusWithDistLockHandle.getStatus().reason()));
            return;
        }

        it = _activeMigrations.insert(std::make_pair(nss, MigrationsList())).first;
    }

    auto migrations = &it->second;

    // Add ourselves to the list of migrations on this collection
    migrations->push_front(std::move(migration));
    auto itMigration = migrations->begin();

    const RemoteCommandRequest remoteRequest(
        targetHost, NamespaceString::kAdminDb.toString(), itMigration->moveChunkCmdObj, txn);

    StatusWith<executor::TaskExecutor::CallbackHandle> callbackHandleWithStatus =
        executor->scheduleRemoteCommand(
            remoteRequest,
            [this, itMigration](const executor::TaskExecutor::RemoteCommandCallbackArgs& args) {
                Client::initThread(getThreadName().c_str());
                ON_BLOCK_EXIT([&] { Client::destroy(); });
                auto txn = cc().makeOperationContext();

                stdx::lock_guard<stdx::mutex> lock(_mutex);
                _complete_inlock(txn.get(), itMigration, args.response);
            });

    if (callbackHandleWithStatus.isOK()) {
        itMigration->callbackHandle = std::move(callbackHandleWithStatus.getValue());
        return;
    }

    _complete_inlock(txn, itMigration, std::move(callbackHandleWithStatus.getStatus()));
}
Example 4: initialize
Status ChunkMoveOperationState::initialize(const BSONObj& cmdObj) {
    // Make sure we're as up-to-date as possible with shard information. This catches the case
    // where we might have changed a shard's host by removing/adding a shard with the same name.
    grid.shardRegistry()->reload(_txn);

    _fromShard = cmdObj["fromShard"].str();
    if (_fromShard.empty()) {
        return {ErrorCodes::InvalidOptions, "need to specify shard to move chunk from"};
    }

    _toShard = cmdObj["toShard"].str();
    if (_toShard.empty()) {
        return {ErrorCodes::InvalidOptions, "need to specify shard to move chunk to"};
    }

    Status epochStatus = bsonExtractOIDField(cmdObj, "epoch", &_collectionEpoch);
    if (!epochStatus.isOK()) {
        return epochStatus;
    }

    _minKey = cmdObj["min"].Obj();
    if (_minKey.isEmpty()) {
        return {ErrorCodes::InvalidOptions, "need to specify a min"};
    }

    _maxKey = cmdObj["max"].Obj();
    if (_maxKey.isEmpty()) {
        return {ErrorCodes::InvalidOptions, "need to specify a max"};
    }

    {
        std::shared_ptr<Shard> fromShard = grid.shardRegistry()->getShard(_txn, _fromShard);
        if (!fromShard) {
            return {ErrorCodes::ShardNotFound,
                    stream() << "Source shard " << _fromShard
                             << " is missing. This indicates metadata corruption."};
        }

        _fromShardCS = fromShard->getConnString();
    }

    {
        std::shared_ptr<Shard> toShard = grid.shardRegistry()->getShard(_txn, _toShard);
        if (!toShard) {
            return {ErrorCodes::ShardNotFound,
                    stream() << "Destination shard " << _toShard
                             << " is missing. This indicates metadata corruption."};
        }

        _toShardCS = toShard->getConnString();
    }

    return Status::OK();
}
Example 5: getConfigVersion
/**
 * Returns the config version of the cluster pointed at by the connection string.
 *
 * @return OK if version found successfully, error status if something bad happened.
 */
Status getConfigVersion(CatalogManager* catalogManager, VersionType* versionInfo) {
    try {
        versionInfo->clear();

        ScopedDbConnection conn(grid.shardRegistry()->getConfigServerConnectionString(), 30);

        unique_ptr<DBClientCursor> cursor(_safeCursor(conn->query("config.version", BSONObj())));

        bool hasConfigData = conn->count(ShardType::ConfigNS) ||
            conn->count(DatabaseType::ConfigNS) || conn->count(CollectionType::ConfigNS);

        if (!cursor->more()) {
            // Version is 1 if we have data, 0 if we're completely empty
            if (hasConfigData) {
                versionInfo->setMinCompatibleVersion(UpgradeHistory_UnreportedVersion);
                versionInfo->setCurrentVersion(UpgradeHistory_UnreportedVersion);
            } else {
                versionInfo->setMinCompatibleVersion(UpgradeHistory_EmptyVersion);
                versionInfo->setCurrentVersion(UpgradeHistory_EmptyVersion);
            }

            conn.done();
            return Status::OK();
        }

        BSONObj versionDoc = cursor->next();
        auto versionInfoResult = VersionType::fromBSON(versionDoc);
        if (!versionInfoResult.isOK()) {
            conn.done();
            return Status(ErrorCodes::UnsupportedFormat,
                          stream() << "invalid config version document " << versionDoc
                                   << versionInfoResult.getStatus().toString());
        }
        *versionInfo = versionInfoResult.getValue();

        if (cursor->more()) {
            conn.done();
            return Status(ErrorCodes::RemoteValidationError,
                          stream() << "should only have 1 document "
                                   << "in config.version collection");
        }

        conn.done();
    } catch (const DBException& e) {
        return e.toStatus();
    }

    return Status::OK();
}
Example 6: stream
StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::getDatabase(
    OperationContext* opCtx, const std::string& dbName, repl::ReadConcernLevel readConcernLevel) {
    if (!NamespaceString::validDBName(dbName, NamespaceString::DollarInDbNameBehavior::Allow)) {
        return {ErrorCodes::InvalidNamespace, stream() << dbName << " is not a valid db name"};
    }

    // The admin database is always hosted on the config server.
    if (dbName == NamespaceString::kAdminDb) {
        DatabaseType dbt(
            dbName, ShardRegistry::kConfigServerShardId, false, databaseVersion::makeFixed());
        return repl::OpTimeWith<DatabaseType>(dbt);
    }

    // The config database's primary shard is always config, and it is always sharded.
    if (dbName == NamespaceString::kConfigDb) {
        DatabaseType dbt(
            dbName, ShardRegistry::kConfigServerShardId, true, databaseVersion::makeFixed());
        return repl::OpTimeWith<DatabaseType>(dbt);
    }

    auto result = _fetchDatabaseMetadata(opCtx, dbName, kConfigReadSelector, readConcernLevel);
    if (result == ErrorCodes::NamespaceNotFound) {
        // If we failed to find the database metadata on the 'nearest' config server, try again
        // against the primary, in case the database was recently created.
        result = _fetchDatabaseMetadata(
            opCtx, dbName, ReadPreferenceSetting{ReadPreference::PrimaryOnly}, readConcernLevel);
        if (!result.isOK() && (result != ErrorCodes::NamespaceNotFound)) {
            return result.getStatus().withContext(
                str::stream() << "Could not confirm non-existence of database " << dbName);
        }
    }

    return result;
}
Example 7: BSON
StatusWith<string> CatalogManagerReplicaSet::getTagForChunk(const std::string& collectionNs,
                                                            const ChunkType& chunk) {
    auto configShard = grid.shardRegistry()->getShard("config");
    auto readHostStatus = configShard->getTargeter()->findHost(kConfigReadSelector);
    if (!readHostStatus.isOK()) {
        return readHostStatus.getStatus();
    }

    BSONObj query =
        BSON(TagsType::ns(collectionNs) << TagsType::min() << BSON("$lte" << chunk.getMin())
                                        << TagsType::max() << BSON("$gte" << chunk.getMax()));
    auto findStatus = grid.shardRegistry()->exhaustiveFind(
        readHostStatus.getValue(), NamespaceString(TagsType::ConfigNS), query, BSONObj(), 1);
    if (!findStatus.isOK()) {
        return findStatus.getStatus();
    }

    const auto& docs = findStatus.getValue();
    if (docs.empty()) {
        return string{};
    }

    invariant(docs.size() == 1);
    BSONObj tagsDoc = docs.front();

    const auto tagsResult = TagsType::fromBSON(tagsDoc);
    if (!tagsResult.isOK()) {
        return {ErrorCodes::FailedToParse,
                stream() << "error while parsing " << TagsType::ConfigNS << " document: " << tagsDoc
                         << " : " << tagsResult.getStatus().toString()};
    }

    return tagsResult.getValue().getTag();
}
Example 8: Status
StatusWith<CollectionType> CatalogManagerReplicaSet::getCollection(const std::string& collNs) {
    auto configShard = grid.shardRegistry()->getShard("config");

    auto readHostStatus = configShard->getTargeter()->findHost(kConfigReadSelector);
    if (!readHostStatus.isOK()) {
        return readHostStatus.getStatus();
    }

    auto statusFind =
        grid.shardRegistry()->exhaustiveFind(readHostStatus.getValue(),
                                             NamespaceString(CollectionType::ConfigNS),
                                             BSON(CollectionType::fullNs(collNs)),
                                             1);
    if (!statusFind.isOK()) {
        return statusFind.getStatus();
    }

    const auto& retVal = statusFind.getValue();
    if (retVal.empty()) {
        return Status(ErrorCodes::NamespaceNotFound,
                      stream() << "collection " << collNs << " not found");
    }

    invariant(retVal.size() == 1);

    return CollectionType::fromBSON(retVal.front());
}
Example 9: getChunks
Status CatalogManagerReplicaSet::getChunks(const Query& query,
                                           int nToReturn,
                                           vector<ChunkType>* chunks) {
    chunks->clear();

    auto configShard = grid.shardRegistry()->getShard("config");
    auto readHostStatus = configShard->getTargeter()->findHost(kConfigReadSelector);
    if (!readHostStatus.isOK()) {
        return readHostStatus.getStatus();
    }

    auto findStatus = grid.shardRegistry()->exhaustiveFind(readHostStatus.getValue(),
                                                           NamespaceString(ChunkType::ConfigNS),
                                                           query.obj,
                                                           boost::none);  // no limit
    if (!findStatus.isOK()) {
        return findStatus.getStatus();
    }

    for (const BSONObj& obj : findStatus.getValue()) {
        auto chunkRes = ChunkType::fromBSON(obj);
        if (!chunkRes.isOK()) {
            chunks->clear();
            return {ErrorCodes::FailedToParse,
                    stream() << "Failed to parse chunk with id ("
                             << obj[ChunkType::name()].toString()
                             << "): " << chunkRes.getStatus().toString()};
        }

        chunks->push_back(chunkRes.getValue());
    }

    return Status::OK();
}
Example 10: getAllShards
Status CatalogManagerReplicaSet::getAllShards(vector<ShardType>* shards) {
    const auto configShard = grid.shardRegistry()->getShard("config");
    const auto readHost = configShard->getTargeter()->findHost(kConfigReadSelector);
    if (!readHost.isOK()) {
        return readHost.getStatus();
    }

    auto findStatus = grid.shardRegistry()->exhaustiveFind(readHost.getValue(),
                                                           NamespaceString(ShardType::ConfigNS),
                                                           BSONObj(),     // no query filter
                                                           boost::none);  // no limit
    if (!findStatus.isOK()) {
        return findStatus.getStatus();
    }

    for (const BSONObj& doc : findStatus.getValue()) {
        auto shardRes = ShardType::fromBSON(doc);
        if (!shardRes.isOK()) {
            shards->clear();
            return {ErrorCodes::FailedToParse,
                    stream() << "Failed to parse shard with id ("
                             << doc[ShardType::name()].toString()
                             << "): " << shardRes.getStatus().toString()};
        }

        shards->push_back(shardRes.getValue());
    }

    return Status::OK();
}
Example 11: invariant
StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::_fetchDatabaseMetadata(
    OperationContext* opCtx,
    const std::string& dbName,
    const ReadPreferenceSetting& readPref,
    repl::ReadConcernLevel readConcernLevel) {
    invariant(dbName != NamespaceString::kAdminDb && dbName != NamespaceString::kConfigDb);

    auto findStatus = _exhaustiveFindOnConfig(opCtx,
                                              readPref,
                                              readConcernLevel,
                                              DatabaseType::ConfigNS,
                                              BSON(DatabaseType::name(dbName)),
                                              BSONObj(),
                                              boost::none);
    if (!findStatus.isOK()) {
        return findStatus.getStatus();
    }

    const auto& docsWithOpTime = findStatus.getValue();
    if (docsWithOpTime.value.empty()) {
        return {ErrorCodes::NamespaceNotFound, stream() << "database " << dbName << " not found"};
    }

    invariant(docsWithOpTime.value.size() == 1);

    auto parseStatus = DatabaseType::fromBSON(docsWithOpTime.value.front());
    if (!parseStatus.isOK()) {
        return parseStatus.getStatus();
    }

    return repl::OpTimeWith<DatabaseType>(parseStatus.getValue(), docsWithOpTime.opTime);
}
Example 12: clonePlusChunk
CollectionMetadata* CollectionMetadata::clonePlusChunk(const ChunkType& chunk,
                                                       const ChunkVersion& newShardVersion,
                                                       string* errMsg) const {
    // The error message string is optional.
    string dummy;
    if (errMsg == NULL) {
        errMsg = &dummy;
    }

    // It is acceptable to move version backwards (e.g., undoing a migration that went bad
    // during commit) but only cloning away the last chunk may reset the version to 0.
    if (!newShardVersion.isSet()) {
        *errMsg = stream() << "cannot add chunk " << rangeToString(chunk.getMin(), chunk.getMax())
                           << " with zero shard version";

        warning() << *errMsg;
        return NULL;
    }

    invariant(chunk.getMin().woCompare(chunk.getMax()) < 0);

    // Check that there isn't any chunk on the interval to be added.
    if (rangeMapOverlaps(_chunksMap, chunk.getMin(), chunk.getMax())) {
        RangeVector overlap;
        getRangeMapOverlap(_chunksMap, chunk.getMin(), chunk.getMax(), &overlap);

        *errMsg = stream() << "cannot add chunk " << rangeToString(chunk.getMin(), chunk.getMax())
                           << " because the chunk overlaps " << overlapToString(overlap);

        warning() << *errMsg;
        return NULL;
    }

    unique_ptr<CollectionMetadata> metadata(new CollectionMetadata);
    metadata->_keyPattern = this->_keyPattern;
    metadata->_keyPattern.getOwned();
    metadata->fillKeyPatternFields();
    metadata->_pendingMap = this->_pendingMap;
    metadata->_chunksMap = this->_chunksMap;
    metadata->_chunksMap.insert(make_pair(chunk.getMin().getOwned(), chunk.getMax().getOwned()));
    metadata->_shardVersion = newShardVersion;
    metadata->_collVersion = newShardVersion > _collVersion ? newShardVersion : this->_collVersion;
    metadata->fillRanges();

    invariant(metadata->isValid());
    return metadata.release();
}
Example 13: _processRemoteCommandResponse
Status MigrationManager::_processRemoteCommandResponse(
    const RemoteCommandResponse& remoteCommandResponse,
    ScopedMigrationRequest* scopedMigrationRequest) {
    stdx::lock_guard<stdx::mutex> lock(_mutex);
    Status commandStatus(ErrorCodes::InternalError, "Uninitialized value.");

    // Check for local errors sending the remote command caused by stepdown.
    if (isErrorDueToConfigStepdown(remoteCommandResponse.status,
                                   _state != State::kEnabled && _state != State::kRecovering)) {
        scopedMigrationRequest->keepDocumentOnDestruct();
        return {ErrorCodes::BalancerInterrupted,
                stream() << "Migration interrupted because the balancer is stopping."
                         << " Command status: "
                         << remoteCommandResponse.status.toString()};
    }

    if (!remoteCommandResponse.isOK()) {
        commandStatus = remoteCommandResponse.status;
    } else {
        // TODO: delete in 3.8
        commandStatus = extractMigrationStatusFromCommandResponse(remoteCommandResponse.data);
    }

    if (!Shard::shouldErrorBePropagated(commandStatus.code())) {
        commandStatus = {ErrorCodes::OperationFailed,
                         stream() << "moveChunk command failed on source shard."
                                  << causedBy(commandStatus)};
    }

    // Any failure to remove the migration document should be because the config server is
    // stepping/shutting down. In this case we must fail the moveChunk command with a retryable
    // error so that the caller does not move on to other distlock requiring operations that could
    // fail when the balancer recovers and takes distlocks for migration recovery.
    Status status = scopedMigrationRequest->tryToRemoveMigration();
    if (!status.isOK()) {
        commandStatus = {
            ErrorCodes::BalancerInterrupted,
            stream() << "Migration interrupted because the balancer is stopping"
                     << " and failed to remove the config.migrations document."
                     << " Command status: "
                     << (commandStatus.isOK() ? status.toString() : commandStatus.toString())};
    }

    return commandStatus;
}
Example 14: clonePlusPending
CollectionMetadata* CollectionMetadata::clonePlusPending(const ChunkType& pending,
                                                         string* errMsg) const {
    // The error message string is optional.
    string dummy;
    if (errMsg == NULL) {
        errMsg = &dummy;
    }

    if (rangeMapOverlaps(_chunksMap, pending.getMin(), pending.getMax())) {
        RangeVector overlap;
        getRangeMapOverlap(_chunksMap, pending.getMin(), pending.getMax(), &overlap);

        *errMsg = stream() << "cannot add pending chunk "
                           << rangeToString(pending.getMin(), pending.getMax())
                           << " because the chunk overlaps " << overlapToString(overlap);

        warning() << *errMsg;
        return NULL;
    }

    unique_ptr<CollectionMetadata> metadata(new CollectionMetadata);
    metadata->_keyPattern = this->_keyPattern;
    metadata->_keyPattern.getOwned();
    metadata->fillKeyPatternFields();
    metadata->_pendingMap = this->_pendingMap;
    metadata->_chunksMap = this->_chunksMap;
    metadata->_rangesMap = this->_rangesMap;
    metadata->_shardVersion = _shardVersion;
    metadata->_collVersion = _collVersion;

    // If there are any pending chunks on the interval to be added this is ok, since pending
    // chunks aren't officially tracked yet and something may have changed on servers we do not
    // see yet.
    // We remove any chunks we overlap, the remote request starting a chunk migration must have
    // been authoritative.
    if (rangeMapOverlaps(_pendingMap, pending.getMin(), pending.getMax())) {
        RangeVector pendingOverlap;
        getRangeMapOverlap(_pendingMap, pending.getMin(), pending.getMax(), &pendingOverlap);

        warning() << "new pending chunk " << rangeToString(pending.getMin(), pending.getMax())
                  << " overlaps existing pending chunks " << overlapToString(pendingOverlap)
                  << ", a migration may not have completed";

        for (RangeVector::iterator it = pendingOverlap.begin(); it != pendingOverlap.end(); ++it) {
            metadata->_pendingMap.erase(it->first);
        }
    }

    metadata->_pendingMap.insert(make_pair(pending.getMin(), pending.getMax()));

    invariant(metadata->isValid());
    return metadata.release();
}
Example 15: cloneMinusPending
CollectionMetadata* CollectionMetadata::cloneMinusPending(const ChunkType& pending,
                                                          string* errMsg) const {
    // The error message string is optional.
    string dummy;
    if (errMsg == NULL) {
        errMsg = &dummy;
    }

    // Check that we have the exact chunk that will be subtracted.
    if (!rangeMapContains(_pendingMap, pending.getMin(), pending.getMax())) {
        *errMsg = stream() << "cannot remove pending chunk "
                           << rangeToString(pending.getMin(), pending.getMax())
                           << ", this shard does not contain the chunk";

        if (rangeMapOverlaps(_pendingMap, pending.getMin(), pending.getMax())) {
            RangeVector overlap;
            getRangeMapOverlap(_pendingMap, pending.getMin(), pending.getMax(), &overlap);

            *errMsg += stream() << " and it overlaps " << overlapToString(overlap);
        }

        warning() << *errMsg;
        return NULL;
    }

    unique_ptr<CollectionMetadata> metadata(new CollectionMetadata);
    metadata->_keyPattern = this->_keyPattern;
    metadata->_keyPattern.getOwned();
    metadata->fillKeyPatternFields();
    metadata->_pendingMap = this->_pendingMap;
    metadata->_pendingMap.erase(pending.getMin());
    metadata->_chunksMap = this->_chunksMap;
    metadata->_rangesMap = this->_rangesMap;
    metadata->_shardVersion = _shardVersion;
    metadata->_collVersion = _collVersion;

    invariant(metadata->isValid());
    return metadata.release();
}