This article collects typical usage examples of the C++ method NamespaceString::coll. If you are wondering what NamespaceString::coll does, how to call it, or where it is used in practice, the curated examples below should help. They also illustrate the broader usage of the NamespaceString class that the method belongs to.
Five code examples of NamespaceString::coll are shown below, ordered by popularity.
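Before the examples, a minimal sketch of what the method returns may help. The sketch assumes the MongoDB source tree that the examples below are drawn from (the mongo/db/namespace_string.h header and its stream operator for StringData); treat it as an illustration, not a verified build target:

#include <iostream>

#include "mongo/db/namespace_string.h"

int main() {
    // A namespace is the "<database>.<collection>" pair MongoDB uses internally.
    const mongo::NamespaceString nss("test.users");

    std::cout << nss.db() << std::endl;    // prints "test"  - the database part
    std::cout << nss.coll() << std::endl;  // prints "users" - the part returned by coll()
    std::cout << nss.ns() << std::endl;    // prints "test.users" - the full namespace
    return 0;
}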
Example 1: write
void ClusterWriter::write( const BatchedCommandRequest& origRequest,
                           BatchedCommandResponse* response ) {

    // Add _ids to insert request if req'd
    auto_ptr<BatchedCommandRequest> idRequest(BatchedCommandRequest::cloneWithIds(origRequest));
    const BatchedCommandRequest& request = NULL != idRequest.get() ? *idRequest : origRequest;

    const NamespaceString nss = NamespaceString( request.getNS() );
    if ( !nss.isValid() ) {
        toBatchError( Status( ErrorCodes::InvalidNamespace,
                              nss.ns() + " is not a valid namespace" ),
                      response );
        return;
    }

    if ( !NamespaceString::validCollectionName( nss.coll() ) ) {
        toBatchError( Status( ErrorCodes::BadValue,
                              str::stream() << "invalid collection name " << nss.coll() ),
                      response );
        return;
    }

    if ( request.sizeWriteOps() == 0u ) {
        toBatchError( Status( ErrorCodes::InvalidLength,
                              "no write ops were included in the batch" ),
                      response );
        return;
    }

    if ( request.sizeWriteOps() > BatchedCommandRequest::kMaxWriteBatchSize ) {
        toBatchError( Status( ErrorCodes::InvalidLength,
                              str::stream() << "exceeded maximum write batch size of "
                                            << BatchedCommandRequest::kMaxWriteBatchSize ),
                      response );
        return;
    }

    string errMsg;
    if ( request.isInsertIndexRequest() && !request.isValidIndexRequest( &errMsg ) ) {
        toBatchError( Status( ErrorCodes::InvalidOptions, errMsg ), response );
        return;
    }

    // Config writes and shard writes are done differently
    string dbName = nss.db().toString();
    if ( dbName == "config" || dbName == "admin" ) {

        bool verboseWC = request.isVerboseWC();

        // We only support batch sizes of one for config writes
        if ( request.sizeWriteOps() != 1 ) {
            toBatchError( Status( ErrorCodes::InvalidOptions,
                                  mongoutils::str::stream() << "Writes to config servers must "
                                          "have batch size of 1, found "
                                      << request.sizeWriteOps() ),
                          response );
            return;
        }

        // We only support {w: 0}, {w: 1}, and {w: 'majority'} write concern for config writes
        if ( request.isWriteConcernSet() && !validConfigWC( request.getWriteConcern() ) ) {
            toBatchError( Status( ErrorCodes::InvalidOptions,
                                  mongoutils::str::stream() << "Invalid write concern for write"
                                          " to config servers: " << request.getWriteConcern() ),
                          response );
            return;
        }

        // We need to support "best-effort" writes for pings to the config server.
        // {w:0} (!verbose) writes are interpreted as best-effort in this case - they may still
        // error, but do not do the initial fsync check.
        configWrite( request, response, verboseWC );
    }
    else {
        shardWrite( request, response );
    }
}
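Note how coll() is used here: the collection part of the target namespace is passed to NamespaceString::validCollectionName, so a batch aimed at an invalid collection name is rejected before any write is dispatched.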
Example 2: userAllowedWriteNS
Status userAllowedWriteNS( const NamespaceString& ns ) {
    return userAllowedWriteNS( ns.db(), ns.coll() );
}
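This overload simply splits the namespace and forwards its parts to the (db, coll) overload. A hypothetical call site might look like the following sketch; the handleInsert wrapper is invented for illustration and is not part of the source:

Status handleInsert(const NamespaceString& nss) {
    // Delegates to the (db, coll) overload shown above, which is expected to
    // refuse writes to namespaces user code must not touch.
    Status allowed = userAllowedWriteNS(nss);
    if (!allowed.isOK()) {
        return allowed;
    }

    // ... perform the insert ...
    return Status::OK();
}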
Example 3: executeBatch
/**
 * The core config write functionality.
 *
 * Config writes run in two passes - the first is a quick check to ensure the config servers
 * are all reachable, the second runs the actual write.
 *
 * TODO: Upgrade and move this logic to the config servers, a state machine implementation
 * is probably the next step.
 */
void ConfigCoordinator::executeBatch(const BatchedCommandRequest& clientRequest,
                                     BatchedCommandResponse* clientResponse) {
    const NamespaceString nss(clientRequest.getNS());

    // Should never use it for anything other than DBs residing on the config server
    dassert(nss.db() == "config" || nss.db() == "admin");
    dassert(clientRequest.sizeWriteOps() == 1u);

    // This is an opportunistic check that all config servers look healthy by calling
    // getLastError on each one of them. If there was some form of write/journaling error, get
    // last error would fail.
    {
        for (vector<ConnectionString>::iterator it = _configHosts.begin();
             it != _configHosts.end();
             ++it) {

            _dispatcher->addCommand(*it,
                                    "admin",
                                    RawBSONSerializable(BSON("getLastError" << true <<
                                                             "fsync" << true)));
        }

        _dispatcher->sendAll();

        bool error = false;
        while (_dispatcher->numPending()) {
            ConnectionString host;
            RawBSONSerializable response;

            Status status = _dispatcher->recvAny(&host, &response);
            if (status.isOK()) {
                BSONObj obj = response.toBSON();

                LOG(3) << "Response " << obj.toString();

                // If the ok field is anything other than 1, count it as error
                if (!obj["ok"].trueValue()) {
                    error = true;
                    log() << "Config server check for host " << host
                          << " returned error: " << response;
                }
            }
            else {
                error = true;
                log() << "Config server check for host " << host
                      << " failed with status: " << status;
            }
        }

        // All responses should have been gathered by this point
        if (error) {
            clientResponse->setOk(false);
            clientResponse->setErrCode(ErrorCodes::RemoteValidationError);
            clientResponse->setErrMessage("Could not verify that config servers were active"
                                          " and reachable before write");
            return;
        }
    }

    if (!_checkConfigString(clientResponse)) {
        return;
    }

    //
    // Do the actual writes
    //

    BatchedCommandRequest configRequest( clientRequest.getBatchType() );
    clientRequest.cloneTo( &configRequest );
    configRequest.setNS( nss.coll() );

    OwnedPointerVector<ConfigResponse> responsesOwned;
    vector<ConfigResponse*>& responses = responsesOwned.mutableVector();

    //
    // Send the actual config writes
    //

    // Get as many batches as we can at once
    for (vector<ConnectionString>::const_iterator it = _configHosts.begin();
         it != _configHosts.end();
         ++it) {

        const ConnectionString& configHost = *it;
        _dispatcher->addCommand(configHost, nss.db(), configRequest);
    }

    // Send them all out
    _dispatcher->sendAll();
    // ......... the remainder of this example is omitted .........
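Note the re-targeting step above: configRequest.setNS(nss.coll()) strips the database prefix from the namespace, because the database name is passed separately as the second argument to _dispatcher->addCommand.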
Example 4: dropCollection
Status CatalogManagerReplicaSet::dropCollection(OperationContext* txn, const NamespaceString& ns) {
    logChange(
        txn, txn->getClient()->clientAddress(true), "dropCollection.start", ns.ns(), BSONObj());

    vector<ShardType> allShards;
    Status status = getAllShards(txn, &allShards);
    if (!status.isOK()) {
        return status;
    }

    LOG(1) << "dropCollection " << ns << " started";

    // Lock the collection globally so that split/migrate cannot run
    auto scopedDistLock = getDistLockManager()->lock(ns.ns(), "drop");
    if (!scopedDistLock.isOK()) {
        return scopedDistLock.getStatus();
    }

    LOG(1) << "dropCollection " << ns << " locked";

    std::map<string, BSONObj> errors;
    auto* shardRegistry = grid.shardRegistry();

    for (const auto& shardEntry : allShards) {
        auto dropResult = shardRegistry->runCommandWithNotMasterRetries(
            txn, shardEntry.getName(), ns.db().toString(), BSON("drop" << ns.coll()));
        if (!dropResult.isOK()) {
            return dropResult.getStatus();
        }

        auto dropStatus = getStatusFromCommandResult(dropResult.getValue());
        if (!dropStatus.isOK()) {
            if (dropStatus.code() == ErrorCodes::NamespaceNotFound) {
                continue;
            }

            errors.emplace(shardEntry.getHost(), dropResult.getValue());
        }
    }

    if (!errors.empty()) {
        StringBuilder sb;
        sb << "Dropping collection failed on the following hosts: ";

        for (auto it = errors.cbegin(); it != errors.cend(); ++it) {
            if (it != errors.cbegin()) {
                sb << ", ";
            }

            sb << it->first << ": " << it->second;
        }

        return {ErrorCodes::OperationFailed, sb.str()};
    }

    LOG(1) << "dropCollection " << ns << " shard data deleted";

    // Remove chunk data
    Status result = remove(txn, ChunkType::ConfigNS, BSON(ChunkType::ns(ns.ns())), 0, nullptr);
    if (!result.isOK()) {
        return result;
    }

    LOG(1) << "dropCollection " << ns << " chunk data deleted";

    // Mark the collection as dropped
    CollectionType coll;
    coll.setNs(ns);
    coll.setDropped(true);
    coll.setEpoch(ChunkVersion::DROPPED().epoch());
    coll.setUpdatedAt(grid.shardRegistry()->getNetwork()->now());

    result = updateCollection(txn, ns.ns(), coll);
    if (!result.isOK()) {
        return result;
    }

    LOG(1) << "dropCollection " << ns << " collection marked as dropped";

    // We just called updateCollection above and this would have advanced the config op time, so use
    // the latest value. On the MongoD side, we need to load the latest config metadata, which
    // indicates that the collection was dropped.
    const ChunkVersionAndOpTime droppedVersion(ChunkVersion::DROPPED(),
                                               grid.shardRegistry()->getConfigOpTime());

    for (const auto& shardEntry : allShards) {
        SetShardVersionRequest ssv = SetShardVersionRequest::makeForVersioningNoPersist(
            grid.shardRegistry()->getConfigServerConnectionString(),
            shardEntry.getName(),
            fassertStatusOK(28781, ConnectionString::parse(shardEntry.getHost())),
            ns,
            droppedVersion,
            true);

        auto ssvResult = shardRegistry->runCommandWithNotMasterRetries(
            txn, shardEntry.getName(), "admin", ssv.toBSON());

        if (!ssvResult.isOK()) {
            return ssvResult.getStatus();
    // ......... the remainder of this example is omitted .........
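In this example coll() supplies the bare collection name for the per-shard { drop: <collection> } command, while the full namespace from ns() is used for the distributed lock, the chunk-metadata cleanup, and the collection entry in the config catalog.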
Example 5: convertToCapped
Status convertToCapped(OperationContext* txn,
                       const NamespaceString& collectionName,
                       double size) {
    StringData dbname = collectionName.db();
    StringData shortSource = collectionName.coll();

    ScopedTransaction transaction(txn, MODE_IX);
    AutoGetDb autoDb(txn, collectionName.db(), MODE_X);

    bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
        !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbname);

    if (userInitiatedWritesAndNotPrimary) {
        return Status(ErrorCodes::NotMaster,
                      str::stream() << "Not primary while converting "
                                    << collectionName.ns() << " to a capped collection");
    }

    Database* const db = autoDb.getDb();
    if (!db) {
        return Status(ErrorCodes::DatabaseNotFound,
                      str::stream() << "database " << dbname << " not found");
    }

    stopIndexBuildsConvertToCapped(txn, db, collectionName);
    BackgroundOperation::assertNoBgOpInProgForDb(dbname);

    std::string shortTmpName = str::stream() << "tmp.convertToCapped." << shortSource;
    std::string longTmpName = str::stream() << dbname << "." << shortTmpName;

    WriteUnitOfWork wunit(txn);
    if (db->getCollection(longTmpName)) {
        Status status = db->dropCollection(txn, longTmpName);
        if (!status.isOK())
            return status;
    }

    const bool shouldReplicateWrites = txn->writesAreReplicated();
    txn->setReplicatedWrites(false);
    ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, txn, shouldReplicateWrites);
    Status status = cloneCollectionAsCapped(txn,
                                            db,
                                            shortSource.toString(),
                                            shortTmpName,
                                            size,
                                            true);

    if (!status.isOK()) {
        return status;
    }

    verify(db->getCollection(longTmpName));

    status = db->dropCollection(txn, collectionName.ns());
    txn->setReplicatedWrites(shouldReplicateWrites);
    if (!status.isOK())
        return status;

    status = db->renameCollection(txn, longTmpName, collectionName.ns(), false);
    if (!status.isOK())
        return status;

    getGlobalServiceContext()->getOpObserver()->onConvertToCapped(
        txn,
        NamespaceString(collectionName),
        size);

    wunit.commit();
    return Status::OK();
}
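As a worked example of the naming scheme above: for a collectionName of "test.events", coll() yields "events", so shortTmpName becomes "tmp.convertToCapped.events" and longTmpName becomes "test.tmp.convertToCapped.events". The capped clone is built under the temporary name, the original collection is dropped, and the clone is then renamed into its place.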