This article collects typical usage examples of the C++ method BSONObjBuilder::appendElementsUnique. If you have been wondering what exactly C++ BSONObjBuilder::appendElementsUnique does, how it is used, or where to find real examples of it, the hand-picked code examples below may help. You can also explore further usage examples of its containing class, BSONObjBuilder.
The text below shows 4 code examples of the BSONObjBuilder::appendElementsUnique method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
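Before the examples, here is a minimal sketch of the method's behavior. This snippet is not taken from the MongoDB sources below; the include path, function name, and field names are illustrative only. appendElementsUnique appends only those fields of the given object whose names are not already present in the builder, so values appended earlier take precedence.

#include "mongo/db/jsobj.h"  // BSONObj, BSONObjBuilder, BSON (exact path varies by source tree version)

using namespace mongo;

// Hypothetical helper showing the merge semantics of appendElementsUnique.
BSONObj buildIndexSpecForTarget() {
    BSONObjBuilder b;
    b.append("ns", "test.target");                 // set the namespace explicitly first
    BSONObj source = BSON("ns" << "test.source"
                               << "name" << "x_1");
    b.appendElementsUnique(source);                // "ns" is skipped (already set), "name" is copied
    return b.obj();                                // { ns: "test.target", name: "x_1" }
}

The same pattern appears throughout the examples below: set the fields you want to control first, then merge in an existing object without overwriting them.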
Example 1: run
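This example comes from the cross-database branch of MongoDB's renameCollection command: it copies the source collection's documents and index definitions into a newly created target collection, using appendElementsUnique to carry over each index spec while the explicitly appended "ns" field takes precedence.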
//......... some code omitted here .........
            wunit.commit();
            indexBuildRestorer.Dismiss();
            return true;
        }
    }

    // If we get here, we are renaming across databases, so we must copy all the data and
    // indexes, then remove the source collection.

    // Create the target collection. It will be removed if we fail to copy the collection.
    // TODO use a temp collection and unset the temp flag on success.
    Collection* targetColl = NULL;
    {
        CollectionOptions options;
        options.setNoIdIndex();
        if (sourceColl->isCapped()) {
            // TODO stop assuming storageSize == cappedSize
            options.capped = true;
            options.cappedSize = sourceColl->getRecordStore()->storageSize(txn);
        }

        WriteUnitOfWork wunit(txn);
        // No logOp necessary because the entire renameCollection command is one logOp.
        targetColl = targetDB->createCollection(txn, target, options);
        if (!targetColl) {
            errmsg = "Failed to create target collection.";
            return false;
        }
        wunit.commit();
    }

    // Dismissed on success
    ScopeGuard targetCollectionDropper = MakeGuard(dropCollection, txn, targetDB, target);

    MultiIndexBlock indexer(txn, targetColl);
    indexer.allowInterruption();

    // Copy the index descriptions from the source collection, adjusting the ns field.
    {
        std::vector<BSONObj> indexesToCopy;
        IndexCatalog::IndexIterator sourceIndIt =
            sourceColl->getIndexCatalog()->getIndexIterator( txn, true );
        while (sourceIndIt.more()) {
            const BSONObj currIndex = sourceIndIt.next()->infoObj();

            // Process the source index.
            BSONObjBuilder newIndex;
            newIndex.append("ns", target);
            newIndex.appendElementsUnique(currIndex);
            indexesToCopy.push_back(newIndex.obj());
        }
        indexer.init(indexesToCopy);
    }

    {
        // Copy over all the data from source collection to target collection.
        boost::scoped_ptr<RecordIterator> sourceIt(sourceColl->getIterator(txn));
        while (!sourceIt->isEOF()) {
            txn->checkForInterrupt(false);

            const BSONObj obj = sourceColl->docFor(txn, sourceIt->getNext());

            WriteUnitOfWork wunit(txn);
            // No logOp necessary because the entire renameCollection command is one logOp.
            Status status = targetColl->insertDocument(txn, obj, &indexer, true).getStatus();
            if (!status.isOK())
                return appendCommandStatus(result, status);
            wunit.commit();
        }
    }

    Status status = indexer.doneInserting();
    if (!status.isOK())
        return appendCommandStatus(result, status);

    {
        // Getting here means we successfully built the target copy. We now remove the
        // source collection and finalize the rename.
        WriteUnitOfWork wunit(txn);

        Status status = sourceDB->dropCollection(txn, source);
        if (!status.isOK())
            return appendCommandStatus(result, status);

        indexer.commit();

        if (!fromRepl) {
            repl::logOp(txn, "c", (dbname + ".$cmd").c_str(), cmdObj);
        }

        wunit.commit();
    }

    indexBuildRestorer.Dismiss();
    targetCollectionDropper.Dismiss();
    return true;
}
Example 2: initialize
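This example is from the $out aggregation stage (DocumentSourceOut::initialize): it creates a temporary output collection, and appendElementsUnique merges the saved collection options into the "create" command without overwriting the fields already set on the builder.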
void DocumentSourceOut::initialize() {
    invariant(_mongod);
    DBClientBase* conn = _mongod->directClient();

    // Save the original collection options and index specs so we can check they didn't change
    // during computation.
    _originalOutOptions = _mongod->getCollectionOptions(_outputNs);
    _originalIndexes = conn->getIndexSpecs(_outputNs.ns());

    // Check if it's sharded or capped to make sure we have a chance of succeeding before we do all
    // the work. If the collection becomes capped during processing, the collection options will
    // have changed, and the $out will fail. If it becomes sharded during processing, the final
    // rename will fail.
    uassert(17017,
            str::stream() << "namespace '" << _outputNs.ns()
                          << "' is sharded so it can't be used for $out'",
            !_mongod->isSharded(_outputNs));
    uassert(17152,
            str::stream() << "namespace '" << _outputNs.ns()
                          << "' is capped so it can't be used for $out",
            _originalOutOptions["capped"].eoo());

    // We will write all results into a temporary collection, then rename the temporary collection
    // to be the target collection once we are done.
    _tempNs = NamespaceString(str::stream() << _outputNs.db() << ".tmp.agg_out."
                                            << aggOutCounter.addAndFetch(1));

    // Create output collection, copying options from existing collection if any.
    {
        BSONObjBuilder cmd;
        cmd << "create" << _tempNs.coll();
        cmd << "temp" << true;
        cmd.appendElementsUnique(_originalOutOptions);

        BSONObj info;
        bool ok = conn->runCommand(_outputNs.db().toString(), cmd.done(), info);
        uassert(16994,
                str::stream() << "failed to create temporary $out collection '" << _tempNs.ns()
                              << "': "
                              << info.toString(),
                ok);
    }

    // copy indexes to _tempNs
    for (std::list<BSONObj>::const_iterator it = _originalIndexes.begin();
         it != _originalIndexes.end();
         ++it) {
        MutableDocument index((Document(*it)));
        index.remove("_id"); // indexes shouldn't have _ids but some existing ones do
        index["ns"] = Value(_tempNs.ns());

        BSONObj indexBson = index.freeze().toBson();
        conn->insert(_tempNs.getSystemIndexesCollection(), indexBson);
        BSONObj err = conn->getLastErrorDetailed();
        uassert(16995,
                str::stream() << "copying index for $out failed."
                              << " index: "
                              << indexBson
                              << " error: "
                              << err,
                DBClientWithCommands::getLastErrorString(err).empty());
    }
    _initialized = true;
}
Example 3: renameCollection
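This example is a later refactoring of the same cross-database rename logic shown in Example 1, returning a Status instead of writing into a command result; appendElementsUnique again copies each source index spec while preserving the freshly appended "ns" field.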
//......... some code omitted here .........
        wunit.commit();
    }

    // If we get here, we are renaming across databases, so we must copy all the data and
    // indexes, then remove the source collection.

    // Create the target collection. It will be removed if we fail to copy the collection.
    // TODO use a temp collection and unset the temp flag on success.
    Collection* targetColl = nullptr;
    {
        CollectionOptions options = sourceColl->getCatalogEntry()->getCollectionOptions(txn);

        WriteUnitOfWork wunit(txn);

        // No logOp necessary because the entire renameCollection command is one logOp.
        bool shouldReplicateWrites = txn->writesAreReplicated();
        txn->setReplicatedWrites(false);
        targetColl = targetDB->createCollection(txn,
                                                target.ns(),
                                                options,
                                                false); // _id index build with others later.
        txn->setReplicatedWrites(shouldReplicateWrites);
        if (!targetColl) {
            return Status(ErrorCodes::OutOfDiskSpace, "Failed to create target collection.");
        }
        wunit.commit();
    }

    // Dismissed on success
    ScopeGuard targetCollectionDropper = MakeGuard(dropCollection, txn, targetDB, target.ns());

    MultiIndexBlock indexer(txn, targetColl);
    indexer.allowInterruption();

    // Copy the index descriptions from the source collection, adjusting the ns field.
    {
        std::vector<BSONObj> indexesToCopy;
        IndexCatalog::IndexIterator sourceIndIt =
            sourceColl->getIndexCatalog()->getIndexIterator(txn, true);
        while (sourceIndIt.more()) {
            const BSONObj currIndex = sourceIndIt.next()->infoObj();

            // Process the source index.
            BSONObjBuilder newIndex;
            newIndex.append("ns", target.ns());
            newIndex.appendElementsUnique(currIndex);
            indexesToCopy.push_back(newIndex.obj());
        }
        indexer.init(indexesToCopy);
    }

    {
        // Copy over all the data from source collection to target collection.
        auto cursor = sourceColl->getCursor(txn);
        while (auto record = cursor->next()) {
            txn->checkForInterrupt();

            const auto obj = record->data.releaseToBson();

            WriteUnitOfWork wunit(txn);
            // No logOp necessary because the entire renameCollection command is one logOp.
            bool shouldReplicateWrites = txn->writesAreReplicated();
            txn->setReplicatedWrites(false);
            Status status = targetColl->insertDocument(txn, obj, &indexer, true).getStatus();
            txn->setReplicatedWrites(shouldReplicateWrites);
            if (!status.isOK())
                return status;
            wunit.commit();
        }
    }

    Status status = indexer.doneInserting();
    if (!status.isOK())
        return status;

    {
        // Getting here means we successfully built the target copy. We now remove the
        // source collection and finalize the rename.
        WriteUnitOfWork wunit(txn);

        bool shouldReplicateWrites = txn->writesAreReplicated();
        txn->setReplicatedWrites(false);
        Status status = sourceDB->dropCollection(txn, source.ns());
        txn->setReplicatedWrites(shouldReplicateWrites);
        if (!status.isOK())
            return status;

        indexer.commit();

        getGlobalServiceContext()->getOpObserver()->onRenameCollection(
            txn, NamespaceString(source), NamespaceString(target), dropTarget, stayTemp);

        wunit.commit();
    }

    targetCollectionDropper.Dismiss();
    return Status::OK();
}
Example 4: getLastError
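This example is from mongos's ClientInfo::getLastError: in the single-shard case with pending writebacks, appendElementsUnique folds the shard's raw getLastError response into the result without overwriting fields already appended from the writeback result.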
bool ClientInfo::getLastError( const BSONObj& options , BSONObjBuilder& result , bool fromWriteBackListener ) {
    set<string> * shards = getPrev();

    if ( shards->size() == 0 ) {
        result.appendNull( "err" );
        return true;
    }

    vector<WBInfo> writebacks;

    // handle single server
    if ( shards->size() == 1 ) {
        string theShard = *(shards->begin() );
        ShardConnection conn( theShard , "", true );

        BSONObj res;
        bool ok = false;
        try{
            ok = conn->runCommand( "admin" , options , res );
        }
        catch( std::exception &e ){
            warning() << "could not get last error from shard " << theShard << causedBy( e ) << endl;

            // Catch everything that happens here, since we need to ensure we return our connection when we're
            // finished.
            conn.done();
            return false;
        }

        res = res.getOwned();
        conn.done();
        _addWriteBack( writebacks , res );

        // hit other machines just to block
        for ( set<string>::const_iterator i=sinceLastGetError().begin(); i!=sinceLastGetError().end(); ++i ) {
            string temp = *i;
            if ( temp == theShard )
                continue;

            ShardConnection conn( temp , "" );
            try {
                _addWriteBack( writebacks , conn->getLastErrorDetailed() );
            }
            catch( std::exception &e ){
                warning() << "could not clear last error from shard " << temp << causedBy( e ) << endl;
            }
            conn.done();
        }
        clearSinceLastGetError();

        if ( writebacks.size() ){
            vector<BSONObj> v = _handleWriteBacks( writebacks , fromWriteBackListener );
            if ( v.size() == 0 && fromWriteBackListener ) {
                // ok
            }
            else {
                assert( v.size() == 1 );
                result.appendElements( v[0] );
                result.appendElementsUnique( res );
                result.append( "writebackGLE" , v[0] );
                result.append( "initialGLEHost" , theShard );
            }
        }
        else {
            result.append( "singleShard" , theShard );
            result.appendElements( res );
        }

        return ok;
    }

    BSONArrayBuilder bbb( result.subarrayStart( "shards" ) );
    BSONObjBuilder shardRawGLE;

    long long n = 0;

    int updatedExistingStat = 0; // 0 is none, -1 has but false, 1 has true

    // hit each shard
    vector<string> errors;
    vector<BSONObj> errorObjects;
    for ( set<string>::iterator i = shards->begin(); i != shards->end(); i++ ) {
        string theShard = *i;
        bbb.append( theShard );
        ShardConnection conn( theShard , "", true );
        BSONObj res;
        bool ok = false;
        try {
            ok = conn->runCommand( "admin" , options , res );
            shardRawGLE.append( theShard , res );
        }
        catch( std::exception &e ){
//......... some code omitted here .........