本文整理汇总了C++中BSONObjBuilder::obj方法的典型用法代码示例。如果您正苦于以下问题:C++ BSONObjBuilder::obj方法的具体用法?C++ BSONObjBuilder::obj怎么用?C++ BSONObjBuilder::obj使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类BSONObjBuilder
的用法示例。
在下文中一共展示了BSONObjBuilder::obj方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: i
// Renames collection 'fromNS' to 'toNS' within this MMAPv1 database, then
// rewrites every spec in system.indexes whose "ns" field pointed at the old
// name and renames each index's underlying namespace to match.
// Returns the first failing Status; Status::OK() on success.
Status MMAPV1DatabaseCatalogEntry::renameCollection(OperationContext* txn,
StringData fromNS,
StringData toNS,
bool stayTemp) {
// Rename the collection's own namespace first; bail out on failure.
Status s = _renameSingleNamespace(txn, fromNS, toNS, stayTemp);
if (!s.isOK())
return s;
NamespaceDetails* details = _namespaceIndex.details(toNS);
invariant(details);
// Walk every record of system.indexes looking for specs that still
// reference the old collection name.
RecordStoreV1Base* systemIndexRecordStore = _getIndexRecordStore();
auto cursor = systemIndexRecordStore->getCursor(txn);
while (auto record = cursor->next()) {
BSONObj oldIndexSpec = record->data.releaseToBson();
if (fromNS != oldIndexSpec["ns"].valuestrsafe())
continue;
// Build a copy of the spec with its "ns" field rewritten to toNS;
// all other fields are carried over unchanged.
BSONObj newIndexSpec;
{
BSONObjBuilder b;
BSONObjIterator i(oldIndexSpec);
while (i.more()) {
BSONElement e = i.next();
if (strcmp(e.fieldName(), "ns") != 0)
b.append(e);
else
b << "ns" << toNS;
}
newIndexSpec = b.obj();
}
// Insert the rewritten spec as a new record; the stale record is
// deleted at the bottom of the loop.
StatusWith<RecordId> newIndexSpecLoc = systemIndexRecordStore->insertRecord(
txn, newIndexSpec.objdata(), newIndexSpec.objsize(), false);
if (!newIndexSpecLoc.isOK())
return newIndexSpecLoc.getStatus();
const std::string& indexName = oldIndexSpec.getStringField("name");
{
// Fix the IndexDetails pointer.
// writing() registers the mutation with the recovery unit so the
// change is journaled / rolled back correctly.
int indexI = getCollectionCatalogEntry(toNS)->_findIndexNumber(txn, indexName);
IndexDetails& indexDetails = details->idx(indexI);
*txn->recoveryUnit()->writing(&indexDetails.info) =
DiskLoc::fromRecordId(newIndexSpecLoc.getValue());
}
{
// Move the underlying namespace.
std::string oldIndexNs = IndexDescriptor::makeIndexNamespace(fromNS, indexName);
std::string newIndexNs = IndexDescriptor::makeIndexNamespace(toNS, indexName);
Status s = _renameSingleNamespace(txn, oldIndexNs, newIndexNs, false);
if (!s.isOK())
return s;
}
// Remove the stale spec that still mentions fromNS.
systemIndexRecordStore->deleteRecord(txn, record->id);
}
return Status::OK();
}
示例2: GetNativeSystemInfo
//......... part of the code is omitted here .........
// [fragment] Tail of a Windows host-information probe. NOTE(review): the
// beginning of this function is not shown; the declarations of osvi, verstr,
// osName, ntsysinfo, bExtra, fileZeroNeeded etc. live in the omitted part.
// The visible code maps OSVERSIONINFOEX data to a readable OS name, decides
// whether data files must be zeroed (KB2731284 workaround), and records the
// CPU architecture.
verstr << osvi.dwMajorVersion << "." << osvi.dwMinorVersion;
if (osvi.wServicePackMajor)
verstr << " SP" << osvi.wServicePackMajor;
verstr << " (build " << osvi.dwBuildNumber << ")";
osName = "Microsoft ";
// NT 6.x family: Vista/7/8/8.1 and Server 2008/2008R2/2012/2012R2.
switch (osvi.dwMajorVersion) {
case 6:
switch (osvi.dwMinorVersion) {
case 3:
if (osvi.wProductType == VER_NT_WORKSTATION)
osName += "Windows 8.1";
else
osName += "Windows Server 2012 R2";
break;
case 2:
if (osvi.wProductType == VER_NT_WORKSTATION)
osName += "Windows 8";
else
osName += "Windows Server 2012";
break;
case 1:
if (osvi.wProductType == VER_NT_WORKSTATION)
osName += "Windows 7";
else
osName += "Windows Server 2008 R2";
// Windows 6.1 is either Windows 7 or Windows 2008 R2. There is no SP2 for
// either of these two operating systems, but the check will hold if one
// were released. This code assumes that SP2 will include fix for
// http://support.microsoft.com/kb/2731284.
//
if ((osvi.wServicePackMajor >= 0) && (osvi.wServicePackMajor < 2)) {
if (isKB2731284OrLaterUpdateInstalled()) {
log() << "Hotfix KB2731284 or later update is installed, no need "
"to zero-out data files";
fileZeroNeeded = false;
} else {
log() << "Hotfix KB2731284 or later update is not installed, will "
"zero-out data files";
fileZeroNeeded = true;
}
}
break;
case 0:
if (osvi.wProductType == VER_NT_WORKSTATION)
osName += "Windows Vista";
else
osName += "Windows Server 2008";
break;
default:
osName += "Windows NT version ";
osName += verstr.str();
break;
}
break;
// NT 5.x family: 2000/XP/Server 2003.
case 5:
switch (osvi.dwMinorVersion) {
case 2:
osName += "Windows Server 2003";
break;
case 1:
osName += "Windows XP";
break;
case 0:
if (osvi.wProductType == VER_NT_WORKSTATION)
osName += "Windows 2000 Professional";
else
osName += "Windows 2000 Server";
break;
default:
osName += "Windows NT version ";
osName += verstr.str();
break;
}
break;
}
} else {
// unable to get any version data
osName += "Windows NT";
}
// Map the processor-architecture constant to the string reported to clients.
if (ntsysinfo.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
cpuArch = "x86_64";
} else if (ntsysinfo.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_INTEL) {
cpuArch = "x86";
} else if (ntsysinfo.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_IA64) {
cpuArch = "ia64";
} else {
cpuArch = "unknown";
}
osType = "Windows";
osVersion = verstr.str();
hasNuma = checkNumaEnabled();
_extraStats = bExtra.obj();
// Lazily initialize the process-status API wrapper used for memory stats.
if (psapiGlobal == NULL) {
psapiGlobal = new PsApiInit();
}
}
示例3: report
// Builds the one-line, human-readable summary of the operation described by
// 'curop' — op type, namespace, (redacted) query/command, per-op counters,
// lock statistics and execution time — as used for the slow-op log line.
// The append order below is the log format; do not reorder.
string OpDebug::report(const CurOp& curop, const SingleThreadedLockStats& lockStats) const {
StringBuilder s;
if ( iscommand )
s << "command ";
else
s << opToString( op ) << ' ';
s << ns;
if ( ! query.isEmpty() ) {
if ( iscommand ) {
s << " command: ";
Command* curCommand = curop.getCommand();
if (curCommand) {
// Let the command implementation scrub sensitive fields (e.g.
// credentials) before the object is logged.
mutablebson::Document cmdToLog(query, mutablebson::Document::kInPlaceDisabled);
curCommand->redactForLogging(&cmdToLog);
s << curCommand->name << " ";
s << cmdToLog.toString();
}
else { // Should not happen but we need to handle curCommand == NULL gracefully
s << query.toString();
}
}
else {
s << " query: ";
s << query.toString();
}
}
if (!planSummary.empty()) {
s << " planSummary: " << planSummary.toString();
}
if ( ! updateobj.isEmpty() ) {
s << " update: ";
updateobj.toString( s );
}
// Each OPDEBUG_TOSTRING_HELP* macro appends " <name>:<value>" for the
// member of the same name when it has been set for this operation.
OPDEBUG_TOSTRING_HELP( cursorid );
OPDEBUG_TOSTRING_HELP( ntoreturn );
OPDEBUG_TOSTRING_HELP( ntoskip );
OPDEBUG_TOSTRING_HELP_BOOL( exhaust );
OPDEBUG_TOSTRING_HELP( nscanned );
OPDEBUG_TOSTRING_HELP( nscannedObjects );
OPDEBUG_TOSTRING_HELP_BOOL( idhack );
OPDEBUG_TOSTRING_HELP_BOOL( scanAndOrder );
OPDEBUG_TOSTRING_HELP( nmoved );
OPDEBUG_TOSTRING_HELP( nMatched );
OPDEBUG_TOSTRING_HELP( nModified );
OPDEBUG_TOSTRING_HELP( ninserted );
OPDEBUG_TOSTRING_HELP( ndeleted );
OPDEBUG_TOSTRING_HELP_BOOL( fastmod );
OPDEBUG_TOSTRING_HELP_BOOL( fastmodinsert );
OPDEBUG_TOSTRING_HELP_BOOL( upsert );
OPDEBUG_TOSTRING_HELP_BOOL( cursorExhausted );
OPDEBUG_TOSTRING_HELP( keyUpdates );
OPDEBUG_TOSTRING_HELP( writeConflicts );
if ( extra.len() )
s << " " << extra.str();
if ( ! exceptionInfo.empty() ) {
s << " exception: " << exceptionInfo.msg;
if ( exceptionInfo.code )
s << " code:" << exceptionInfo.code;
}
s << " numYields:" << curop.numYields();
OPDEBUG_TOSTRING_HELP( nreturned );
if (responseLength > 0) {
s << " reslen:" << responseLength;
}
// Lock statistics are rendered through a throwaway builder.
{
BSONObjBuilder locks;
lockStats.report(&locks);
s << " locks:" << locks.obj().toString();
}
s << " " << executionTime << "ms";
return s.str();
}
示例4: getOutputSorts
// Returns the sort orders this $group stage can guarantee on its output
// (allPrefixes() expands the builder into the set of all its prefixes).
// A non-empty result is only possible when the group is streaming (input
// arrives sorted by the _id fields) or has spilled to disk (output is
// sorted by _id).
BSONObjSet DocumentSourceGroup::getOutputSorts() {
if (!_initialized) {
initialize();
}
if (!(_streaming || _spilled)) {
return BSONObjSet();
}
BSONObjBuilder sortOrder;
if (_idFieldNames.empty()) {
// Single _id expression (not a document of named sub-fields).
if (_spilled) {
sortOrder.append("_id", 1);
} else {
// We have an expression like {_id: "$a"}. Check if this is a FieldPath, and if it is,
// get the sort order out of it.
if (auto obj = dynamic_cast<ExpressionFieldPath*>(_idExpressions[0].get())) {
FieldPath _idSort = obj->getFieldPath();
sortOrder.append(
"_id",
_inputSort.getIntField(_idSort.getFieldName(_idSort.getPathLength() - 1)));
}
}
} else if (_streaming) {
// At this point, we know that _streaming is true, so _id must have only contained
// ExpressionObjects, ExpressionConstants or ExpressionFieldPaths. We now process each
// '_idExpression'.
// We populate 'fieldMap' such that each key is a field the input is sorted by, and the
// value is where that input field is located within the _id document. For example, if our
// _id object is {_id: {x: {y: "$a.b"}}}, 'fieldMap' would be: {'a.b': '_id.x.y'}.
StringMap<std::string> fieldMap;
for (size_t i = 0; i < _idFieldNames.size(); i++) {
intrusive_ptr<Expression> exp = _idExpressions[i];
if (auto obj = dynamic_cast<ExpressionObject*>(exp.get())) {
// _id is an object containing a nested document, such as: {_id: {x: {y: "$b"}}}.
getFieldPathMap(obj, "_id." + _idFieldNames[i], &fieldMap);
} else if (auto fieldPath = dynamic_cast<ExpressionFieldPath*>(exp.get())) {
FieldPath _idSort = fieldPath->getFieldPath();
fieldMap[_idSort.getFieldName(_idSort.getPathLength() - 1)] =
"_id." + _idFieldNames[i];
}
}
// Because the order of '_inputSort' is important, we go through each field we are sorted on
// and append it to the BSONObjBuilder in order.
for (BSONElement sortField : _inputSort) {
std::string sortString = sortField.fieldNameStringData().toString();
auto itr = fieldMap.find(sortString);
// If our sort order is (a, b, c), we could not have converted to a streaming $group if
// our _id was predicated on (a, c) but not 'b'. Verify that this is true.
invariant(itr != fieldMap.end());
sortOrder.append(itr->second, _inputSort.getIntField(sortString));
}
} else {
// We are blocking and have spilled to disk.
std::vector<std::string> outputSort;
for (size_t i = 0; i < _idFieldNames.size(); i++) {
intrusive_ptr<Expression> exp = _idExpressions[i];
if (auto obj = dynamic_cast<ExpressionObject*>(exp.get())) {
// _id is an object containing a nested document, such as: {_id: {x: {y: "$b"}}}.
getFieldPathListForSpilled(obj, "_id." + _idFieldNames[i], &outputSort);
} else {
outputSort.push_back("_id." + _idFieldNames[i]);
}
}
// Spilled output is sorted ascending on every _id sub-field.
for (auto&& field : outputSort) {
sortOrder.append(field, 1);
}
}
return allPrefixes(sortOrder.obj());
}
示例5: run
// reIndex command: drops and rebuilds every index on the named collection.
// The collection name comes from the command's first element; on success the
// reply contains nIndexesWas, nIndexes and the rebuilt index specs.
// Fix vs. original: the list iterator no longer shadows the DBClientCursor
// variable (both were named 'i'), and the inner createIndex Status no longer
// shadows the outer dropAllIndexes Status (both were named 's').
bool run(OperationContext* txn, const string& dbname , BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
    DBDirectClient db;

    // First element of the command object names the collection to reindex.
    BSONElement e = jsobj.firstElement();
    string toDeleteNs = dbname + '.' + e.valuestr();
    LOG(0) << "CMD: reIndex " << toDeleteNs << endl;

    Lock::DBWrite dbXLock(txn->lockState(), dbname);
    Client::Context ctx(toDeleteNs);

    Collection* collection = ctx.db()->getCollection( txn, toDeleteNs );
    if ( !collection ) {
        errmsg = "ns not found";
        return false;
    }

    // Reindexing while a background index build is running is not allowed;
    // stop any in-progress builds (restored at the end).
    BackgroundOperation::assertNoBgOpInProgForNs( toDeleteNs );
    std::vector<BSONObj> indexesInProg = stopIndexBuilds(txn, ctx.db(), jsobj);

    // Collect the existing index specs (minus the version field "v") so they
    // can be re-created after the drop; 'b' accumulates them for the reply.
    list<BSONObj> all;
    auto_ptr<DBClientCursor> cursor = db.query( dbname + ".system.indexes" , BSON( "ns" << toDeleteNs ) , 0 , 0 , 0 , QueryOption_SlaveOk );
    BSONObjBuilder b;
    while ( cursor->more() ) {
        const BSONObj spec = cursor->next().removeField("v").getOwned();
        const BSONObj key = spec.getObjectField("key");
        const Status keyStatus = validateKeyPattern(key);
        if (!keyStatus.isOK()) {
            errmsg = str::stream()
                << "Cannot rebuild index " << spec << ": " << keyStatus.reason()
                << " For more info see http://dochub.mongodb.org/core/index-validation";
            return false;
        }
        b.append( BSONObjBuilder::numStr( all.size() ) , spec );
        all.push_back( spec );
    }

    result.appendNumber( "nIndexesWas", collection->getIndexCatalog()->numIndexesTotal() );

    Status s = collection->getIndexCatalog()->dropAllIndexes(txn, true);
    if ( !s.isOK() ) {
        errmsg = "dropIndexes failed";
        return appendCommandStatus( result, s );
    }

    // Recreate each saved spec as a foreground build (background=false).
    for ( list<BSONObj>::iterator it = all.begin(); it != all.end(); ++it ) {
        BSONObj o = *it;
        LOG(1) << "reIndex ns: " << toDeleteNs << " index: " << o << endl;
        Status indexStatus = collection->getIndexCatalog()->createIndex(txn, o, false);
        if ( !indexStatus.isOK() )
            return appendCommandStatus( result, indexStatus );
    }

    result.append( "nIndexes" , (int)all.size() );
    result.appendArray( "indexes" , b.obj() );

    IndexBuilder::restoreIndexes(indexesInProg);
    return true;
}
示例6: searchCommand
// Executes a haystack ("geoSearch") query: scans every bucket within
// 'maxDistance' of the point 'n', filters on the indexed non-geo fields found
// in 'search', and appends up to 'limit' results plus timing/btree stats to
// 'result'.
void searchCommand(NamespaceDetails* nsd,
const BSONObj& n /*near*/, double maxDistance, const BSONObj& search,
BSONObjBuilder& result, unsigned limit) {
Timer t;
LOG(1) << "SEARCH near:" << n << " maxDistance:" << maxDistance
<< " search: " << search << endl;
// Hash the near point's two coordinates into bucket coordinates.
int x, y;
{
BSONObjIterator i(n);
x = hash(i.next());
y = hash(i.next());
}
// Number of buckets to examine on each side of the center bucket.
int scale = static_cast<int>(ceil(maxDistance / _bucketSize));
GeoHaystackSearchHopper hopper(n, maxDistance, limit, _geoField);
long long btreeMatches = 0;
// TODO(hk): Consider starting with a (or b)=0, then going to a=+-1, then a=+-2, etc.
// Would want a HaystackKeyIterator or similar for this, but it'd be a nice
// encapsulation allowing us to S2-ify this trivially/abstract the key details.
for (int a = -scale; a <= scale && !hopper.limitReached(); ++a) {
for (int b = -scale; b <= scale && !hopper.limitReached(); ++b) {
// Build the index key for this bucket: the hashed cell string
// followed by the value of each indexed non-geo field (null when the
// search term does not constrain that field).
BSONObjBuilder bb;
bb.append("", makeString(x + a, y + b));
for (unsigned i = 0; i < _otherFields.size(); i++) {
// See if the non-geo field we're indexing on is in the provided search term.
BSONElement e = search.getFieldDotted(_otherFields[i]);
if (e.eoo())
bb.appendNull("");
else
bb.appendAs(e, "");
}
BSONObj key = bb.obj();
GEOQUADDEBUG("KEY: " << key);
// TODO(hk): this keeps a set of all DiskLoc seen in this pass so that we don't
// consider the element twice. Do we want to instead store a hash of the set?
// Is this often big?
set<DiskLoc> thisPass;
// Lookup from key to key, inclusive.
scoped_ptr<BtreeCursor> cursor(BtreeCursor::make(nsd,
*getDetails(),
key,
key,
true,
1));
while (cursor->ok() && !hopper.limitReached()) {
pair<set<DiskLoc>::iterator, bool> p = thisPass.insert(cursor->currLoc());
// If a new element was inserted (haven't seen the DiskLoc before), p.second
// is true.
if (p.second) {
// The hopper applies the exact distance/limit checks.
hopper.consider(cursor->currLoc());
GEOQUADDEBUG("\t" << cursor->current());
btreeMatches++;
}
cursor->advance();
}
}
}
// Emit results array and a stats subdocument (time, matches, result count).
BSONArrayBuilder arr(result.subarrayStart("results"));
int num = hopper.appendResultsTo(&arr);
arr.done();
{
BSONObjBuilder b(result.subobjStart("stats"));
b.append("time", t.millis());
b.appendNumber("btreeMatches", btreeMatches);
b.append("n", num);
b.done();
}
}
示例7: operator
// Batch callback used while cloning a collection: inserts every document of
// the current cursor batch into the local 'to_collection', or — for
// system.indexes — stashes the (fixed-up) index specs in 'storedForLater'.
// Logs progress roughly once a minute. Uses functor state: n (docs so far),
// lastLog / saveLast (log throttling timestamps), isindex, _isCapped, etc.
void operator()(DBClientCursorBatchIterator &i) {
const string to_dbname = nsToDatabase(to_collection);
while (i.moreInCurrentBatch()) {
// Only check the clock every 128 documents to keep time() off the
// per-document fast path.
if (n % 128 == 127) {
time_t now = time(0);
if (now - lastLog >= 60) {
// report progress
if (lastLog) {
log() << "clone " << to_collection << ' ' << n << endl;
}
lastLog = now;
}
mayInterrupt(_mayBeInterrupted);
}
BSONObj js = i.nextSafe();
++n;
if (isindex) {
verify(nsToCollectionSubstring(from_collection) == "system.indexes");
storedForLater->push_back(fixindex(js, to_dbname).getOwned());
}
else {
try {
LOCK_REASON(lockReason, "cloner: copying documents into local collection");
Client::ReadContext ctx(to_collection, lockReason);
if (_isCapped) {
Collection *cl = getCollection(to_collection);
verify(cl->isCapped());
// Capped documents arrive with their primary key under "$_";
// strip it from the row and insert with that explicit PK so
// the capped order is preserved.
BSONObj pk = js["$_"].Obj();
BSONObjBuilder rowBuilder;
BSONObjIterator it(js);
while (it.moreWithEOO()) {
BSONElement e = it.next();
if (e.eoo()) {
break;
}
if (!mongoutils::str::equals(e.fieldName(), "$_")) {
rowBuilder.append(e);
}
}
BSONObj row = rowBuilder.obj();
CappedCollection *cappedCl = cl->as<CappedCollection>();
bool indexBitChanged = false;
cappedCl->insertObjectWithPK(pk, row, Collection::NO_LOCKTREE, &indexBitChanged);
// Hack copied from Collection::insertObject. TODO: find a better way to do this
if (indexBitChanged) {
cl->noteMultiKeyChanged();
}
}
else {
insertObject(to_collection, js, 0, logForRepl);
}
}
catch (UserException& e) {
// Log the offending document, then let the caller abort the clone.
error() << "error: exception cloning object in " << from_collection << ' ' << e.what() << " obj:" << js.toString() << '\n';
throw;
}
RARELY if ( time( 0 ) - saveLast > 60 ) {
log() << n << " objects cloned so far from collection " << from_collection << endl;
saveLast = time( 0 );
}
}
}
}
示例8: toObject
// Serialize this model into a fresh builder and return the resulting BSONObj.
BSONObj Model::toObject(){
    BSONObjBuilder builder;
    serialize( builder );
    return builder.obj();
}
示例9: save
// Serializes this model and persists it to the model server: inserts a new
// document (generating an _id) when no _id is known, otherwise upserts by
// _id. Throws UserException 13121 when the serialized _id disagrees with the
// stored one, and UserException 9003 when 'safe' and getLastError reports an
// error.
void Model::save( bool safe ){
ScopedDbConnection conn( modelServer() );
BSONObjBuilder b;
serialize( b );
// Scan the serialized fields for an "_id" element (if the subclass emitted one).
BSONElement myId;
{
BSONObjIterator i = b.iterator();
while ( i.more() ){
BSONElement e = i.next();
if ( strcmp( e.fieldName() , "_id" ) == 0 ){
myId = e;
break;
}
}
}
if ( myId.type() ){
if ( _id.isEmpty() ){
// First time we learn this model's _id: remember it.
_id = myId.wrap();
}
else if ( myId.woCompare( _id.firstElement() ) ){
// Serialized _id must match the _id we saved/loaded earlier.
stringstream ss;
ss << "_id from serialize and stored differ: ";
ss << "[" << myId << "] != ";
ss << "[" << _id.firstElement() << "]";
throw UserException( 13121 , ss.str() );
}
}
if ( _id.isEmpty() ){
// No _id anywhere: generate one and insert as a brand-new document.
OID oid;
oid.init();
b.appendOID( "_id" , &oid );
BSONObj o = b.obj();
conn->insert( getNS() , o );
_id = o["_id"].wrap().getOwned();
log(4) << "inserted new model " << getNS() << " " << o << endl;
}
else {
// Known _id: upsert the full document keyed on _id.
if ( myId.eoo() ){
// serialize() did not emit _id; add the stored one to the update doc.
myId = _id["_id"];
b.append( myId );
}
assert( ! myId.eoo() );
BSONObjBuilder qb;
qb.append( myId );
BSONObj q = qb.obj();
BSONObj o = b.obj();
log(4) << "updated model" << getNS() << " " << q << " " << o << endl;
conn->update( getNS() , q , o , true );
}
// With 'safe', surface any server-side write error as an exception.
string errmsg = "";
if ( safe )
errmsg = conn->getLastError();
conn.done();
if ( safe && errmsg.size() )
throw UserException( 9003 , (string)"error on Model::save: " + errmsg );
}
示例10: run
//......... part of the code is omitted here .........
// [fragment] Middle of a renameCollection command implementation; both the
// beginning (argument parsing, drop-target handling) and the end (the copy
// loop body and index handling) are elided by the listing.
// NOTE(review): variables such as ctx, source, target, sourceDB, targetDB,
// indexesInProg, capped and size are declared in the omitted part.
}
// Drop of the existing target failed: report and restore index builds.
Status s = cc().database()->dropCollection( target );
if ( !s.isOK() ) {
errmsg = s.toString();
restoreIndexBuildsOnSource( indexesInProg, source );
return false;
}
}
// If we are renaming in the same database, just
// rename the namespace and we're done.
if ( sourceDB == targetDB ) {
Status s = ctx.db()->renameCollection( source, target,
cmdObj["stayTemp"].trueValue() );
if ( !s.isOK() ) {
errmsg = s.toString();
restoreIndexBuildsOnSource( indexesInProg, source );
return false;
}
return true;
}
// Otherwise, we are enaming across databases, so we must copy all
// the data and then remove the source collection.
// Create the target collection.
Collection* targetColl = NULL;
if ( capped ) {
// Preserve cappedness/size of the source; _id index is added later.
BSONObjBuilder spec;
spec.appendBool( "capped", true );
spec.append( "size", double( size ) );
spec.appendBool( "autoIndexId", false );
userCreateNS( target.c_str(), spec.obj(), errmsg, false );
targetColl = ctx.db()->getCollection( target );
}
else {
CollectionOptions options;
options.setNoIdIndex();
// No logOp necessary because the entire renameCollection command is one logOp.
targetColl = ctx.db()->createCollection( target, options );
}
if ( !targetColl ) {
errmsg = "Failed to create target collection.";
restoreIndexBuildsOnSource( indexesInProg, source );
return false;
}
}
// Copy over all the data from source collection to target collection.
bool insertSuccessful = true;
boost::scoped_ptr<CollectionIterator> sourceIt;
{
Client::Context srcCtx( source );
Collection* sourceColl = srcCtx.db()->getCollection( source );
sourceIt.reset( sourceColl->getIterator( DiskLoc(), false, CollectionScanParams::FORWARD ) );
}
Collection* targetColl = NULL;
// Copy loop: each document is fetched under the source context; the rest of
// the loop body is elided by the listing.
while ( !sourceIt->isEOF() ) {
BSONObj o;
{
Client::Context srcCtx( source );
o = sourceIt->getNext().obj();
}
示例11: populateDocumentWithQueryFields
// Populates 'doc' with the equality-predicate fields of 'query' so that an
// upsert-insert starts from the fields the query implies. Only a top-level
// EQ predicate or an AND of EQ predicates contributes; any other query shape
// is ignored (returns OK with 'doc' untouched). For replacement-style updates
// only the _id field is copied.
Status UpdateDriver::populateDocumentWithQueryFields(const CanonicalQuery* query,
mutablebson::Document& doc) const {
MatchExpression* root = query->root();
MatchExpression::MatchType rootType = root->matchType();
// These copies are needed until we apply the modifiers at the end.
std::vector<BSONObj> copies;
// We only care about equality and "and"ed equality fields, everything else is ignored
if (rootType != MatchExpression::EQ && rootType != MatchExpression::AND)
return Status::OK();
if (isDocReplacement()) {
BSONElement idElem = query->getQueryObj().getField("_id");
// Replacement mods need the _id field copied explicitly.
if (idElem.ok()) {
mb::Element elem = doc.makeElement(idElem);
return doc.root().pushFront(elem);
}
return Status::OK();
}
// Create a new UpdateDriver to create the base doc from the query
Options opts;
opts.logOp = false;
opts.modOptions = modOptions();
UpdateDriver insertDriver(opts);
insertDriver.setContext(ModifierInterface::ExecInfo::INSERT_CONTEXT);
// If we are a single equality match query
if (root->matchType() == MatchExpression::EQ) {
EqualityMatchExpression* eqMatch =
static_cast<EqualityMatchExpression*>(root);
const BSONElement matchData = eqMatch->getData();
BSONElement childElem = matchData;
// Make copy to new path if not the same field name (for cases like $all)
if (!root->path().empty() && matchData.fieldNameStringData() != root->path()) {
BSONObjBuilder copyBuilder;
copyBuilder.appendAs(eqMatch->getData(), root->path());
const BSONObj copy = copyBuilder.obj();
copies.push_back(copy);
childElem = copy[root->path()];
}
// Add this element as a $set modifier
Status s = insertDriver.addAndParse(modifiertable::MOD_SET,
childElem);
if (!s.isOK())
return s;
}
else {
// parse query $set mods, including only equality stuff
for (size_t i = 0; i < root->numChildren(); ++i) {
MatchExpression* child = root->getChild(i);
if (child->matchType() == MatchExpression::EQ) {
EqualityMatchExpression* eqMatch =
static_cast<EqualityMatchExpression*>(child);
const BSONElement matchData = eqMatch->getData();
BSONElement childElem = matchData;
// Make copy to new path if not the same field name (for cases like $all)
if (!child->path().empty() &&
matchData.fieldNameStringData() != child->path()) {
BSONObjBuilder copyBuilder;
copyBuilder.appendAs(eqMatch->getData(), child->path());
const BSONObj copy = copyBuilder.obj();
copies.push_back(copy);
childElem = copy[child->path()];
}
// Add this element as a $set modifier
Status s = insertDriver.addAndParse(modifiertable::MOD_SET,
childElem);
if (!s.isOK())
return s;
}
}
}
// update the document with base field
Status s = insertDriver.update(StringData(), &doc);
copies.clear();
if (!s.isOK()) {
return Status(ErrorCodes::UnsupportedFormat,
str::stream() << "Cannot create base during"
" insert of update. Caused by :"
<< s.toString());
}
return Status::OK();
//......... remainder of the code is omitted here (closing brace elided by the listing) .........
示例12: run
// removeShard command (mongos): three-phase shard removal.
//  1) First invocation marks the shard "draining" in config.shards.
//  2) While chunks/databases remain on the shard, reports draining progress.
//  3) Once the shard is empty, deletes it from config.shards.
bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
string target = cmdObj.firstElement().valuestrsafe();
Shard s = Shard::make( target );
if ( ! grid.knowAboutShard( s.getConnString() ) ) {
errmsg = "unknown shard";
return false;
}
ScopedDbConnection conn( configServer.getPrimary() );
// If the server is not yet draining chunks, put it in draining mode.
BSONObj searchDoc = BSON( "_id" << s.getName() );
BSONObj drainingDoc = BSON( "_id" << s.getName() << ShardFields::draining(true) );
BSONObj shardDoc = conn->findOne( "config.shards", drainingDoc );
if ( shardDoc.isEmpty() ) {
// TODO prevent move chunks to this shard.
log() << "going to start draining shard: " << s.getName() << endl;
BSONObj newStatus = BSON( "$set" << BSON( ShardFields::draining(true) ) );
conn->update( "config.shards" , searchDoc , newStatus, false /* do no upsert */);
errmsg = conn->getLastError();
if ( errmsg.size() ) {
log() << "error starting remove shard: " << s.getName() << " err: " << errmsg << endl;
return false;
}
// Make mongos pick up the new draining flag.
Shard::reloadShardInfo();
result.append( "msg" , "draining started successfully" );
result.append( "state" , "started" );
result.append( "shard" , s.getName() );
conn.done();
return true;
}
// If the server has been completely drained, remove it from the ConfigDB.
// Check not only for chunks but also databases.
BSONObj shardIDDoc = BSON( "shard" << shardDoc[ "_id" ].str() );
long long chunkCount = conn->count( "config.chunks" , shardIDDoc );
BSONObj primaryDoc = BSON( "primary" << shardDoc[ "_id" ].str() );
long long dbCount = conn->count( "config.databases" , primaryDoc );
if ( ( chunkCount == 0 ) && ( dbCount == 0 ) ) {
log() << "going to remove shard: " << s.getName() << endl;
conn->remove( "config.shards" , searchDoc );
errmsg = conn->getLastError();
if ( errmsg.size() ) {
log() << "error concluding remove shard: " << s.getName() << " err: " << errmsg << endl;
return false;
}
Shard::removeShard( shardDoc[ "_id" ].str() );
Shard::reloadShardInfo();
result.append( "msg" , "removeshard completed successfully" );
result.append( "state" , "completed" );
result.append( "shard" , s.getName() );
conn.done();
return true;
}
// If the server is already in draining mode, just report on its progress.
// Report on databases (not just chunks) that are left too.
result.append( "msg" , "draining ongoing" );
result.append( "state" , "ongoing" );
BSONObjBuilder inner;
inner.append( "chunks" , chunkCount );
inner.append( "dbs" , dbCount );
result.append( "remaining" , inner.obj() );
conn.done();
return true;
}
示例13: handleSpecialNamespaces
// Handles mongos-side pseudo-namespaces of the form "<db>.$cmd.sys.<op>":
//  - inprog: aggregates in-progress ops from every shard, rewriting each
//    opid to "<shardName>:<opid>" and renaming "client" to "client_s";
//  - killop: parses "<shardName>:<opid>" and forwards the kill to that shard;
//  - unlock: not supported through mongos.
// Returns true when the request was handled (a reply has been sent),
// false when the namespace is not one of ours.
// Fixes vs. original: removed the dead store to 'ns' (it was initialized
// from r.getns() and immediately overwritten by strstr), and renamed locals
// so the Shard no longer shadows 'string s' and the BSONObjIterator no
// longer shadows the shard-loop counter 'i'. Behavior is unchanged.
bool handleSpecialNamespaces( Request& r , QueryMessage& q ) {
    // Only namespaces containing ".$cmd.sys." are handled here.
    const char * ns = strstr( r.getns() , ".$cmd.sys." );
    if ( ! ns )
        return false;
    ns += 10;  // skip past ".$cmd.sys." to the bare op name

    BSONObjBuilder b;
    vector<Shard> shards;

    if ( strcmp( ns , "inprog" ) == 0 ) {
        Shard::getAllShards( shards );

        BSONArrayBuilder arr( b.subarrayStart( "inprog" ) );
        for ( unsigned i=0; i<shards.size(); i++ ) {
            Shard shard = shards[i];
            ScopedDbConnection conn( shard );
            BSONObj temp = conn->findOne( r.getns() , BSONObj() );
            if ( temp["inprog"].isABSONObj() ) {
                BSONObjIterator ops( temp["inprog"].Obj() );
                while ( ops.more() ) {
                    BSONObjBuilder x;
                    BSONObjIterator j( ops.next().Obj() );
                    while( j.more() ) {
                        BSONElement e = j.next();
                        if ( str::equals( e.fieldName() , "opid" ) ) {
                            // Qualify the opid with the shard name so the
                            // caller can route a later killop.
                            stringstream ss;
                            ss << shard.getName() << ':' << e.numberInt();
                            x.append( "opid" , ss.str() );
                        }
                        else if ( str::equals( e.fieldName() , "client" ) ) {
                            // Rename: this is the shard-side client address.
                            x.appendAs( e , "client_s" );
                        }
                        else {
                            x.append( e );
                        }
                    }
                    arr.append( x.obj() );
                }
            }
            conn.done();
        }
        arr.done();
    }
    else if ( strcmp( ns , "killop" ) == 0 ) {
        BSONElement e = q.query["op"];
        // NOTE(review): this condition treats a namespace that DOES contain
        // "admin." as unauthorized, which looks inverted — verify against
        // the intended auth check before changing it (behavior preserved).
        if ( strstr( r.getns() , "admin." ) != 0 ) {
            b.append( "err" , "unauthorized" );
        }
        else if ( e.type() != String ) {
            b.append( "err" , "bad op" );
            b.append( e );
        }
        else {
            b.append( e );
            // The op value has the form "<shardName>:<opid>".
            string opStr = e.String();
            string::size_type colon = opStr.find( ':' );
            if ( colon == string::npos ) {
                b.append( "err" , "bad opid" );
            }
            else {
                string shardName = opStr.substr( 0 , colon );
                int opid = atoi( opStr.substr( colon + 1 ).c_str() );
                b.append( "shard" , shardName );
                b.append( "shardid" , opid );
                log() << "want to kill op: " << e << endl;
                Shard targetShard(shardName);
                ScopedDbConnection conn( targetShard );
                // Forward the killop to the owning shard.
                conn->findOne( r.getns() , BSON( "op" << opid ) );
                conn.done();
            }
        }
    }
    else if ( strcmp( ns , "unlock" ) == 0 ) {
        b.append( "err" , "can't do unlock through mongos" );
    }
    else {
        log( LL_WARNING ) << "unknown sys command [" << ns << "]" << endl;
        return false;
    }

    BSONObj x = b.done();
    replyToQuery(0, r.p(), r.m(), x);
    return true;
}
示例14: Status
// Renames one namespace entry from 'fromNS' to 'toNS': moves the
// NamespaceDetails, rewrites the system.namespaces spec (the "create" option
// gets the new name; "temp" is dropped unless stayTemp), and fixes the
// in-memory collection cache. Callers handle renaming any associated indexes.
Status MMAPV1DatabaseCatalogEntry::_renameSingleNamespace(OperationContext* txn,
StringData fromNS,
StringData toNS,
bool stayTemp) {
// some sanity checking
NamespaceDetails* fromDetails = _namespaceIndex.details(fromNS);
if (!fromDetails)
return Status(ErrorCodes::BadValue, "from namespace doesn't exist");
if (_namespaceIndex.details(toNS))
return Status(ErrorCodes::BadValue, "to namespace already exists");
// at this point, we haven't done anything destructive yet
// ----
// actually start moving
// ----
// this could throw, but if it does we're ok
_namespaceIndex.add_ns(txn, toNS, fromDetails);
NamespaceDetails* toDetails = _namespaceIndex.details(toNS);
try {
toDetails->copyingFrom(txn, toNS, _namespaceIndex, fromDetails); // fixes extraOffset
} catch (DBException&) {
// could end up here if .ns is full - if so try to clean up / roll back a little
_namespaceIndex.kill_ns(txn, toNS);
throw;
}
// at this point, code .ns stuff moved
_namespaceIndex.kill_ns(txn, fromNS);
fromDetails = NULL;
// fix system.namespaces
BSONObj newSpec;
RecordId oldSpecLocation = getCollectionCatalogEntry(fromNS)->getNamespacesRecordId();
invariant(!oldSpecLocation.isNull());
{
// Rebuild the options object, renaming "create" to the new namespace
// and dropping "temp" unless the caller asked to keep it.
BSONObj oldSpec = _getNamespaceRecordStore()->dataFor(txn, oldSpecLocation).releaseToBson();
invariant(!oldSpec.isEmpty());
BSONObjBuilder b;
BSONObjIterator i(oldSpec.getObjectField("options"));
while (i.more()) {
BSONElement e = i.next();
if (strcmp(e.fieldName(), "create") != 0) {
if (stayTemp || (strcmp(e.fieldName(), "temp") != 0))
b.append(e);
} else {
b << "create" << toNS;
}
}
newSpec = b.obj();
}
// Insert the new system.namespaces entry, then delete the old one.
RecordId rid = _addNamespaceToNamespaceCollection(txn, toNS, newSpec.isEmpty() ? 0 : &newSpec);
_getNamespaceRecordStore()->deleteRecord(txn, oldSpecLocation);
// Swap the cached Entry: register the insertion with the recovery unit so
// it can be rolled back, then remove the old cache slot and fill the new.
Entry*& entry = _collections[toNS.toString()];
invariant(entry == NULL);
txn->recoveryUnit()->registerChange(new EntryInsertion(toNS, this));
entry = new Entry();
_removeFromCache(txn->recoveryUnit(), fromNS);
_insertInCache(txn, toNS, rid, entry);
return Status::OK();
}
示例15: _summarizeStatus
// Builds the replSetGetStatus response body into 'b': one entry per member
// (health, state, optime, heartbeat data) plus set-wide fields. This node's
// entry is built from local state; all other members come from heartbeat info.
void ReplSetImpl::_summarizeStatus(BSONObjBuilder& b) const {
vector<BSONObj> v;
const Member *_self = this->_self;
assert( _self );
MemberState myState = box.getState();
// add self
{
BSONObjBuilder bb;
bb.append("_id", (int) _self->id());
bb.append("name", _self->fullName());
// Self is by definition reachable, hence health 1.0.
bb.append("health", 1.0);
bb.append("state", (int)myState.s);
bb.append("stateStr", myState.toString());
bb.append("uptime", (unsigned)(time(0) - cmdLine.started));
if (!_self->config().arbiterOnly) {
// Arbiters carry no data, so no optime is reported for them.
bb.appendTimestamp("optime", lastOpTimeWritten.asDate());
bb.appendDate("optimeDate", lastOpTimeWritten.getSecs() * 1000LL);
}
int maintenance = _maintenanceMode;
if (maintenance) {
bb.append("maintenanceMode", maintenance);
}
if (theReplSet) {
string s = theReplSet->hbmsg();
if( !s.empty() )
bb.append("errmsg", s);
}
bb.append("self", true);
v.push_back(bb.obj());
}
// Remaining members: summarize from their latest heartbeat info.
Member *m =_members.head();
while( m ) {
BSONObjBuilder bb;
bb.append("_id", (int) m->id());
bb.append("name", m->fullName());
double h = m->hbinfo().health;
bb.append("health", h);
bb.append("state", (int) m->state().s);
if( h == 0 ) {
// if we can't connect the state info is from the past and could be confusing to show
bb.append("stateStr", "(not reachable/healthy)");
}
else {
bb.append("stateStr", m->state().toString());
}
bb.append("uptime", (unsigned) (m->hbinfo().upSince ? (time(0)-m->hbinfo().upSince) : 0));
if (!m->config().arbiterOnly) {
bb.appendTimestamp("optime", m->hbinfo().opTime.asDate());
bb.appendDate("optimeDate", m->hbinfo().opTime.getSecs() * 1000LL);
}
bb.appendTimeT("lastHeartbeat", m->hbinfo().lastHeartbeat);
bb.append("pingMs", m->hbinfo().ping);
string s = m->lhb();
if( !s.empty() )
bb.append("errmsg", s);
if (m->hbinfo().authIssue) {
bb.append("authenticated", false);
}
v.push_back(bb.obj());
m = m->next();
}
// Sort member entries (BSONObj comparison; presumably orders by the first
// field, _id — verify against BSONObj::operator< semantics).
sort(v.begin(), v.end());
b.append("set", name());
b.appendTimeT("date", time(0));
b.append("myState", myState.s);
const Member *syncTarget = _currentSyncTarget;
if (syncTarget && myState != MemberState::RS_PRIMARY) {
b.append("syncingTo", syncTarget->fullName());
}
b.append("members", v);
if( replSetBlind )
b.append("blind",true); // to avoid confusion if set...normally never set except for testing.
}