本文整理汇总了C++中BSONObjBuilder类的典型用法代码示例。如果您正苦于以下问题:C++ BSONObjBuilder类的具体用法?C++ BSONObjBuilder怎么用?C++ BSONObjBuilder使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了BSONObjBuilder类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: opToString
// Render a one-line, human-readable report of this completed operation for
// the slow-operation log: op type, namespace, (redacted) command or query
// text, the per-op counters, exception details, lock statistics and the
// total execution time in milliseconds.
string OpDebug::report(const CurOp& curop, const SingleThreadedLockStats& lockStats) const {
StringBuilder s;
if ( iscommand )
s << "command ";
else
s << opToString( op ) << ' ';
s << ns;
if ( ! query.isEmpty() ) {
if ( iscommand ) {
s << " command: ";
Command* curCommand = curop.getCommand();
if (curCommand) {
// Let the command scrub sensitive fields (e.g. credentials) from its
// own arguments before they are written to the log.
mutablebson::Document cmdToLog(query, mutablebson::Document::kInPlaceDisabled);
curCommand->redactForLogging(&cmdToLog);
s << curCommand->name << " ";
s << cmdToLog.toString();
}
else { // Should not happen but we need to handle curCommand == NULL gracefully
s << query.toString();
}
}
else {
s << " query: ";
s << query.toString();
}
}
if (!planSummary.empty()) {
s << " planSummary: " << planSummary.toString();
}
if ( ! updateobj.isEmpty() ) {
s << " update: ";
updateobj.toString( s );
}
// The OPDEBUG_TOSTRING_HELP* macros (defined elsewhere) append the named
// counter/flag to 's'; presumably they omit values that were never set —
// confirm against the macro definitions.
OPDEBUG_TOSTRING_HELP( cursorid );
OPDEBUG_TOSTRING_HELP( ntoreturn );
OPDEBUG_TOSTRING_HELP( ntoskip );
OPDEBUG_TOSTRING_HELP_BOOL( exhaust );
OPDEBUG_TOSTRING_HELP( nscanned );
OPDEBUG_TOSTRING_HELP( nscannedObjects );
OPDEBUG_TOSTRING_HELP_BOOL( idhack );
OPDEBUG_TOSTRING_HELP_BOOL( scanAndOrder );
OPDEBUG_TOSTRING_HELP( nmoved );
OPDEBUG_TOSTRING_HELP( nMatched );
OPDEBUG_TOSTRING_HELP( nModified );
OPDEBUG_TOSTRING_HELP( ninserted );
OPDEBUG_TOSTRING_HELP( ndeleted );
OPDEBUG_TOSTRING_HELP_BOOL( fastmod );
OPDEBUG_TOSTRING_HELP_BOOL( fastmodinsert );
OPDEBUG_TOSTRING_HELP_BOOL( upsert );
OPDEBUG_TOSTRING_HELP_BOOL( cursorExhausted );
OPDEBUG_TOSTRING_HELP( keyUpdates );
OPDEBUG_TOSTRING_HELP( writeConflicts );
if ( extra.len() )
s << " " << extra.str();
if ( ! exceptionInfo.empty() ) {
s << " exception: " << exceptionInfo.msg;
if ( exceptionInfo.code )
s << " code:" << exceptionInfo.code;
}
s << " numYields:" << curop.numYields();
OPDEBUG_TOSTRING_HELP( nreturned );
if (responseLength > 0) {
s << " reslen:" << responseLength;
}
{
// Serialize the accumulated lock statistics into a temporary BSON object
// purely for logging purposes.
BSONObjBuilder locks;
lockStats.report(&locks);
s << " locks:" << locks.obj().toString();
}
s << " " << executionTime << "ms";
return s.str();
}
示例2: objFromElement
// static
// Translate a single MatchExpression into index bounds for one index field.
// Appends the resulting interval to *oilOut and sets *exactOut to whether the
// bounds are exact (every key inside the bounds satisfies the predicate) or
// whether the predicate must be re-applied to fetched documents.
// 'direction' == -1 means the index is traversed backwards, so the interval
// is reversed before being appended.
void IndexBoundsBuilder::translate(const MatchExpression* expr, int direction,
OrderedIntervalList* oilOut, bool* exactOut) {
Interval interval;
bool exact = false;
if (expr->isLeaf()) {
if (MatchExpression::EQ == expr->matchType()) {
const EqualityMatchExpression* node = static_cast<const EqualityMatchExpression*>(expr);
// We have to copy the data out of the parse tree and stuff it into the index bounds.
// BSONValue will be useful here.
BSONObj dataObj = objFromElement(node->getData());
if (dataObj.couldBeArray()) {
// Equality against a (possible) array needs multikey-aware bounds we
// don't build yet, so fall back to a full scan with inexact bounds.
// XXX: build better bounds
warning() << "building lazy bounds for " << expr->toString() << endl;
interval = allValues();
exact = false;
}
else {
verify(dataObj.isOwned());
interval = makePointInterval(dataObj);
exact = true;
}
}
else if (MatchExpression::LTE == expr->matchType()) {
const LTEMatchExpression* node = static_cast<const LTEMatchExpression*>(expr);
// Range is (MinKey .. value): the bounds object holds both endpoints.
// The two bools passed to makeRangeInterval are presumably
// (startInclusive, endInclusive) — consistent with the LT/GT branches
// below; confirm against makeRangeInterval's declaration.
BSONObjBuilder bob;
bob.appendMinKey("");
bob.append(node->getData());
BSONObj dataObj = bob.obj();
verify(dataObj.isOwned());
interval = makeRangeInterval(dataObj, true, true);
exact = true;
}
else if (MatchExpression::LT == expr->matchType()) {
const LTMatchExpression* node = static_cast<const LTMatchExpression*>(expr);
// Range is (MinKey .. value), end-exclusive.
BSONObjBuilder bob;
bob.appendMinKey("");
bob.append(node->getData());
BSONObj dataObj = bob.obj();
verify(dataObj.isOwned());
interval = makeRangeInterval(dataObj, true, false);
exact = true;
}
else if (MatchExpression::GT == expr->matchType()) {
const GTMatchExpression* node = static_cast<const GTMatchExpression*>(expr);
// Range is (value .. MaxKey), start-exclusive.
BSONObjBuilder bob;
bob.append(node->getData());
bob.appendMaxKey("");
BSONObj dataObj = bob.obj();
verify(dataObj.isOwned());
interval = makeRangeInterval(dataObj, false, true);
exact = true;
}
else if (MatchExpression::GTE == expr->matchType()) {
const GTEMatchExpression* node = static_cast<const GTEMatchExpression*>(expr);
// Range is (value .. MaxKey), both ends inclusive.
BSONObjBuilder bob;
bob.append(node->getData());
bob.appendMaxKey("");
BSONObj dataObj = bob.obj();
verify(dataObj.isOwned());
interval = makeRangeInterval(dataObj, true, true);
exact = true;
}
else {
// Unsupported leaf type: scan everything and re-check the predicate.
// XXX: build better bounds
warning() << "building lazy bounds for " << expr->toString() << endl;
interval = allValues();
exact = false;
}
}
else {
// Non-leaf (array) expressions are not translated yet: full scan, inexact.
// XXX: build better bounds
verify(expr->isArray());
warning() << "building lazy bounds for " << expr->toString() << endl;
interval = allValues();
exact = false;
}
if (-1 == direction) {
reverseInterval(&interval);
}
oilOut->intervals.push_back(interval);
*exactOut = exact;
}
示例3: BSON
// liberally cribbed from user_prio.cpp
// Walk the accountant ClassAd's numbered submitter attributes (Name%d,
// Priority%d, ResourcesUsed%d, ...) and insert one BSON sample document per
// non-stale submitter into the DB_STATS_SAMPLES_ACCOUNTANT collection,
// stamped with the supplied timestamp 'ts'.
void
plumage::stats::processAccountantStats(ClassAd* ad, ODSMongodbOps* ops, Date_t& ts)
{
// attr%d holders...sadly reverting back to MyString for convenience of formatstr
MyString attrName, attrPrio, attrResUsed, attrWtResUsed, attrFactor, attrBeginUsage, attrAccUsage;
MyString attrLastUsage, attrAcctGroup, attrIsAcctGroup;
MyString attrConfigQuota, attrEffectiveQuota, attrSubtreeQuota, attrSurplusPolicy;
// values
string name, acctGroup, surplusPolicy;
float priority, factor, wtResUsed, configQuota, effectiveQuota, subtreeQuota, accUsage = -1;
int resUsed, beginUsage, lastUsage;
resUsed = beginUsage = lastUsage = 0;
bool isAcctGroup;
DBClientConnection* conn = ops->m_db_conn;
// Make sure the indexes used for time- and name-based queries exist.
conn->ensureIndex(DB_STATS_SAMPLES_ACCOUNTANT, BSON( "ts" << -1 ));
conn->ensureIndex(DB_STATS_SAMPLES_ACCOUNTANT, BSON( "lu" << -1 ));
conn->ensureIndex(DB_STATS_SAMPLES_ACCOUNTANT, BSON( "n" << 1 ));
unsigned long long acct_count = conn->count(DB_STATS_SAMPLES_ACCOUNTANT);
// eventhough the Accountant doesn't forget
// we don't care about stale submitters (default: last 24 hours)
int cfg_last_usage = param_integer("ODS_ACCOUNTANT_LAST_USAGE", 60*60*24);
int minLastUsageTime = time(0)-cfg_last_usage;
int numElem = -1;
ad->LookupInteger( "NumSubmittors", numElem );
for( int i=1; i<=numElem; i++) {
priority=0;
isAcctGroup = false;
// skip stale records unless we have none
attrLastUsage.formatstr("LastUsageTime%d", i );
ad->LookupInteger ( attrLastUsage.Value(), lastUsage );
if (lastUsage < minLastUsageTime && acct_count > 0)
continue;
// parse the horrid classad
attrName.formatstr("Name%d", i );
attrPrio.formatstr("Priority%d", i );
attrResUsed.formatstr("ResourcesUsed%d", i );
attrWtResUsed.formatstr("WeightedResourcesUsed%d", i );
attrFactor.formatstr("PriorityFactor%d", i );
attrBeginUsage.formatstr("BeginUsageTime%d", i );
attrAccUsage.formatstr("WeightedAccumulatedUsage%d", i );
attrAcctGroup.formatstr("AccountingGroup%d", i);
attrIsAcctGroup.formatstr("IsAccountingGroup%d", i);
attrConfigQuota.formatstr("ConfigQuota%d", i);
attrEffectiveQuota.formatstr("EffectiveQuota%d", i);
attrSubtreeQuota.formatstr("SubtreeQuota%d", i);
attrSurplusPolicy.formatstr("SurplusPolicy%d", i);
ad->LookupString ( attrName.Value(), name );
ad->LookupFloat ( attrPrio.Value(), priority );
ad->LookupFloat ( attrFactor.Value(), factor );
ad->LookupFloat ( attrAccUsage.Value(), accUsage );
ad->LookupInteger ( attrBeginUsage.Value(), beginUsage );
ad->LookupInteger ( attrResUsed.Value(), resUsed );
ad->LookupBool ( attrIsAcctGroup.Value(), isAcctGroup);
ad->LookupFloat ( attrConfigQuota.Value(), configQuota );
ad->LookupFloat ( attrEffectiveQuota.Value(), effectiveQuota );
ad->LookupFloat ( attrSubtreeQuota.Value(), subtreeQuota );
ad->LookupString ( attrSurplusPolicy.Value(), surplusPolicy );
// Fall back to the unweighted count if no weighted value is present.
if( !ad->LookupFloat( attrWtResUsed.Value(), wtResUsed ) ) {
wtResUsed = resUsed;
}
if (!ad->LookupString(attrAcctGroup.Value(), acctGroup)) {
acctGroup = "<none>";
}
// Build and insert one sample document for this submitter.
BSONObjBuilder bob;
bob.appendDate("ts",ts);
bob.append("n",name);
bob.append("ag",acctGroup);
bob.appendAsNumber("prio",formatReal(priority));
bob.appendAsNumber("fac",formatReal(factor));
bob.append("ru",resUsed);
bob.append("wru",wtResUsed);
// condor timestamps need massaging when going in the db
bob.appendDate("bu",static_cast<unsigned long long>(beginUsage)*1000);
bob.appendDate("lu",static_cast<unsigned long long>(lastUsage)*1000);
bob.appendAsNumber("au",formatReal(accUsage));
bob.appendAsNumber("cq",formatReal(configQuota));
bob.appendAsNumber("eq",formatReal(effectiveQuota));
bob.appendAsNumber("sq",formatReal(subtreeQuota));
if (!surplusPolicy.empty()) bob.append("sp",surplusPolicy);
conn->insert(DB_STATS_SAMPLES_ACCOUNTANT,bob.obj());
}
}
示例4: append
// Append a structured (BSON) dump of this operation's debug info to 'b',
// e.g. for the system profiler. If the combined size of the query and update
// objects exceeds maxSize, abbreviated string forms are appended instead and
// false is returned; otherwise the full objects are embedded and true is
// returned.
bool OpDebug::append(const CurOp& curop, BSONObjBuilder& b, size_t maxSize) const {
b.append( "op" , iscommand ? "command" : opToString( op ) );
b.append( "ns" , ns.toString() );
// Pre-compute the total size of what would be embedded so we can decide
// whether to truncate before appending anything.
int queryUpdateObjSize = 0;
if (!query.isEmpty()) {
queryUpdateObjSize += query.objsize();
}
else if (!iscommand && curop.haveQuery()) {
queryUpdateObjSize += curop.query()["query"].size();
}
if (!updateobj.isEmpty()) {
queryUpdateObjSize += updateobj.objsize();
}
if (static_cast<size_t>(queryUpdateObjSize) > maxSize) {
if (!query.isEmpty()) {
// Use 60 since BSONObj::toString can truncate strings into 150 chars
// and we want to have enough room for both query and updateobj when
// the entire document is going to be serialized into a string
const string abbreviated(query.toString(false, false), 0, 60);
b.append(iscommand ? "command" : "query", abbreviated + "...");
}
else if (!iscommand && curop.haveQuery()) {
const string abbreviated(curop.query()["query"].toString(false, false), 0, 60);
b.append("query", abbreviated + "...");
}
if (!updateobj.isEmpty()) {
const string abbreviated(updateobj.toString(false, false), 0, 60);
b.append("updateobj", abbreviated + "...");
}
return false;
}
if (!query.isEmpty()) {
b.append(iscommand ? "command" : "query", query);
}
else if (!iscommand && curop.haveQuery()) {
curop.appendQuery(b, "query");
}
if (!updateobj.isEmpty()) {
b.append("updateobj", updateobj);
}
const bool moved = (nmoved >= 1);
// The OPDEBUG_APPEND_* macros (defined elsewhere) append the named
// counter/flag to 'b'; presumably they skip values that were never set —
// confirm against the macro definitions.
OPDEBUG_APPEND_NUMBER( cursorid );
OPDEBUG_APPEND_NUMBER( ntoreturn );
OPDEBUG_APPEND_NUMBER( ntoskip );
OPDEBUG_APPEND_BOOL( exhaust );
OPDEBUG_APPEND_NUMBER( nscanned );
OPDEBUG_APPEND_BOOL( idhack );
OPDEBUG_APPEND_BOOL( scanAndOrder );
OPDEBUG_APPEND_BOOL( moved );
OPDEBUG_APPEND_NUMBER( nmoved );
OPDEBUG_APPEND_NUMBER( nupdated );
OPDEBUG_APPEND_BOOL( fastmod );
OPDEBUG_APPEND_BOOL( fastmodinsert );
OPDEBUG_APPEND_BOOL( upsert );
OPDEBUG_APPEND_NUMBER( keyUpdates );
b.appendNumber( "numYield" , curop.numYields() );
b.append( "lockStats" , curop.lockStat().report() );
if ( ! exceptionInfo.empty() )
exceptionInfo.append( b , "exception" , "exceptionCode" );
OPDEBUG_APPEND_NUMBER( nreturned );
OPDEBUG_APPEND_NUMBER( responseLength );
b.append( "millis" , executionTime );
return true;
}
示例5: Status
// Rename a single namespace inside this database: moves the
// NamespaceDetails entry from 'fromNS' to 'toNS' and rewrites the matching
// system.namespaces metadata document. When 'stayTemp' is false the "temp"
// option is dropped from the rewritten options. Fails up front if 'fromNS'
// does not exist or 'toNS' already does.
Status MMAPV1DatabaseCatalogEntry::_renameSingleNamespace( OperationContext* txn,
const StringData& fromNS,
const StringData& toNS,
bool stayTemp ) {
// some sanity checking
NamespaceDetails* fromDetails = _namespaceIndex.details( fromNS );
if ( !fromDetails )
return Status( ErrorCodes::BadValue, "from namespace doesn't exist" );
if ( _namespaceIndex.details( toNS ) )
return Status( ErrorCodes::BadValue, "to namespace already exists" );
// at this point, we haven't done anything destructive yet
// ----
// actually start moving
// ----
// this could throw, but if it does we're ok
_namespaceIndex.add_ns( txn, toNS, fromDetails );
NamespaceDetails* toDetails = _namespaceIndex.details( toNS );
try {
toDetails->copyingFrom(txn,
toNS,
_namespaceIndex,
fromDetails); // fixes extraOffset
}
catch( DBException& ) {
// could end up here if .ns is full - if so try to clean up / roll back a little
_namespaceIndex.kill_ns( txn, toNS );
throw;
}
// at this point, code .ns stuff moved
_namespaceIndex.kill_ns( txn, fromNS );
fromDetails = NULL;
// fix system.namespaces
BSONObj newSpec;
DiskLoc oldSpecLocation;
{
BSONObj oldSpec;
{
// Scan system.namespaces for the record describing 'fromNS'.
RecordStoreV1Base* rs = _getNamespaceRecordStore( txn, fromNS );
scoped_ptr<RecordIterator> it( rs->getIterator() );
while ( !it->isEOF() ) {
DiskLoc loc = it->getNext();
const Record* rec = it->recordFor( loc );
BSONObj entry( rec->data() );
if ( fromNS == entry["name"].String() ) {
oldSpecLocation = loc;
oldSpec = entry.getOwned();
break;
}
}
}
invariant( !oldSpec.isEmpty() );
invariant( !oldSpecLocation.isNull() );
// Rebuild the options: copy everything except "create" (rewritten to
// point at the new namespace) and, unless stayTemp, the "temp" flag.
BSONObjBuilder b;
BSONObjIterator i( oldSpec.getObjectField( "options" ) );
while( i.more() ) {
BSONElement e = i.next();
if ( strcmp( e.fieldName(), "create" ) != 0 ) {
if (stayTemp || (strcmp(e.fieldName(), "temp") != 0))
b.append( e );
}
else {
b << "create" << toNS;
}
}
newSpec = b.obj();
}
// Insert the new metadata record, then delete the old one.
_addNamespaceToNamespaceCollection( txn, toNS, newSpec.isEmpty() ? 0 : &newSpec );
_getNamespaceRecordStore( txn, fromNS )->deleteRecord( txn, oldSpecLocation );
return Status::OK();
}
示例6: invariant
Status ChunkMoveOperationState::commitMigration(const MigrationSessionId& sessionId) {
invariant(_distLockStatus.is_initialized());
invariant(_distLockStatus->isOK());
log() << "About to enter migrate critical section";
// We're under the collection distributed lock here, so no other migrate can change maxVersion
// or CollectionMetadata state.
ShardingState* const shardingState = ShardingState::get(_txn);
Status startStatus = ShardingStateRecovery::startMetadataOp(_txn);
if (!startStatus.isOK()) {
warning() << "Failed to write sharding state recovery document" << causedBy(startStatus);
return startStatus;
}
shardingState->migrationSourceManager()->setInCriticalSection(true);
const ChunkVersion originalCollVersion = getCollMetadata()->getCollVersion();
ChunkVersion myVersion = originalCollVersion;
myVersion.incMajor();
{
ScopedTransaction transaction(_txn, MODE_IX);
Lock::DBLock lk(_txn->lockState(), _nss.db(), MODE_IX);
Lock::CollectionLock collLock(_txn->lockState(), _nss.ns(), MODE_X);
invariant(myVersion > shardingState->getVersion(_nss.ns()));
// Bump the metadata's version up and "forget" about the chunk being moved. This is
// not the commit point, but in practice the state in this shard won't change until
// the commit it done.
shardingState->donateChunk(_txn, _nss.ns(), _minKey, _maxKey, myVersion);
}
log() << "moveChunk setting version to: " << myVersion << migrateLog;
// We're under the collection lock here, too, so we can undo the chunk donation because
// no other state change could be ongoing
BSONObj res;
Status recvChunkCommitStatus{ErrorCodes::InternalError, "status not set"};
try {
ScopedDbConnection connTo(_toShardCS, 35.0);
connTo->runCommand("admin", createRecvChunkCommitRequest(sessionId), res);
connTo.done();
recvChunkCommitStatus = getStatusFromCommandResult(res);
} catch (const DBException& e) {
const string msg = stream() << "moveChunk could not contact to shard " << _toShard
<< " to commit transfer" << causedBy(e);
warning() << msg;
recvChunkCommitStatus = Status(e.toStatus().code(), msg);
}
if (MONGO_FAIL_POINT(failMigrationCommit) && recvChunkCommitStatus.isOK()) {
recvChunkCommitStatus =
Status(ErrorCodes::InternalError, "Failing _recvChunkCommit due to failpoint.");
}
if (!recvChunkCommitStatus.isOK()) {
log() << "moveChunk migrate commit not accepted by TO-shard: " << res
<< " resetting shard version to: " << getShardVersion() << migrateLog;
{
ScopedTransaction transaction(_txn, MODE_IX);
Lock::DBLock dbLock(_txn->lockState(), _nss.db(), MODE_IX);
Lock::CollectionLock collLock(_txn->lockState(), _nss.ns(), MODE_X);
log() << "moveChunk collection lock acquired to reset shard version from "
"failed migration";
// Revert the chunk manager back to the state before "forgetting" about the chunk
shardingState->undoDonateChunk(_txn, _nss.ns(), getCollMetadata());
}
log() << "Shard version successfully reset to clean up failed migration";
return Status(recvChunkCommitStatus.code(),
stream() << "_recvChunkCommit failed: " << causedBy(recvChunkCommitStatus));
}
log() << "moveChunk migrate commit accepted by TO-shard: " << res << migrateLog;
BSONArrayBuilder updates;
{
// Update for the chunk being moved
BSONObjBuilder op;
op.append("op", "u");
op.appendBool("b", false); // No upserting
op.append("ns", ChunkType::ConfigNS);
BSONObjBuilder n(op.subobjStart("o"));
n.append(ChunkType::name(), Chunk::genID(_nss.ns(), _minKey));
myVersion.addToBSON(n, ChunkType::DEPRECATED_lastmod());
n.append(ChunkType::ns(), _nss.ns());
n.append(ChunkType::min(), _minKey);
n.append(ChunkType::max(), _maxKey);
n.append(ChunkType::shard(), _toShard);
//.........这里部分代码省略.........
示例7: run
// Scripting-engine regression test: verifies that BSON number types
// (NumberInt vs NumberDouble) survive a round trip through a JS scope via
// setObject()/getObject(), both for top-level fields (cases A/B, with and
// without the read-only flag) and for array elements (case C).
void run(){
Scope * s = globalScriptEngine->newScope();
// -- A --
BSONObj o;
{
BSONObjBuilder b ;
b.append( "a" , (int)5 );
b.append( "b" , 5.6 );
o = b.obj();
}
ASSERT_EQUALS( NumberInt , o["a"].type() );
ASSERT_EQUALS( NumberDouble , o["b"].type() );
s->setObject( "z" , o );
s->invoke( "return z" , BSONObj() );
BSONObj out = s->getObject( "return" );
ASSERT_EQUALS( 5 , out["a"].number() );
ASSERT_EQUALS( 5.6 , out["b"].number() );
ASSERT_EQUALS( NumberDouble , out["b"].type() );
ASSERT_EQUALS( NumberInt , out["a"].type() );
// -- B -- same as A but the object is set non-read-only
{
BSONObjBuilder b ;
b.append( "a" , (int)5 );
b.append( "b" , 5.6 );
o = b.obj();
}
s->setObject( "z" , o , false );
s->invoke( "return z" , BSONObj() );
out = s->getObject( "return" );
ASSERT_EQUALS( 5 , out["a"].number() );
ASSERT_EQUALS( 5.6 , out["b"].number() );
ASSERT_EQUALS( NumberDouble , out["b"].type() );
ASSERT_EQUALS( NumberInt , out["a"].type() );
// -- C -- number types inside an embedded array
{
BSONObjBuilder b ;
{
BSONObjBuilder c;
c.append( "0" , 5.5 );
c.append( "1" , 6 );
b.appendArray( "a" , c.obj() );
}
o = b.obj();
}
ASSERT_EQUALS( NumberDouble , o["a"].embeddedObjectUserCheck()["0"].type() );
ASSERT_EQUALS( NumberInt , o["a"].embeddedObjectUserCheck()["1"].type() );
s->setObject( "z" , o , false );
out = s->getObject( "z" );
ASSERT_EQUALS( NumberDouble , out["a"].embeddedObjectUserCheck()["0"].type() );
ASSERT_EQUALS( NumberInt , out["a"].embeddedObjectUserCheck()["1"].type() );
// Mutate the object from JS and make sure the change is visible and the
// untouched array element keeps its type.
s->invokeSafe( "z.z = 5;" , BSONObj() );
out = s->getObject( "z" );
ASSERT_EQUALS( 5 , out["z"].number() );
ASSERT_EQUALS( NumberDouble , out["a"].embeddedObjectUserCheck()["0"].type() );
// Commenting so that v8 tests will work
// ASSERT_EQUALS( NumberDouble , out["a"].embeddedObjectUserCheck()["1"].type() ); // TODO: this is technically bad, but here to make sure that i understand the behavior
// Eliot says I don't have to worry about this case
// // -- D --
//
// o = fromjson( "{a:3.0,b:4.5}" );
// ASSERT_EQUALS( NumberDouble , o["a"].type() );
// ASSERT_EQUALS( NumberDouble , o["b"].type() );
//
// s->setObject( "z" , o , false );
// s->invoke( "return z" , BSONObj() );
// out = s->getObject( "return" );
// ASSERT_EQUALS( 3 , out["a"].number() );
// ASSERT_EQUALS( 4.5 , out["b"].number() );
//
// ASSERT_EQUALS( NumberDouble , out["b"].type() );
// ASSERT_EQUALS( NumberDouble , out["a"].type() );
//
delete s;
}
示例8: run
// Report this node's rollback id ("rbid") to the caller. The shared
// replica-set precondition check runs first; on failure it fills in
// errmsg/result itself and we simply propagate that failure.
virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
const bool preconditionsOk = check( errmsg, result );
if ( preconditionsOk ) {
result.append( "rbid" , rbid );
}
return preconditionsOk;
}
示例9: run
// Implementation of the serverStatus command. Appends the basic global
// fields (host, version, pid, uptime, ...), then every registered
// ServerStatusSection the caller is authorized for — honoring per-section
// include/exclude flags in cmdObj — then the metric tree and recent warning
// log lines. If building the response took more than one second, a
// per-section timing breakdown is attached as "timing" and logged.
bool run(OperationContext* txn,
const string& dbname,
BSONObj& cmdObj,
int,
string& errmsg,
BSONObjBuilder& result) {
_runCalled = true;
const auto service = txn->getServiceContext();
const auto clock = service->getFastClockSource();
const auto runStart = clock->now();
// Collects how long each stage took; only emitted if the command is slow.
BSONObjBuilder timeBuilder(256);
const auto authSession = AuthorizationSession::get(Client::getCurrent());
// --- basic fields that are global
result.append("host", prettyHostName());
result.append("version", VersionInfoInterface::instance().version());
result.append("process", serverGlobalParams.binaryName);
result.append("pid", ProcessId::getCurrent().asLongLong());
result.append("uptime", (double)(time(0) - serverGlobalParams.started));
auto uptime = clock->now() - _started;
result.append("uptimeMillis", durationCount<Milliseconds>(uptime));
result.append("uptimeEstimate", durationCount<Seconds>(uptime));
result.appendDate("localTime", jsTime());
timeBuilder.appendNumber("after basic",
durationCount<Milliseconds>(clock->now() - runStart));
// --- all sections
for (SectionMap::const_iterator i = _sections->begin(); i != _sections->end(); ++i) {
ServerStatusSection* section = i->second;
// Skip sections the current user is not authorized to see.
std::vector<Privilege> requiredPrivileges;
section->addRequiredPrivileges(&requiredPrivileges);
if (!authSession->isAuthorizedForPrivileges(requiredPrivileges))
continue;
// A field named after the section in cmdObj overrides its default
// include/exclude setting (e.g. {repl: 0} or {metrics: 1}).
bool include = section->includeByDefault();
const auto& elem = cmdObj[section->getSectionName()];
if (elem.type()) {
include = elem.trueValue();
}
if (!include) {
continue;
}
section->appendSection(txn, elem, &result);
timeBuilder.appendNumber(
static_cast<string>(str::stream() << "after " << section->getSectionName()),
durationCount<Milliseconds>(clock->now() - runStart));
}
// --- counters
bool includeMetricTree = MetricTree::theMetricTree != NULL;
if (cmdObj["metrics"].type() && !cmdObj["metrics"].trueValue())
includeMetricTree = false;
if (includeMetricTree) {
MetricTree::theMetricTree->appendTo(result);
}
// --- some hard coded global things hard to pull out
{
RamLog::LineIterator rl(RamLog::get("warnings"));
if (rl.lastWrite() >= time(0) - (10 * 60)) { // only show warnings from last 10 minutes
BSONArrayBuilder arr(result.subarrayStart("warnings"));
while (rl.more()) {
arr.append(rl.next());
}
arr.done();
}
}
auto runElapsed = clock->now() - runStart;
timeBuilder.appendNumber("at end", durationCount<Milliseconds>(runElapsed));
if (runElapsed > Milliseconds(1000)) {
BSONObj t = timeBuilder.obj();
log() << "serverStatus was very slow: " << t;
result.append("timing", t);
}
return true;
}
示例10: applyOperation_inlock
/** @param fromRepl false if from ApplyOpsCmd
@return true if this was an update that should have been applied but the target document does not exist (DNE); see the replset initial sync code.
*/
bool applyOperation_inlock(const BSONObj& op, bool fromRepl, bool convertUpdateToUpsert) {
LOG(3) << "applying op: " << op << endl;
bool failedUpdate = false;
OpCounters * opCounters = fromRepl ? &replOpCounters : &globalOpCounters;
const char *names[] = { "o", "ns", "op", "b" };
BSONElement fields[4];
op.getFields(4, names, fields);
BSONObj o;
if( fields[0].isABSONObj() )
o = fields[0].embeddedObject();
const char *ns = fields[1].valuestrsafe();
Lock::assertWriteLocked(ns);
NamespaceDetails *nsd = nsdetails(ns);
// operation type -- see logOp() comments for types
const char *opType = fields[2].valuestrsafe();
if ( *opType == 'i' ) {
opCounters->gotInsert();
const char *p = strchr(ns, '.');
if ( p && strcmp(p, ".system.indexes") == 0 ) {
if (o["background"].trueValue()) {
IndexBuilder* builder = new IndexBuilder(ns, o);
// This spawns a new thread and returns immediately.
builder->go();
}
else {
IndexBuilder builder(ns, o);
// Finish the foreground build before returning
builder.build();
}
}
else {
// do upserts for inserts as we might get replayed more than once
OpDebug debug;
BSONElement _id;
if( !o.getObjectID(_id) ) {
/* No _id. This will be very slow. */
Timer t;
updateObjectsForReplication(ns, o, o, true, false, false, debug, false,
QueryPlanSelectionPolicy::idElseNatural() );
if( t.millis() >= 2 ) {
RARELY OCCASIONALLY log() << "warning, repl doing slow updates (no _id field) for " << ns << endl;
}
}
else {
// probably don't need this since all replicated colls have _id indexes now
// but keep it just in case
RARELY if ( nsd && !nsd->isCapped() ) { ensureHaveIdIndex(ns, false); }
/* todo : it may be better to do an insert here, and then catch the dup key exception and do update
then. very few upserts will not be inserts...
*/
BSONObjBuilder b;
b.append(_id);
updateObjectsForReplication(ns, o, b.done(), true, false, false , debug, false,
QueryPlanSelectionPolicy::idElseNatural() );
}
}
}
示例11: createOplog
// Create (or validate) the oplog at startup. For replica sets the namespace
// is rsoplog, otherwise "local.oplog.$main". If the oplog already exists its
// size is checked against the configured --oplogSize (throwing UserException
// 13257 on mismatch) and, in master/slave mode, OpTime is seeded from the
// newest entry. If it does not exist, it is created as a capped collection
// using either the configured size or a platform-dependent default.
void createOplog() {
Lock::GlobalWrite lk;
const char * ns = "local.oplog.$main";
bool rs = !cmdLine._replSet.empty();
if( rs )
ns = rsoplog;
Client::Context ctx(ns);
NamespaceDetails * nsd = nsdetails( ns );
if ( nsd ) {
// Oplog already exists: verify that its size matches the command line.
if ( cmdLine.oplogSize != 0 ) {
int o = (int)(nsd->storageSize() / ( 1024 * 1024 ) );
int n = (int)(cmdLine.oplogSize / ( 1024 * 1024 ) );
if ( n != o ) {
stringstream ss;
ss << "cmdline oplogsize (" << n << ") different than existing (" << o << ") see: http://dochub.mongodb.org/core/increase-oplog";
log() << ss.str() << endl;
throw UserException( 13257 , ss.str() );
}
}
if( rs ) return;
// Master/slave mode: resume the optime from the last oplog entry.
DBDirectClient c;
BSONObj lastOp = c.findOne( ns, Query().sort(reverseNaturalObj) );
if ( !lastOp.isEmpty() ) {
OpTime::setLast( lastOp[ "ts" ].date() );
}
return;
}
/* create an oplog collection, if it doesn't yet exist. */
BSONObjBuilder b;
double sz;
if ( cmdLine.oplogSize != 0 )
sz = (double)cmdLine.oplogSize;
else {
/* not specified. pick a default size */
sz = 50.0 * 1024 * 1024;
if ( sizeof(int *) >= 8 ) {
#if defined(__APPLE__)
// typically these are desktops (dev machines), so keep it smallish
sz = (256-64) * 1024 * 1024;
#else
// 64-bit server: default to ~990MB or 5% of free disk, whichever is larger.
sz = 990.0 * 1024 * 1024;
boost::intmax_t free = File::freeSpace(dbpath); //-1 if call not supported.
double fivePct = free * 0.05;
if ( fivePct > sz )
sz = fivePct;
#endif
}
}
log() << "******" << endl;
log() << "creating replication oplog of size: " << (int)( sz / ( 1024 * 1024 ) ) << "MB..." << endl;
b.append("size", sz);
b.appendBool("capped", 1);
b.appendBool("autoIndexId", false);
string err;
BSONObj o = b.done();
userCreateNS(ns, o, err, false);
if( !rs )
logOp( "n", "", BSONObj() );
/* sync here so we don't get any surprising lag later when we try to sync */
MemoryMappedFile::flushAll(true);
log() << "******" << endl;
}
示例12: handleRESTQuery
// Handle a REST query against collection 'ns'. Recognized params:
// skip/limit (or legacy "count") control paging, "one=true" returns just the
// first match (404 when none), and any filter_<field>=<value> pair becomes an
// equality predicate (numeric when the value parses as a number). Results
// are written to 'out' as JSON.
void handleRESTQuery( string ns , string action , BSONObj & params , int & responseCode , stringstream & out ) {
Timer t;
int skip = _getOption( params["skip"] , 0 );
int num = _getOption( params["limit"] , _getOption( params["count" ] , 1000 ) ); // count is old, limit is new
int one = 0;
if ( params["one"].type() == String && tolower( params["one"].valuestr()[0] ) == 't' ) {
num = 1;
one = 1;
}
BSONObjBuilder queryBuilder;
BSONObjIterator i(params);
while ( i.more() ){
BSONElement e = i.next();
string name = e.fieldName();
// Only parameters named filter_<field> contribute to the query.
// (Was "! name.find(...) == 0", which only behaved correctly by
// accident of operator precedence.)
if ( name.find( "filter_" ) != 0 )
continue;
// Keep the suffix alive in a named string: calling .c_str() directly on
// the temporary returned by substr() left a dangling pointer that was
// then read after the temporary was destroyed.
const string field = name.substr( 7 );
const char * val = e.valuestr();
char * temp;
// TODO: this is how i guess if something is a number. pretty lame right now
double number = strtod( val , &temp );
if ( temp != val )
queryBuilder.append( field.c_str() , number );
else
queryBuilder.append( field.c_str() , val );
}
BSONObj query = queryBuilder.obj();
auto_ptr<DBClientCursor> cursor = db.query( ns.c_str() , query, num , skip );
if ( one ) {
// "one" mode: emit the single document (or 404) and skip the envelope.
if ( cursor->more() ) {
BSONObj obj = cursor->next();
out << obj.jsonString() << "\n";
}
else {
responseCode = 404;
}
return;
}
// Normal mode: CouchDB-style JSON envelope around the result rows.
out << "{\n";
out << "  \"offset\" : " << skip << ",\n";
out << "  \"rows\": [\n";
int howMany = 0;
while ( cursor->more() ) {
if ( howMany++ )
out << " ,\n";
BSONObj obj = cursor->next();
out << "    " << obj.jsonString();
}
out << "\n  ],\n\n";
out << "  \"total_rows\" : " << howMany << " ,\n";
out << "  \"query\" : " << query.jsonString() << " ,\n";
out << "  \"millis\" : " << t.millis() << "\n";
out << "}\n";
}
示例13: mongoSubscribeContext
/* ****************************************************************************
*
* mongoSubscribeContext -
*/
// Create a context subscription: builds the csub document (expiration,
// reference, optional throttling, entities, attributes, notify conditions
// and notification format), inserts it into the subscribe-context
// collection, and fills the response with the new subscription id.
// Fix: the source contained the mojibake token "¬ificationDone" (an HTML
// "&not;" entity swallowing the '&' of "&notificationDone"); restored the
// intended address-of argument so the code compiles.
HttpStatusCode mongoSubscribeContext(SubscribeContextRequest* requestP, SubscribeContextResponse* responseP, Format inFormat)
{
/* Take semaphore. The LM_S* family of macros combines semaphore release with return */
semTake();
LM_T(LmtMongo, ("Subscribe Context Request"));
DBClientConnection* connection = getMongoConnection();
/* If expiration is not present, then use a default one */
if (requestP->duration.isEmpty()) {
requestP->duration.set(DEFAULT_DURATION);
}
/* Calculate expiration (using the current time and the duration field in the request) */
long long expiration = getCurrentTime() + requestP->duration.parse();
LM_T(LmtMongo, ("Subscription expiration: %lu", expiration));
/* Create the mongoDB subscription document */
BSONObjBuilder sub;
OID oid;
oid.init();
sub.append("_id", oid);
sub.append(CSUB_EXPIRATION, expiration);
sub.append(CSUB_REFERENCE, requestP->reference.get());
/* Throttling */
if (!requestP->throttling.isEmpty()) {
sub.append(CSUB_THROTTLING, requestP->throttling.parse());
}
/* Build entities array */
BSONArrayBuilder entities;
for (unsigned int ix = 0; ix < requestP->entityIdVector.size(); ++ix) {
EntityId* en = requestP->entityIdVector.get(ix);
entities.append(BSON(CSUB_ENTITY_ID << en->id <<
CSUB_ENTITY_TYPE << en->type <<
CSUB_ENTITY_ISPATTERN << en->isPattern));
}
sub.append(CSUB_ENTITIES, entities.arr());
/* Build attributes array */
BSONArrayBuilder attrs;
for (unsigned int ix = 0; ix < requestP->attributeList.size(); ++ix) {
attrs.append(requestP->attributeList.get(ix));
}
sub.append(CSUB_ATTRS, attrs.arr());
/* Build conditions array (including side-effect notifications and threads creation) */
bool notificationDone = false;
BSONArray conds = processConditionVector(&requestP->notifyConditionVector,
requestP->entityIdVector,
requestP->attributeList, oid.str(),
requestP->reference.get(),
&notificationDone,
inFormat);
sub.append(CSUB_CONDITIONS, conds);
if (notificationDone) {
sub.append(CSUB_LASTNOTIFICATION, getCurrentTime());
sub.append(CSUB_COUNT, 1);
}
/* Adding format to use in notifications */
sub.append(CSUB_FORMAT, std::string(formatToString(inFormat)));
/* Insert document in database */
BSONObj subDoc = sub.obj();
try {
LM_T(LmtMongo, ("insert() in '%s' collection: '%s'", getSubscribeContextCollectionName(), subDoc.toString().c_str()));
connection->insert(getSubscribeContextCollectionName(), subDoc);
}
catch( const DBException &e ) {
responseP->subscribeError.errorCode.fill(SccReceiverInternalError,
std::string("collection: ") + getSubscribeContextCollectionName() +
" - insert(): " + subDoc.toString() +
" - exception: " + e.what());
LM_SRE(SccOk,("Database error '%s'", responseP->subscribeError.errorCode.reasonPhrase.c_str()));
}
/* Fill the response element */
responseP->subscribeResponse.duration = requestP->duration;
responseP->subscribeResponse.subscriptionId.set(oid.str());
responseP->subscribeResponse.throttling = requestP->throttling;
LM_SR(SccOk);
}
示例14: _run
// Implementation of the replSetReconfig command: validates the proposed
// configuration, checks that the affected members are up, and installs the
// new config. With force=true, a config can be staged even before the
// replica set object (theReplSet) exists; without force, the command must
// run on the current primary.
bool _run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
if( cmdObj["replSetReconfig"].type() != Object ) {
errmsg = "no configuration specified";
return false;
}
bool force = cmdObj.hasField("force") && cmdObj["force"].trueValue();
if( force && !theReplSet ) {
// Forced reconfig before the set is up: stash the config for later.
replSettings.reconfig = cmdObj["replSetReconfig"].Obj().getOwned();
result.append("msg", "will try this config momentarily, try running rs.conf() again in a few seconds");
return true;
}
if ( !check(errmsg, result) ) {
return false;
}
if( !force && !theReplSet->box.getState().primary() ) {
errmsg = "replSetReconfig command must be sent to the current replica set primary.";
return false;
}
{
// just make sure we can get a write lock before doing anything else. we'll reacquire one
// later. of course it could be stuck then, but this check lowers the risk if weird things
// are up - we probably don't want a change to apply 30 minutes after the initial attempt.
time_t t = time(0);
Lock::GlobalWrite lk;
if( time(0)-t > 20 ) {
errmsg = "took a long time to get write lock, so not initiating. Initiate when server less busy?";
return false;
}
}
try {
scoped_ptr<ReplSetConfig> newConfig
(ReplSetConfig::make(cmdObj["replSetReconfig"].Obj(), force));
log() << "replSet replSetReconfig config object parses ok, " <<
newConfig->members.size() << " members specified" << rsLog;
// Reject transitions the replica set does not allow (e.g. changing _id).
if( !ReplSetConfig::legalChange(theReplSet->getConfig(), *newConfig, errmsg) ) {
return false;
}
checkMembersUpForConfigChange(*newConfig, result, false);
log() << "replSet replSetReconfig [2]" << rsLog;
theReplSet->haveNewConfig(*newConfig, true);
ReplSet::startupStatusMsg.set("replSetReconfig'd");
}
catch( DBException& e ) {
log() << "replSet replSetReconfig exception: " << e.what() << rsLog;
throw;
}
catch( string& se ) {
log() << "replSet reconfig exception: " << se << rsLog;
errmsg = se;
return false;
}
resetSlaveCache();
return true;
}
示例15: _renameSingleNamespace
// Rename a collection: renames the main namespace, then for every index spec
// in system.indexes whose "ns" field referenced the old name, inserts a
// rewritten spec, fixes the on-disk IndexDetails pointer, renames the
// index's underlying namespace, and finally deletes the old spec record.
Status MMAPV1DatabaseCatalogEntry::renameCollection( OperationContext* txn,
const StringData& fromNS,
const StringData& toNS,
bool stayTemp ) {
Status s = _renameSingleNamespace( txn, fromNS, toNS, stayTemp );
if ( !s.isOK() )
return s;
NamespaceDetails* details = _namespaceIndex.details( toNS );
invariant( details );
RecordStoreV1Base* systemIndexRecordStore = _getIndexRecordStore( txn );
scoped_ptr<RecordIterator> it( systemIndexRecordStore->getIterator() );
while ( !it->isEOF() ) {
DiskLoc loc = it->getNext();
const Record* rec = it->recordFor( loc );
BSONObj oldIndexSpec( rec->data() );
if ( fromNS != oldIndexSpec["ns"].valuestrsafe() )
continue;
// Copy the spec, replacing only the "ns" field with the new name.
BSONObj newIndexSpec;
{
BSONObjBuilder b;
BSONObjIterator i( oldIndexSpec );
while( i.more() ) {
BSONElement e = i.next();
if ( strcmp( e.fieldName(), "ns" ) != 0 )
b.append( e );
else
b << "ns" << toNS;
}
newIndexSpec = b.obj();
}
StatusWith<DiskLoc> newIndexSpecLoc =
systemIndexRecordStore->insertRecord( txn,
newIndexSpec.objdata(),
newIndexSpec.objsize(),
-1 );
if ( !newIndexSpecLoc.isOK() )
return newIndexSpecLoc.getStatus();
const string& indexName = oldIndexSpec.getStringField( "name" );
{
// fix IndexDetails pointer
NamespaceDetailsCollectionCatalogEntry ce( toNS, details,
_getIndexRecordStore( txn ), this );
int indexI = ce._findIndexNumber( indexName );
IndexDetails& indexDetails = details->idx(indexI);
*txn->recoveryUnit()->writing(&indexDetails.info) = newIndexSpecLoc.getValue(); // XXX: dur
}
{
// move underlying namespace
string oldIndexNs = IndexDescriptor::makeIndexNamespace( fromNS, indexName );
string newIndexNs = IndexDescriptor::makeIndexNamespace( toNS, indexName );
Status s = _renameSingleNamespace( txn, oldIndexNs, newIndexNs, false );
if ( !s.isOK() )
return s;
}
systemIndexRecordStore->deleteRecord( txn, loc );
}
return Status::OK();
}