This article collects typical usage examples of the C++ method BSONObj::objsize, taken from real-world code. If you have been wondering what BSONObj::objsize does, how to call it, or where it is used, the curated examples below should help; you can also explore further usage of the BSONObj class, to which the method belongs.
The following presents 15 code examples of the BSONObj::objsize method, ordered by popularity.
Example 1: writeMetadata
Status FTDCFileWriter::writeMetadata(const BSONObj& metadata) {
BSONObj wrapped = FTDCBSONUtil::createBSONMetadataDocument(metadata);
return writeArchiveFileBuffer({wrapped.objdata(), static_cast<size_t>(wrapped.objsize())});
}
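Here objdata() and objsize() together describe the object's single contiguous buffer: objdata() points at the first byte (the int32 length prefix every BSON document starts with) and objsize() is that prefix's value, i.e. the total length in bytes. A minimal sketch of round-tripping a BSONObj through raw bytes, assuming only the mongo::BSONObj API used in these examples (the include path is illustrative):

#include <vector>
#include "mongo/bson/bsonobj.h"   // assumed include path

// Copy a BSONObj into an owned byte vector, then rebuild it from the raw bytes.
std::vector<char> toBytes(const mongo::BSONObj& obj) {
    // objsize() covers the whole document: length prefix, elements, trailing NUL.
    return std::vector<char>(obj.objdata(), obj.objdata() + obj.objsize());
}

mongo::BSONObj fromBytes(const std::vector<char>& bytes) {
    // BSONObj does not take ownership of the buffer, so copy with getOwned().
    return mongo::BSONObj(bytes.data()).getOwned();
}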
Example 2: _reduce
void JSReducer::_reduce( const BSONList& tuples , BSONObj& key , int& endSizeEstimate ) {
uassert( 10074 , "need values" , tuples.size() );
int sizeEstimate = ( tuples.size() * tuples.begin()->getField( "value" ).size() ) + 128;
BSONObjBuilder reduceArgs( sizeEstimate );
boost::scoped_ptr<BSONArrayBuilder> valueBuilder;
int sizeSoFar = 0;
unsigned n = 0;
for ( ; n<tuples.size(); n++ ) {
BSONObjIterator j(tuples[n]);
BSONElement keyE = j.next();
if ( n == 0 ) {
reduceArgs.append( keyE );
key = keyE.wrap();
sizeSoFar = 5 + keyE.size();
valueBuilder.reset(new BSONArrayBuilder( reduceArgs.subarrayStart( "tuples" ) ));
}
BSONElement ee = j.next();
uassert( 13070 , "value too large to reduce" , ee.size() < ( BSONObjMaxUserSize / 2 ) );
if ( sizeSoFar + ee.size() > BSONObjMaxUserSize ) {
assert( n > 1 ); // if not, inf. loop
break;
}
valueBuilder->append( ee );
sizeSoFar += ee.size();
}
assert(valueBuilder);
valueBuilder->done();
BSONObj args = reduceArgs.obj();
Scope * s = _func.scope();
s->invokeSafe( _func.func() , args );
if ( s->type( "return" ) == Array ) {
uasserted( 10075 , "reduce -> multiple not supported yet");
return;
}
endSizeEstimate = key.objsize() + ( args.objsize() / tuples.size() );
if ( n == tuples.size() )
return;
// the input list was too large
BSONList x;
for ( ; n < tuples.size(); n++ ) {
x.push_back( tuples[n] );
}
BSONObjBuilder temp( endSizeEstimate );
temp.append( key.firstElement() );
s->append( temp , "1" , "return" );
x.push_back( temp.obj() );
_reduce( x , key , endSizeEstimate );
}
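The interesting part of _reduce is the byte accounting: sizeEstimate pre-sizes the builder, the BSONObjMaxUserSize/2 uassert rejects any single value that could never fit, and sizeSoFar stops packing before the argument object would exceed BSONObjMaxUserSize, with the leftovers re-reduced recursively. A simplified, hypothetical sketch of the same "pack until the byte budget is hit" pattern using objsize():

#include <vector>
#include "mongo/bson/bsonobj.h"   // assumed include path

// Split documents into batches whose serialized size stays under maxBytes.
// A sketch of the guard in _reduce, not the actual map/reduce code.
std::vector<std::vector<mongo::BSONObj>> batchByBytes(
        const std::vector<mongo::BSONObj>& docs, int maxBytes) {
    std::vector<std::vector<mongo::BSONObj>> batches(1);
    int soFar = 0;
    for (const mongo::BSONObj& d : docs) {
        if (!batches.back().empty() && soFar + d.objsize() > maxBytes) {
            batches.emplace_back();   // start a new batch instead of overflowing
            soFar = 0;
        }
        batches.back().push_back(d);
        soFar += d.objsize();
    }
    return batches;
}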
Example 3: insert
static void insert( const BSONObj &o, bool god = false ) {
Lock::DBWrite lk(ns());
Client::Context ctx( ns() );
theDataFileMgr.insert( ns(), o.objdata(), o.objsize(), god );
}
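This test helper simply hands the object's raw buffer to the storage layer; objsize() tells theDataFileMgr how many bytes to copy. A hypothetical usage from a dbtests fixture (the BSON macro and ns() come from the surrounding test harness):

// Build a document and insert its raw buffer via the helper above.
void insertSampleDoc() {
    mongo::BSONObj doc = BSON("_id" << 1 << "x" << "hello");
    insert(doc);   // forwards doc.objdata() / doc.objsize() to the store
}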
Example 4: _logOpOld
static void _logOpOld(const char *opstr, const char *ns, const char *logNS, const BSONObj& obj, BSONObj *o2, bool *bb, bool fromMigrate ) {
Lock::DBWrite lk("local");
static BufBuilder bufbuilder(8*1024); // todo there is likely a mutex on this constructor
if ( strncmp(ns, "local.", 6) == 0 ) {
if ( strncmp(ns, "local.slaves", 12) == 0 ) {
resetSlaveCache();
}
return;
}
mutex::scoped_lock lk2(OpTime::m);
const OpTime ts = OpTime::now(lk2);
Client::Context context("",0,false);
/* we jump through a bunch of hoops here to avoid copying the obj buffer twice --
instead we do a single copy to the destination position in the memory mapped file.
*/
bufbuilder.reset();
BSONObjBuilder b(bufbuilder);
b.appendTimestamp("ts", ts.asDate());
b.append("op", opstr);
b.append("ns", ns);
if (fromMigrate)
b.appendBool("fromMigrate", true);
if ( bb )
b.appendBool("b", *bb);
if ( o2 )
b.append("o2", *o2);
BSONObj partial = b.done(); // partial is everything except the o:... part.
int po_sz = partial.objsize();
int len = po_sz + obj.objsize() + 1 + 2 /*o:*/;
Record *r;
if( logNS == 0 ) {
logNS = "local.oplog.$main";
if ( localOplogMainDetails == 0 ) {
Client::Context ctx( logNS , dbpath, false);
localDB = ctx.db();
verify( localDB );
localOplogMainDetails = nsdetails(logNS);
verify( localOplogMainDetails );
}
Client::Context ctx( logNS , localDB, false );
r = theDataFileMgr.fast_oplog_insert(localOplogMainDetails, logNS, len);
}
else {
Client::Context ctx( logNS, dbpath, false );
verify( nsdetails( logNS ) );
// first we allocate the space, then we fill it below.
r = theDataFileMgr.fast_oplog_insert( nsdetails( logNS ), logNS, len);
}
append_O_Obj(r->data(), partial, obj);
context.getClient()->setLastOp( ts );
LOG( 6 ) << "logging op:" << BSONObj::make(r) << endl;
}
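The length computation len = po_sz + obj.objsize() + 1 + 2 relies on the BSON element layout: appending obj under the field name "o" costs one type byte, plus the name and its NUL terminator (2 bytes for "o"), plus the sub-object's own bytes. A worked sketch of that arithmetic for an arbitrary field name (the function is illustrative, not part of the server):

#include <cstring>
#include "mongo/bson/bsonobj.h"   // assumed include path

// Bytes a document grows by when 'obj' is appended as an embedded document:
//   1 byte          element type tag (0x03 for an embedded document)
//   strlen(name)+1  field name plus NUL terminator
//   obj.objsize()   the sub-object itself (it carries its own length prefix)
int appendedObjectSize(const char* fieldName, const mongo::BSONObj& obj) {
    return 1 + static_cast<int>(std::strlen(fieldName)) + 1 + obj.objsize();
}
// For fieldName "o" this is exactly the "+ 1 + 2" in _logOpOld's len.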
Example 5: prepareToBuildIndex
/* Prepare to build an index. Does not actually build it (except for a special _id case).
- We validate that the params are good
- We verify that the index does not already exist
- We create the source collection if it does not exist
example of 'io':
{ ns : 'test.foo', name : 'z', key : { z : 1 } }
throws DBException
@param sourceNS - source NS we are indexing
@param sourceCollection - its details ptr
@return true if ok to continue. When false, we stop/fail silently (index already exists)
*/
bool prepareToBuildIndex(const BSONObj& io, bool god, string& sourceNS, NamespaceDetails *&sourceCollection, BSONObj& fixedIndexObject ) {
sourceCollection = 0;
// logical name of the index. todo: get rid of the name, we don't need it!
const char *name = io.getStringField("name");
uassert(12523, "no index name specified", *name);
// the collection for which we are building an index
sourceNS = io.getStringField("ns");
uassert(10096, "invalid ns to index", sourceNS.find( '.' ) != string::npos);
uassert(10097, "bad table to index name on add index attempt",
cc().database()->name == nsToDatabase(sourceNS.c_str()));
BSONObj key = io.getObjectField("key");
uassert(12524, "index key pattern too large", key.objsize() <= 2048);
if( !validKeyPattern(key) ) {
string s = string("bad index key pattern ") + key.toString();
uasserted(10098 , s.c_str());
}
if ( sourceNS.empty() || key.isEmpty() ) {
log(2) << "bad add index attempt name:" << (name?name:"") << "\n ns:" <<
sourceNS << "\n idxobj:" << io.toString() << endl;
string s = "bad add index attempt " + sourceNS + " key:" + key.toString();
uasserted(12504, s);
}
sourceCollection = nsdetails(sourceNS.c_str());
if( sourceCollection == 0 ) {
// try to create it
string err;
if ( !userCreateNS(sourceNS.c_str(), BSONObj(), err, false) ) {
problem() << "ERROR: failed to create collection while adding its index. " << sourceNS << endl;
return false;
}
sourceCollection = nsdetails(sourceNS.c_str());
tlog() << "info: creating collection " << sourceNS << " on add index" << endl;
assert( sourceCollection );
}
if ( sourceCollection->findIndexByName(name) >= 0 ) {
// index already exists.
return false;
}
if( sourceCollection->findIndexByKeyPattern(key) >= 0 ) {
log(2) << "index already exists with diff name " << name << ' ' << key.toString() << endl;
return false;
}
if ( sourceCollection->nIndexes >= NamespaceDetails::NIndexesMax ) {
stringstream ss;
ss << "add index fails, too many indexes for " << sourceNS << " key:" << key.toString();
string s = ss.str();
log() << s << '\n';
uasserted(12505,s);
}
/* we can't build a new index for the ns if a build is already in progress in the background -
EVEN IF this is a foreground build.
*/
uassert(12588, "cannot add index with a background operation in progress",
!BackgroundOperation::inProgForNs(sourceNS.c_str()));
/* this is because we want key patterns like { _id : 1 } and { _id : <someobjid> } to
all be treated as the same pattern.
*/
if ( IndexDetails::isIdIndexPattern(key) ) {
if( !god ) {
ensureHaveIdIndex( sourceNS.c_str() );
return false;
}
}
else {
/* is buildIndexes:false set for this replica set member?
if so we don't build any indexes except _id
*/
if( theReplSet && !theReplSet->buildIndexes() )
return false;
}
string pluginName = IndexPlugin::findPluginName( key );
IndexPlugin * plugin = pluginName.size() ? IndexPlugin::get( pluginName ) : 0;
if ( plugin ) {
fixedIndexObject = plugin->adjustIndexSpec( io );
//......... portion of code omitted .........
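Before any of the omitted plugin handling runs, prepareToBuildIndex gates the spec on simple checks, including a 2KB cap on the key pattern document itself. That check in isolation, using only calls from the example above:

// Early size gate on an index spec's key pattern, mirroring prepareToBuildIndex.
void checkKeyPatternSize(const mongo::BSONObj& io) {
    mongo::BSONObj key = io.getObjectField("key");
    // 2048 bytes is the limit enforced by uassert(12524) in the example above.
    uassert(12524, "index key pattern too large", key.objsize() <= 2048);
}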
Example 6: run
//......... portion of code omitted .........
if (!eDistanceMultiplier.eoo()) {
uassert(17296, "distanceMultiplier must be a number", eDistanceMultiplier.isNumber());
distanceMultiplier = eDistanceMultiplier.number();
uassert(17297, "distanceMultiplier must be non-negative", distanceMultiplier >= 0);
}
BSONObj projObj = BSON("$pt" << BSON("$meta" << LiteParsedQuery::metaGeoNearPoint) <<
"$dis" << BSON("$meta" << LiteParsedQuery::metaGeoNearDistance));
CanonicalQuery* cq;
const WhereCallbackReal whereCallback(txn, nss.db());
if (!CanonicalQuery::canonicalize(nss,
rewritten,
BSONObj(),
projObj,
0,
numWanted,
BSONObj(),
&cq,
whereCallback).isOK()) {
errmsg = "Can't parse filter / create query";
return false;
}
// Prevent chunks from being cleaned up during yields - this allows us to only check the
// version on initial entry into geoNear.
RangePreserver preserver(collection);
PlanExecutor* rawExec;
if (!getExecutor(txn, collection, cq, PlanExecutor::YIELD_AUTO, &rawExec, 0).isOK()) {
errmsg = "can't get query executor";
return false;
}
scoped_ptr<PlanExecutor> exec(rawExec);
double totalDistance = 0;
BSONObjBuilder resultBuilder(result.subarrayStart("results"));
double farthestDist = 0;
BSONObj currObj;
long long results = 0;
while ((results < numWanted) && PlanExecutor::ADVANCED == exec->getNext(&currObj, NULL)) {
// Come up with the correct distance.
double dist = currObj["$dis"].number() * distanceMultiplier;
totalDistance += dist;
if (dist > farthestDist) { farthestDist = dist; }
// Strip out '$dis' and '$pt' from the result obj. The rest gets added as 'obj'
// in the command result.
BSONObjIterator resIt(currObj);
BSONObjBuilder resBob;
while (resIt.more()) {
BSONElement elt = resIt.next();
if (!mongoutils::str::equals("$pt", elt.fieldName())
&& !mongoutils::str::equals("$dis", elt.fieldName())) {
resBob.append(elt);
}
}
BSONObj resObj = resBob.obj();
// Don't make a too-big result object.
if (resultBuilder.len() + resObj.objsize()> BSONObjMaxUserSize) {
warning() << "Too many geoNear results for query " << rewritten.toString()
<< ", truncating output.";
break;
}
// Add the next result to the result builder.
BSONObjBuilder oneResultBuilder(
resultBuilder.subobjStart(BSONObjBuilder::numStr(results)));
oneResultBuilder.append("dis", dist);
if (includeLocs) {
oneResultBuilder.appendAs(currObj["$pt"], "loc");
}
oneResultBuilder.append("obj", resObj);
oneResultBuilder.done();
++results;
}
resultBuilder.done();
// Fill out the stats subobj.
BSONObjBuilder stats(result.subobjStart("stats"));
// Fill in nscanned from the explain.
PlanSummaryStats summary;
Explain::getSummaryStats(exec.get(), &summary);
stats.appendNumber("nscanned", summary.totalKeysExamined);
stats.appendNumber("objectsLoaded", summary.totalDocsExamined);
stats.append("avgDistance", totalDistance / results);
stats.append("maxDistance", farthestDist);
stats.append("time", txn->getCurOp()->elapsedMillis());
stats.done();
return true;
}
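The truncation guard resultBuilder.len() + resObj.objsize() > BSONObjMaxUserSize is the standard pattern for keeping a reply under the document size limit: len() is what the builder already holds and objsize() is the exact cost of the candidate (element-name overhead aside, which the check conservatively ignores). A sketch of that guard on its own, assuming a numbered-array reply like the one above:

// Append one result unless it would push the reply past the size limit.
// Sketch of the geoNear truncation check; 'index' names the array slot.
bool tryAppendResult(mongo::BSONObjBuilder& out, int index, const mongo::BSONObj& doc) {
    if (out.len() + doc.objsize() > mongo::BSONObjMaxUserSize) {
        return false;   // caller logs a truncation warning and stops
    }
    out.append(mongo::BSONObjBuilder::numStr(index), doc);
    return true;
}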
Example 7: sendNextBatch
bool ShardedClientCursor::sendNextBatch( Request& r , int ntoreturn ,
BufBuilder& buffer, int& docCount ) {
uassert( 10191 , "cursor already done" , ! _done );
int maxSize = 1024 * 1024;
if ( _totalSent > 0 )
maxSize *= 3;
docCount = 0;
// If ntoreturn is negative, it means that we should send up to -ntoreturn results
// back to the client, and that we should only send a *single batch*. An ntoreturn of
// 1 is also a special case which means "return up to 1 result in a single batch" (so
// that +1 actually has the same meaning of -1). For all other values of ntoreturn, we
// may have to return multiple batches.
const bool sendMoreBatches = ntoreturn == 0 || ntoreturn > 1;
ntoreturn = abs( ntoreturn );
bool cursorHasMore = true;
while ( ( cursorHasMore = _cursor->more() ) ) {
BSONObj o = _cursor->next();
buffer.appendBuf( (void*)o.objdata() , o.objsize() );
docCount++;
// Ensure that the next batch will never wind up requesting more docs from the shard
// than are remaining to satisfy the initial ntoreturn.
if (ntoreturn != 0) {
_cursor->setBatchSize(ntoreturn - docCount);
}
if ( buffer.len() > maxSize ) {
break;
}
if ( docCount == ntoreturn ) {
// soft limit aka batch size
break;
}
if ( ntoreturn == 0 && _totalSent == 0 && docCount >= 100 ) {
// first batch should be max 100 unless batch size specified
break;
}
}
// We need to request another batch if the following two conditions hold:
//
// 1. ntoreturn is positive and not equal to 1 (see the comment above). This condition
// is stored in 'sendMoreBatches'.
//
// 2. The last call to _cursor->more() was true (i.e. we never explicitly got a false
// value from _cursor->more()). This condition is stored in 'cursorHasMore'. If the server
// hits EOF while executing a query or a getmore, it will pass a cursorId of 0 in the
// query response to indicate that there are no more results. In this case, _cursor->more()
// will be explicitly false, and we know for sure that we do not have to send more batches.
//
// On the other hand, if _cursor->more() is true there may or may not be more results.
// Suppose that the mongod generates enough results to fill this batch. In this case it
// does not know whether or not there are more, because determining that would require requesting an
// extra result and seeing whether we get EOF. The mongod sends a valid cursorId to
// indicate that there may be more. We do the same here: we indicate that there may be
// more results to retrieve by setting 'hasMoreBatches' to true.
bool hasMoreBatches = sendMoreBatches && cursorHasMore;
LOG(5) << "\t hasMoreBatches: " << hasMoreBatches
<< " sendMoreBatches: " << sendMoreBatches
<< " cursorHasMore: " << cursorHasMore
<< " ntoreturn: " << ntoreturn
<< " num: " << docCount
<< " id:" << getId()
<< " totalSent: " << _totalSent << endl;
_totalSent += docCount;
_done = ! hasMoreBatches;
return hasMoreBatches;
}
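The ntoreturn rules described in the comments are easy to get wrong: 0 means "no client limit, keep batching", a negative value means "at most -ntoreturn results in one batch", and 1 historically behaves like -1. A tiny sketch isolating just that decoding (the struct is hypothetical):

// Decode the legacy OP_QUERY ntoreturn field, per the comments in sendNextBatch.
struct BatchLimits {
    int limit;          // max docs to return (0 = no client-imposed limit)
    bool singleBatch;   // true when the client forbids further batches
};

BatchLimits decodeNtoreturn(int ntoreturn) {
    BatchLimits out;
    out.singleBatch = (ntoreturn < 0 || ntoreturn == 1);   // -n and 1 are special
    out.limit = ntoreturn < 0 ? -ntoreturn : ntoreturn;
    return out;
}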
Example 8: newGetMore
/**
* Also called by db/ops/query.cpp. This is the new getMore entry point.
*/
QueryResult* newGetMore(const char* ns, int ntoreturn, long long cursorid, CurOp& curop,
int pass, bool& exhaust, bool* isCursorAuthorized) {
exhaust = false;
int bufSize = 512 + sizeof(QueryResult) + MaxBytesToReturnToClientAtOnce;
BufBuilder bb(bufSize);
bb.skip(sizeof(QueryResult));
// This is a read lock. TODO: There is a cursor flag for not needing this. Do we care?
Client::ReadContext ctx(ns);
// TODO: Document.
replVerifyReadsOk();
ClientCursorPin ccPin(cursorid);
ClientCursor* cc = ccPin.c();
// These are set in the QueryResult msg we return.
int resultFlags = ResultFlag_AwaitCapable;
int numResults = 0;
int startingResult = 0;
if (NULL == cc) {
cursorid = 0;
resultFlags = ResultFlag_CursorNotFound;
}
else {
// Quote: check for spoofing of the ns such that it does not match the one originally
// there for the cursor
uassert(17011, "auth error", str::equals(ns, cc->ns().c_str()));
*isCursorAuthorized = true;
// TODO: fail point?
// If the operation that spawned this cursor had a time limit set, apply leftover
// time to this getmore.
curop.setMaxTimeMicros(cc->getLeftoverMaxTimeMicros());
// TODO:
// curop.debug().query = BSONForQuery
// curop.setQuery(curop.debug().query);
// TODO: What is pass?
if (0 == pass) { cc->updateSlaveLocation(curop); }
CollectionMetadataPtr collMetadata = cc->getCollMetadata();
// If we're replaying the oplog, we save the last time that we read.
OpTime slaveReadTill;
startingResult = cc->pos();
Runner* runner = cc->getRunner();
const ParsedQuery& pq = runner->getQuery().getParsed();
// Get results out of the runner.
// TODO: There may be special handling required for tailable cursors?
runner->restoreState();
BSONObj obj;
// TODO: Differentiate EOF from error.
while (runner->getNext(&obj)) {
// If we're sharded make sure that we don't return any data that hasn't been
// migrated off of our shard yet.
if (collMetadata) {
KeyPattern kp(collMetadata->getKeyPattern());
if (!collMetadata->keyBelongsToMe(kp.extractSingleKey(obj))) { continue; }
}
// Add result to output buffer.
bb.appendBuf((void*)obj.objdata(), obj.objsize());
// Count the result.
++numResults;
// Possibly note slave's position in the oplog.
if (pq.hasOption(QueryOption_OplogReplay)) {
BSONElement e = obj["ts"];
if (Date == e.type() || Timestamp == e.type()) {
slaveReadTill = e._opTime();
}
}
if ((numResults && numResults >= ntoreturn)
|| bb.len() > MaxBytesToReturnToClientAtOnce) {
break;
}
}
cc->incPos(numResults);
runner->saveState();
// Possibly note slave's position in the oplog.
if (pq.hasOption(QueryOption_OplogReplay) && !slaveReadTill.isNull()) {
cc->slaveReadTill(slaveReadTill);
}
exhaust = pq.hasOption(QueryOption_Exhaust);
//......... portion of code omitted .........
Example 9: newRunQuery
/**
* This is called by db/ops/query.cpp. This is the entry point for answering a query.
*/
string newRunQuery(Message& m, QueryMessage& q, CurOp& curop, Message &result) {
// This is a read lock.
Client::ReadContext ctx(q.ns, dbpath);
// Parse, canonicalize, plan, transcribe, and get a runner.
Runner* rawRunner;
Status status = getRunner(q, &rawRunner);
if (!status.isOK()) {
uasserted(17007, "Couldn't process query " + q.query.toString()
+ " why: " + status.reason());
}
verify(NULL != rawRunner);
auto_ptr<Runner> runner(rawRunner);
// We freak out later if this changes before we're done with the query.
const ChunkVersion shardingVersionAtStart = shardingState.getVersion(q.ns);
// We use this a lot below.
const ParsedQuery& pq = runner->getQuery().getParsed();
// TODO: Document why we do this.
replVerifyReadsOk(&pq);
// If this exists, the collection is sharded.
// If it doesn't exist, we can assume we're not sharded.
// If we're sharded, we might encounter data that is not consistent with our sharding state.
// We must ignore this data.
CollectionMetadataPtr collMetadata;
if (!shardingState.needCollectionMetadata(pq.ns())) {
collMetadata = CollectionMetadataPtr();
}
else {
collMetadata = shardingState.getCollectionMetadata(pq.ns());
}
// Run the query.
BufBuilder bb(32768);
bb.skip(sizeof(QueryResult));
// How many results have we obtained from the runner?
int numResults = 0;
// If we're replaying the oplog, we save the last time that we read.
OpTime slaveReadTill;
// Do we save the Runner in a ClientCursor for getMore calls later?
bool saveClientCursor = false;
BSONObj obj;
// TODO: Differentiate EOF from error.
while (runner->getNext(&obj)) {
// If we're sharded make sure that we don't return any data that hasn't been migrated
// off of our shard yet.
if (collMetadata) {
// This information can change if we yield, so we must make sure to re-fetch
// it after any yield.
KeyPattern kp(collMetadata->getKeyPattern());
// This performs excessive BSONObj creation but that's OK for now.
if (!collMetadata->keyBelongsToMe(kp.extractSingleKey(obj))) { continue; }
}
// Add result to output buffer.
bb.appendBuf((void*)obj.objdata(), obj.objsize());
// Count the result.
++numResults;
// Possibly note slave's position in the oplog.
if (pq.hasOption(QueryOption_OplogReplay)) {
BSONElement e = obj["ts"];
if (Date == e.type() || Timestamp == e.type()) {
slaveReadTill = e._opTime();
}
}
// TODO: only one type of 2d search doesn't support this. We need a way to pull it out
// of CanonicalQuery. :(
const bool supportsGetMore = true;
const bool isExplain = pq.isExplain();
if (isExplain && pq.enoughForExplain(numResults)) {
break;
}
else if (!supportsGetMore && (pq.enough(numResults)
|| bb.len() >= MaxBytesToReturnToClientAtOnce)) {
break;
}
else if (pq.enoughForFirstBatch(numResults, bb.len())) {
// If only one result requested assume it's a findOne() and don't save the cursor.
if (pq.wantMore() && 1 != pq.getNumToReturn()) {
saveClientCursor = true;
}
break;
}
}
// TODO: Stage creation can set tailable depending on what's in the parsed query. We have
// the full parsed query available during planning...set it there.
//......... portion of code omitted .........
Example 10: run
int run() {
if ( ! hasParam( "from" ) ) {
log() << "need to specify --from" << endl;
return -1;
}
Client::initThread( "oplogreplay" );
log() << "going to connect" << endl;
OplogReader r(false);
r.setTailingQueryOptions( QueryOption_SlaveOk | QueryOption_AwaitData );
r.connect( getParam( "from" ) );
log() << "connected" << endl;
OpTime start( time(0) - getParam( "seconds" , 86400 ) , 0 );
log() << "starting from " << start.toStringPretty() << endl;
string ns = getParam( "oplogns" );
r.tailingQueryGTE( ns.c_str() , start );
bool legacyApplyOps = (versionCmp(mongodVersion(), "2.2.0") < 0);
int num = 0;
while ( r.more() ) {
BSONObj o = r.next();
LOG(2) << o << endl;
if ( o["$err"].type() ) {
log() << "error getting oplog" << endl;
log() << o << endl;
return -1;
}
bool print = ++num % 100000 == 0;
if ( print )
cout << num << "\t" << o << endl;
if ( o["op"].String() == "n" )
continue;
string dbname = legacyApplyOps? nsToDatabase(o["ns"].String()) : "admin";
BSONObjBuilder b( o.objsize() + 32 );
BSONArrayBuilder updates( b.subarrayStart( "applyOps" ) );
updates.append( o );
updates.done();
BSONObj c = b.obj();
BSONObj res;
bool ok = conn().runCommand( dbname , c , res );
if ( print || ! ok )
log() << res << endl;
}
return 0;
}
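Note the builder is constructed as BSONObjBuilder b(o.objsize() + 32): pre-sizing by the entry's objsize plus a little slack for the applyOps envelope means the wrapper is built with a single allocation. The wrapping step on its own, as a sketch (the +32 slack value is taken from the example, not a documented constant):

// Wrap one oplog entry in an applyOps command document.
mongo::BSONObj makeApplyOpsCommand(const mongo::BSONObj& oplogEntry) {
    // Pre-size: entry bytes plus slack for "applyOps", the array index, framing.
    mongo::BSONObjBuilder b(oplogEntry.objsize() + 32);
    mongo::BSONArrayBuilder ops(b.subarrayStart("applyOps"));
    ops.append(oplogEntry);
    ops.done();
    return b.obj();
}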
Example 11: _compactExtent
void Collection::_compactExtent(const DiskLoc diskloc, int extentNumber,
vector<IndexAccessMethod*>& indexesToInsertTo,
const CompactOptions* compactOptions, CompactStats* stats ) {
log() << "compact begin extent #" << extentNumber
<< " for namespace " << _ns << " " << diskloc;
unsigned oldObjSize = 0; // we'll report what the old padding was
unsigned oldObjSizeWithPadding = 0;
Extent *e = diskloc.ext();
e->assertOk();
verify( e->validates(diskloc) );
{
// the next/prev pointers within the extent might not be in order so we first
// page the whole thing in sequentially
log() << "compact paging in len=" << e->length/1000000.0 << "MB" << endl;
Timer t;
size_t length = e->length;
touch_pages( reinterpret_cast<const char*>(e), length );
int ms = t.millis();
if( ms > 1000 )
log() << "compact end paging in " << ms << "ms "
<< e->length/1000000.0/t.seconds() << "MB/sec" << endl;
}
{
log() << "compact copying records" << endl;
long long datasize = 0;
long long nrecords = 0;
DiskLoc L = e->firstRecord;
if( !L.isNull() ) {
while( 1 ) {
Record *recOld = L.rec();
L = getExtentManager()->getNextRecordInExtent(L);
BSONObj objOld = BSONObj::make(recOld);
if ( compactOptions->validateDocuments && !objOld.valid() ) {
// object is corrupt!
log() << "compact skipping corrupt document!";
stats->corruptDocuments++;
}
else {
unsigned docSize = objOld.objsize();
nrecords++;
oldObjSize += docSize;
oldObjSizeWithPadding += recOld->netLength();
unsigned lenWHdr = docSize + Record::HeaderSize;
unsigned lenWPadding = lenWHdr;
switch( compactOptions->paddingMode ) {
case CompactOptions::NONE:
if ( details()->isUserFlagSet(NamespaceDetails::Flag_UsePowerOf2Sizes) )
lenWPadding = details()->quantizePowerOf2AllocationSpace(lenWPadding);
break;
case CompactOptions::PRESERVE:
// if we are preserving the padding, the record should not change size
lenWPadding = recOld->lengthWithHeaders();
break;
case CompactOptions::MANUAL:
lenWPadding = compactOptions->computeRecordSize(lenWPadding);
if (lenWPadding < lenWHdr || lenWPadding > BSONObjMaxUserSize / 2 ) {
lenWPadding = lenWHdr;
}
break;
}
CompactDocWriter writer( objOld, lenWPadding );
StatusWith<DiskLoc> status = _recordStore->insertRecord( &writer, 0 );
uassertStatusOK( status.getStatus() );
datasize += _recordStore->recordFor( status.getValue() )->netLength();
InsertDeleteOptions options;
options.logIfError = false;
options.dupsAllowed = true; // in compact we should be doing no checking
for ( size_t i = 0; i < indexesToInsertTo.size(); i++ ) {
Status idxStatus = indexesToInsertTo[i]->insert( objOld,
status.getValue(),
options,
NULL );
uassertStatusOK( idxStatus );
}
}
if( L.isNull() ) {
// we just did the very last record from the old extent. it's still pointed to
// by the old extent ext, but that will be fixed below after this loop
break;
}
// remove the old records (orphan them) periodically so our commit block doesn't get too large
bool stopping = false;
RARELY stopping = *killCurrentOp.checkForInterruptNoAssert() != 0;
if( stopping || getDur().aCommitIsNeeded() ) {
e->firstRecord.writing() = L;
//......... portion of code omitted .........
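The core of the copy loop is the record-size decision: lenWHdr is the document's objsize() plus the record header, and each padding mode maps that to an on-disk allocation. A condensed sketch of the NONE and PRESERVE cases; quantizePowerOf2 here is a simplified stand-in for NamespaceDetails::quantizePowerOf2AllocationSpace:

// Round up to the next power of two (simplified stand-in for the server's
// quantizePowerOf2AllocationSpace, which caps and quantizes differently).
unsigned quantizePowerOf2(unsigned len) {
    unsigned size = 32;
    while (size < len) size *= 2;
    return size;
}

// Sketch of the record-size choice in _compactExtent's copy loop.
unsigned compactRecordSize(const mongo::BSONObj& doc, unsigned headerSize,
                           unsigned oldLenWithHeaders, bool preservePadding,
                           bool usePowerOf2) {
    unsigned lenWHdr = static_cast<unsigned>(doc.objsize()) + headerSize;
    if (preservePadding)
        return oldLenWithHeaders;          // PRESERVE: keep the old slot size
    if (usePowerOf2)
        return quantizePowerOf2(lenWHdr);  // NONE, with the power-of-2 flag set
    return lenWHdr;                        // otherwise: no padding at all
}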
Example 12: runQuery
/**
* Run a query -- includes checking for and running a Command.
* @return points to ns if exhaust mode. 0=normal mode
* @locks the db mutex for reading (and potentially for writing temporarily to create a new db).
* @yields the db mutex periodically after acquiring it.
* @asserts on scan and order memory exhaustion and other cases.
*/
const char *runQuery(Message& m, QueryMessage& q, CurOp& curop, Message &result) {
shared_ptr<ParsedQuery> pq_shared( new ParsedQuery(q) );
ParsedQuery& pq( *pq_shared );
BSONObj jsobj = q.query;
int queryOptions = q.queryOptions;
const char *ns = q.ns;
if( logLevel >= 2 )
log() << "runQuery called " << ns << " " << jsobj << endl;
curop.debug().ns = ns;
curop.debug().ntoreturn = pq.getNumToReturn();
curop.debug().query = jsobj;
curop.setQuery(jsobj);
// Run a command.
if ( pq.couldBeCommand() ) {
BufBuilder bb;
bb.skip(sizeof(QueryResult));
BSONObjBuilder cmdResBuf;
if ( runCommands(ns, jsobj, curop, bb, cmdResBuf, false, queryOptions) ) {
curop.debug().iscommand = true;
curop.debug().query = jsobj;
curop.markCommand();
auto_ptr< QueryResult > qr;
qr.reset( (QueryResult *) bb.buf() );
bb.decouple();
qr->setResultFlagsToOk();
qr->len = bb.len();
curop.debug().responseLength = bb.len();
qr->setOperation(opReply);
qr->cursorId = 0;
qr->startingFrom = 0;
qr->nReturned = 1;
result.setData( qr.release(), true );
}
else {
uasserted(13530, "bad or malformed command request?");
}
return 0;
}
bool explain = pq.isExplain();
BSONObj order = pq.getOrder();
BSONObj query = pq.getFilter();
/* The ElemIter will not be happy if this isn't really an object. So throw exception
here when that is true.
(Which may indicate bad data from client.)
*/
if ( query.objsize() == 0 ) {
out() << "Bad query object?\n jsobj:";
out() << jsobj.toString() << "\n query:";
out() << query.toString() << endl;
uassert( 10110 , "bad query object", false);
}
Client::ReadContext ctx( ns , dbpath ); // read locks
const ConfigVersion shardingVersionAtStart = shardingState.getVersion( ns );
replVerifyReadsOk(&pq);
if ( pq.hasOption( QueryOption_CursorTailable ) ) {
NamespaceDetails *d = nsdetails( ns );
uassert( 13051, "tailable cursor requested on non capped collection", d && d->isCapped() );
const BSONObj nat1 = BSON( "$natural" << 1 );
if ( order.isEmpty() ) {
order = nat1;
}
else {
uassert( 13052, "only {$natural:1} order allowed for tailable cursor", order == nat1 );
}
}
// Run a simple id query.
if ( ! (explain || pq.showDiskLoc()) && isSimpleIdQuery( query ) && !pq.hasOption( QueryOption_CursorTailable ) ) {
int n = 0;
bool nsFound = false;
bool indexFound = false;
BSONObj resObject;
Client& c = cc();
bool found = Helpers::findById( c, ns , query , resObject , &nsFound , &indexFound );
if ( nsFound == false || indexFound == true ) {
if ( shardingState.needShardChunkManager( ns ) ) {
ShardChunkManagerPtr m = shardingState.getShardChunkManager( ns );
if ( m && ! m->belongsToMe( resObject ) ) {
// I have something with this _id, but it doesn't belong to me
//......... portion of code omitted .........
Example 13: cloneCollectionAsCapped
Status cloneCollectionAsCapped( OperationContext* txn,
Database* db,
const string& shortFrom,
const string& shortTo,
double size,
bool temp,
bool logForReplication ) {
string fromNs = db->name() + "." + shortFrom;
string toNs = db->name() + "." + shortTo;
Collection* fromCollection = db->getCollection( txn, fromNs );
if ( !fromCollection )
return Status( ErrorCodes::NamespaceNotFound,
str::stream() << "source collection " << fromNs << " does not exist" );
if ( db->getCollection( toNs ) )
return Status( ErrorCodes::NamespaceExists, "to collection already exists" );
// create new collection
{
Client::Context ctx( toNs );
BSONObjBuilder spec;
spec.appendBool( "capped", true );
spec.append( "size", size );
if ( temp )
spec.appendBool( "temp", true );
Status status = userCreateNS( txn, ctx.db(), toNs, spec.done(), logForReplication );
if ( !status.isOK() )
return status;
}
Collection* toCollection = db->getCollection( txn, toNs );
invariant( toCollection ); // we created above
// how much data to ignore because it won't fit anyway
// datasize and extentSize can't be compared exactly, so add some padding to 'size'
long long excessSize =
static_cast<long long>( fromCollection->dataSize() -
( toCollection->getRecordStore()->storageSize() * 2 ) );
scoped_ptr<Runner> runner( InternalPlanner::collectionScan(fromNs,
fromCollection,
InternalPlanner::FORWARD ) );
while ( true ) {
BSONObj obj;
Runner::RunnerState state = runner->getNext(&obj, NULL);
switch( state ) {
case Runner::RUNNER_EOF:
return Status::OK();
case Runner::RUNNER_DEAD:
db->dropCollection( txn, toNs );
return Status( ErrorCodes::InternalError, "runner turned dead while iterating" );
case Runner::RUNNER_ERROR:
return Status( ErrorCodes::InternalError, "runner error while iterating" );
case Runner::RUNNER_ADVANCED:
if ( excessSize > 0 ) {
excessSize -= ( 4 * obj.objsize() ); // 4x is for padding, power of 2, etc...
continue;
}
toCollection->insertDocument( txn, obj, true );
if ( logForReplication )
replset::logOp(txn, "i", toNs.c_str(), obj);
txn->recoveryUnit()->commitIfNeeded();
}
}
invariant( false ); // unreachable
}
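Because the capped target may be smaller than the source, the loop first burns through excessSize, charging each skipped document at four times its objsize() to approximate padding and power-of-two allocation. That skip accounting as a standalone sketch (taking a document list in hand rather than a Runner):

#include <vector>
#include "mongo/bson/bsonobj.h"   // assumed include path

// How many leading documents to skip so the remainder fits the capped target.
// Mirrors the excessSize bookkeeping in cloneCollectionAsCapped.
size_t countDocsToSkip(const std::vector<mongo::BSONObj>& docs,
                       long long sourceDataSize, long long targetStorageSize) {
    // datasize and extent size aren't directly comparable; double the target.
    long long excess = sourceDataSize - targetStorageSize * 2;
    size_t skipped = 0;
    for (const mongo::BSONObj& d : docs) {
        if (excess <= 0) break;
        excess -= 4LL * d.objsize();   // 4x: padding, power-of-2 rounding, etc.
        ++skipped;
    }
    return skipped;
}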
Example 14: newRunQuery
//......... portion of code omitted .........
// Run the query.
// bb is used to hold query results
// this buffer should contain either requested documents per query or
// explain information, but not both
BufBuilder bb(32768);
bb.skip(sizeof(QueryResult));
// How many results have we obtained from the runner?
int numResults = 0;
// If we're replaying the oplog, we save the last time that we read.
OpTime slaveReadTill;
// Do we save the Runner in a ClientCursor for getMore calls later?
bool saveClientCursor = false;
// We turn on auto-yielding for the runner here. The runner registers itself with the
// active runners list in ClientCursor.
auto_ptr<ScopedRunnerRegistration> safety(new ScopedRunnerRegistration(runner.get()));
runner->setYieldPolicy(Runner::YIELD_AUTO);
BSONObj obj;
Runner::RunnerState state;
// uint64_t numMisplacedDocs = 0;
// set this outside the loop; we need it both inside the loop and when deciding
// whether to fill in explain information
const bool isExplain = pq.isExplain();
while (Runner::RUNNER_ADVANCED == (state = runner->getNext(&obj, NULL))) {
// Add result to output buffer. This is unnecessary if explain info is requested
if (!isExplain) {
bb.appendBuf((void*)obj.objdata(), obj.objsize());
}
// Count the result.
++numResults;
// Possibly note slave's position in the oplog.
if (pq.hasOption(QueryOption_OplogReplay)) {
BSONElement e = obj["ts"];
if (Date == e.type() || Timestamp == e.type()) {
slaveReadTill = e._opTime();
}
}
// TODO: only one type of 2d search doesn't support this. We need a way to pull it out
// of CanonicalQuery. :(
const bool supportsGetMore = true;
if (isExplain) {
if (enoughForExplain(pq, numResults)) {
break;
}
}
else if (!supportsGetMore && (enough(pq, numResults)
|| bb.len() >= MaxBytesToReturnToClientAtOnce)) {
break;
}
else if (enoughForFirstBatch(pq, numResults, bb.len())) {
QLOG() << "Enough for first batch, wantMore=" << pq.wantMore()
<< " numToReturn=" << pq.getNumToReturn()
<< " numResults=" << numResults
<< endl;
// If only one result requested assume it's a findOne() and don't save the cursor.
if (pq.wantMore() && 1 != pq.getNumToReturn()) {
//......... portion of code omitted .........
Example 15: runQuery
std::string runQuery(OperationContext* opCtx,
QueryMessage& q,
const NamespaceString& nss,
Message& result) {
CurOp& curOp = *CurOp::get(opCtx);
curOp.ensureStarted();
uassert(ErrorCodes::InvalidNamespace,
str::stream() << "Invalid ns [" << nss.ns() << "]",
nss.isValid());
invariant(!nss.isCommand());
// Set CurOp information.
const auto upconvertedQuery = upconvertQueryEntry(q.query, nss, q.ntoreturn, q.ntoskip);
beginQueryOp(opCtx, nss, upconvertedQuery, q.ntoreturn, q.ntoskip);
// Parse the qm into a CanonicalQuery.
const boost::intrusive_ptr<ExpressionContext> expCtx;
auto statusWithCQ =
CanonicalQuery::canonicalize(opCtx,
q,
expCtx,
ExtensionsCallbackReal(opCtx, &nss),
MatchExpressionParser::kAllowAllSpecialFeatures);
if (!statusWithCQ.isOK()) {
uasserted(17287,
str::stream() << "Can't canonicalize query: "
<< statusWithCQ.getStatus().toString());
}
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
invariant(cq.get());
LOG(5) << "Running query:\n" << redact(cq->toString());
LOG(2) << "Running query: " << redact(cq->toStringShort());
// Parse, canonicalize, plan, transcribe, and get a plan executor.
AutoGetCollectionForReadCommand ctx(opCtx, nss, AutoGetCollection::ViewMode::kViewsForbidden);
Collection* const collection = ctx.getCollection();
{
const QueryRequest& qr = cq->getQueryRequest();
// Allow the query to run on secondaries if the read preference permits it. If no read
// preference was specified, allow the query to run iff slaveOk has been set.
const bool slaveOK = qr.hasReadPref()
? uassertStatusOK(ReadPreferenceSetting::fromContainingBSON(q.query))
.canRunOnSecondary()
: qr.isSlaveOk();
uassertStatusOK(
repl::ReplicationCoordinator::get(opCtx)->checkCanServeReadsFor(opCtx, nss, slaveOK));
}
// We have a parsed query. Time to get the execution plan for it.
auto exec = uassertStatusOK(getExecutorLegacyFind(opCtx, collection, nss, std::move(cq)));
const QueryRequest& qr = exec->getCanonicalQuery()->getQueryRequest();
// If it's actually an explain, do the explain and return rather than falling through
// to the normal query execution loop.
if (qr.isExplain()) {
BufBuilder bb;
bb.skip(sizeof(QueryResult::Value));
BSONObjBuilder explainBob;
Explain::explainStages(
exec.get(), collection, ExplainOptions::Verbosity::kExecAllPlans, &explainBob);
// Add the resulting object to the return buffer.
BSONObj explainObj = explainBob.obj();
bb.appendBuf((void*)explainObj.objdata(), explainObj.objsize());
// Set query result fields.
QueryResult::View qr = bb.buf();
qr.setResultFlagsToOk();
qr.msgdata().setLen(bb.len());
curOp.debug().responseLength = bb.len();
qr.msgdata().setOperation(opReply);
qr.setCursorId(0);
qr.setStartingFrom(0);
qr.setNReturned(1);
result.setData(bb.release());
return "";
}
// Handle query option $maxTimeMS (not used with commands).
if (qr.getMaxTimeMS() > 0) {
uassert(40116,
"Illegal attempt to set operation deadline within DBDirectClient",
!opCtx->getClient()->isInDirectClient());
opCtx->setDeadlineAfterNowBy(Milliseconds{qr.getMaxTimeMS()});
}
opCtx->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
// Run the query.
// bb is used to hold query results
// this buffer should contain either requested documents per query or
// explain information, but not both
BufBuilder bb(FindCommon::kInitReplyBufferSize);
bb.skip(sizeof(QueryResult::Value));
//......... portion of code omitted .........