This article collects typical usage examples of the C++ method BSONObj::nFields. If you have been wondering what BSONObj::nFields does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of its containing class, BSONObj.
The following 15 code examples of BSONObj::nFields are shown, ordered by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
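Before the examples, here is a minimal, self-contained sketch written for this article (not taken from the MongoDB sources, and assuming the usual mongo/db/jsobj.h include) showing what BSONObj::nFields reports: it counts only the top-level fields of a BSONObj, so the fields of an embedded document do not add to the outer count.
#include <iostream>
#include "mongo/db/jsobj.h"  // assumed include path for BSONObj and the BSON builder macro

using namespace mongo;

int main() {
    // Two top-level fields; the embedded document's own fields are not counted.
    BSONObj doc = BSON("name" << "alice"
                              << "address" << BSON("city" << "NYC" << "zip" << "10001"));
    std::cout << doc.nFields() << std::endl;                              // 2
    std::cout << doc["address"].embeddedObject().nFields() << std::endl; // 2
    std::cout << BSONObj().nFields() << std::endl;                       // 0 for an empty object
    return 0;
}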
Example 1: make
// static
Status ParsedProjection::make(const BSONObj& spec,
const MatchExpression* const query,
ParsedProjection** out,
const MatchExpressionParser::WhereCallback& whereCallback) {
// Are we including or excluding fields? Values:
// -1 when we haven't initialized it.
// 1 when we're including
// 0 when we're excluding.
int include_exclude = -1;
// If any of these are 'true' the projection isn't covered.
bool include = true;
bool hasNonSimple = false;
bool hasDottedField = false;
bool includeID = true;
bool hasIndexKeyProjection = false;
bool wantGeoNearPoint = false;
bool wantGeoNearDistance = false;
// Until we see a positional or elemMatch operator we're normal.
ArrayOpType arrayOpType = ARRAY_OP_NORMAL;
BSONObjIterator it(spec);
while (it.more()) {
BSONElement e = it.next();
if (!e.isNumber() && !e.isBoolean()) {
hasNonSimple = true;
}
if (Object == e.type()) {
BSONObj obj = e.embeddedObject();
if (1 != obj.nFields()) {
return Status(ErrorCodes::BadValue, ">1 field in obj: " + obj.toString());
}
BSONElement e2 = obj.firstElement();
if (mongoutils::str::equals(e2.fieldName(), "$slice")) {
if (e2.isNumber()) {
// This is A-OK.
}
else if (e2.type() == Array) {
BSONObj arr = e2.embeddedObject();
if (2 != arr.nFields()) {
return Status(ErrorCodes::BadValue, "$slice array wrong size");
}
BSONObjIterator it(arr);
// Skip over 'skip'.
it.next();
int limit = it.next().numberInt();
if (limit <= 0) {
return Status(ErrorCodes::BadValue, "$slice limit must be positive");
}
}
else {
return Status(ErrorCodes::BadValue,
"$slice only supports numbers and [skip, limit] arrays");
}
}
else if (mongoutils::str::equals(e2.fieldName(), "$elemMatch")) {
// Validate $elemMatch arguments and dependencies.
if (Object != e2.type()) {
return Status(ErrorCodes::BadValue,
"elemMatch: Invalid argument, object required.");
}
if (ARRAY_OP_POSITIONAL == arrayOpType) {
return Status(ErrorCodes::BadValue,
"Cannot specify positional operator and $elemMatch.");
}
if (mongoutils::str::contains(e.fieldName(), '.')) {
return Status(ErrorCodes::BadValue,
"Cannot use $elemMatch projection on a nested field.");
}
arrayOpType = ARRAY_OP_ELEM_MATCH;
// Create a MatchExpression for the elemMatch.
BSONObj elemMatchObj = e.wrap();
verify(elemMatchObj.isOwned());
// TODO: Is there a faster way of validating the elemMatchObj?
StatusWithMatchExpression swme = MatchExpressionParser::parse(elemMatchObj,
whereCallback);
if (!swme.isOK()) {
return swme.getStatus();
}
delete swme.getValue();
}
else if (mongoutils::str::equals(e2.fieldName(), "$meta")) {
// Field for meta must be top level. We can relax this at some point.
if (mongoutils::str::contains(e.fieldName(), '.')) {
return Status(ErrorCodes::BadValue, "field for $meta cannot be nested");
}
//......... some code omitted here .........
Example 2: it
ProjectionExec::ProjectionExec(const BSONObj& spec,
const MatchExpression* queryExpression,
const CollatorInterface* collator,
const ExtensionsCallback& extensionsCallback)
: _include(true),
_special(false),
_source(spec),
_includeID(true),
_skip(0),
_limit(-1),
_arrayOpType(ARRAY_OP_NORMAL),
_queryExpression(queryExpression),
_hasReturnKey(false),
_collator(collator) {
// Whether we're including or excluding fields.
enum class IncludeExclude { kUninitialized, kInclude, kExclude };
IncludeExclude includeExclude = IncludeExclude::kUninitialized;
BSONObjIterator it(_source);
while (it.more()) {
BSONElement e = it.next();
if (Object == e.type()) {
BSONObj obj = e.embeddedObject();
verify(1 == obj.nFields());
BSONElement e2 = obj.firstElement();
if (mongoutils::str::equals(e2.fieldName(), "$slice")) {
if (e2.isNumber()) {
int i = e2.numberInt();
if (i < 0) {
add(e.fieldName(), i, -i); // limit is now positive
} else {
add(e.fieldName(), 0, i);
}
} else {
verify(e2.type() == Array);
BSONObj arr = e2.embeddedObject();
verify(2 == arr.nFields());
BSONObjIterator it(arr);
int skip = it.next().numberInt();
int limit = it.next().numberInt();
verify(limit > 0);
add(e.fieldName(), skip, limit);
}
} else if (mongoutils::str::equals(e2.fieldName(), "$elemMatch")) {
_arrayOpType = ARRAY_OP_ELEM_MATCH;
// Create a MatchExpression for the elemMatch.
BSONObj elemMatchObj = e.wrap();
verify(elemMatchObj.isOwned());
_elemMatchObjs.push_back(elemMatchObj);
StatusWithMatchExpression statusWithMatcher =
MatchExpressionParser::parse(elemMatchObj, extensionsCallback, _collator);
verify(statusWithMatcher.isOK());
// And store it in _matchers.
_matchers[mongoutils::str::before(e.fieldName(), '.').c_str()] =
statusWithMatcher.getValue().release();
add(e.fieldName(), true);
} else if (mongoutils::str::equals(e2.fieldName(), "$meta")) {
verify(String == e2.type());
if (e2.valuestr() == QueryRequest::metaTextScore) {
_meta[e.fieldName()] = META_TEXT_SCORE;
} else if (e2.valuestr() == QueryRequest::metaSortKey) {
_sortKeyMetaFields.push_back(e.fieldName());
_meta[_sortKeyMetaFields.back()] = META_SORT_KEY;
} else if (e2.valuestr() == QueryRequest::metaRecordId) {
_meta[e.fieldName()] = META_RECORDID;
} else if (e2.valuestr() == QueryRequest::metaGeoNearPoint) {
_meta[e.fieldName()] = META_GEONEAR_POINT;
} else if (e2.valuestr() == QueryRequest::metaGeoNearDistance) {
_meta[e.fieldName()] = META_GEONEAR_DIST;
} else if (e2.valuestr() == QueryRequest::metaIndexKey) {
_hasReturnKey = true;
} else {
// This shouldn't happen, should be caught by parsing.
verify(0);
}
} else {
verify(0);
}
} else if (mongoutils::str::equals(e.fieldName(), "_id") && !e.trueValue()) {
_includeID = false;
} else {
add(e.fieldName(), e.trueValue());
// If we haven't specified an include/exclude, initialize includeExclude.
if (includeExclude == IncludeExclude::kUninitialized) {
includeExclude =
e.trueValue() ? IncludeExclude::kInclude : IncludeExclude::kExclude;
_include = !e.trueValue();
}
}
if (mongoutils::str::contains(e.fieldName(), ".$")) {
_arrayOpType = ARRAY_OP_POSITIONAL;
//......... some code omitted here .........
Example 3: _update
void _update( Request& r , DbMessage& d, ChunkManager* manager ){
int flags = d.pullInt();
BSONObj query = d.nextJsObj();
uassert( 10201 , "invalid update" , d.moreJSObjs() );
BSONObj toupdate = d.nextJsObj();
BSONObj chunkFinder = query;
bool upsert = flags & UpdateOption_Upsert;
bool multi = flags & UpdateOption_Multi;
if ( multi )
uassert( 10202 , "can't mix multi and upsert and sharding" , ! upsert );
if ( upsert && !(manager->hasShardKey(toupdate) ||
(toupdate.firstElement().fieldName()[0] == '$' && manager->hasShardKey(query))))
{
throw UserException( 8012 , "can't upsert something without shard key" );
}
bool save = false;
if ( ! manager->hasShardKey( query ) ){
if ( multi ){
}
else if ( query.nFields() != 1 || strcmp( query.firstElement().fieldName() , "_id" ) ){
throw UserException( 8013 , "can't do update with query that doesn't have the shard key" );
}
else {
save = true;
chunkFinder = toupdate;
}
}
if ( ! save ){
if ( toupdate.firstElement().fieldName()[0] == '$' ){
// TODO: check for $set, etc.. on shard key
}
else if ( manager->hasShardKey( toupdate ) && manager->getShardKey().compare( query , toupdate ) ){
throw UserException( 8014 , "change would move shards!" );
}
}
if ( multi ){
vector<Chunk*> chunks;
manager->getChunksForQuery( chunks , chunkFinder );
set<string> seen;
for ( vector<Chunk*>::iterator i=chunks.begin(); i!=chunks.end(); i++){
Chunk * c = *i;
if ( seen.count( c->getShard() ) )
continue;
doWrite( dbUpdate , r , c->getShard() );
seen.insert( c->getShard() );
}
}
else {
Chunk& c = manager->findChunk( chunkFinder );
doWrite( dbUpdate , r , c.getShard() );
c.splitIfShould( d.msg().data->dataLen() );
}
}
Example 4: interpreterVersion
BSONObj interpreterVersion(const BSONObj& a, void* data) {
uassert( 16453, "interpreterVersion accepts no arguments", a.nFields() == 0 );
return BSON( "" << globalScriptEngine->getInterpreterVersionString() );
}
Example 5: _updateObjects
//......... some code omitted here .........
mss->applyModsInPlace( true );// const_cast<BSONObj&>(onDisk) );
DEBUGUPDATE( "\t\t\t doing in place update" );
if ( profile && !multi )
debug.fastmod = true;
if ( modsIsIndexed ) {
seenObjects.insert( loc );
}
d->paddingFits();
}
else {
if ( rs )
rs->goingToDelete( onDisk );
BSONObj newObj = mss->createNewFromMods();
checkTooLarge(newObj);
DiskLoc newLoc = theDataFileMgr.updateRecord(ns,
d,
nsdt,
r,
loc,
newObj.objdata(),
newObj.objsize(),
debug);
if ( newLoc != loc || modsIsIndexed ){
// log() << "Moved obj " << newLoc.obj()["_id"] << " from " << loc << " to " << newLoc << endl;
// object moved, need to make sure we don't process it again
seenObjects.insert( newLoc );
}
}
if ( logop ) {
DEV verify( mods->size() );
BSONObj logObj = mss->getOpLogRewrite();
DEBUGUPDATE( "\t rewrite update: " << logObj );
// It is possible that the entire mod set was a no-op over this
// document. We would have an empty log record in that case. If we
// call logOp, with an empty record, that would be replicated as "clear
// this record", which is not what we want. Therefore, to get a no-op
// in the replica, we simply don't log.
if ( logObj.nFields() ) {
logOp("u", ns, logObj , &pattern, 0, fromMigrate );
}
}
numModded++;
if ( ! multi )
return UpdateResult( 1 , 1 , numModded , BSONObj() );
if ( willAdvanceCursor )
c->recoverFromTouchingEarlierIterate();
getDur().commitIfNeeded();
continue;
}
uassert( 10158 , "multi update only works with $ operators" , ! multi );
BSONElementManipulator::lookForTimestamps( updateobj );
checkNoMods( updateobj );
theDataFileMgr.updateRecord(ns, d, nsdt, r, loc , updateobj.objdata(), updateobj.objsize(), debug, su);
if ( logop ) {
DEV wassert( !su ); // super used doesn't get logged, this would be bad.
logOp("u", ns, updateobj, &pattern, 0, fromMigrate );
}
return UpdateResult( 1 , 0 , 1 , BSONObj() );
} while ( c->ok() );
} // endif
if ( numModded )
return UpdateResult( 1 , 1 , numModded , BSONObj() );
if ( upsert ) {
if ( updateobj.firstElementFieldName()[0] == '$' ) {
// upsert of an $operation. build a default object
BSONObj newObj = mods->createNewFromQuery( patternOrig );
checkNoMods( newObj );
debug.fastmodinsert = true;
theDataFileMgr.insertWithObjMod(ns, newObj, false, su);
if ( logop )
logOp( "i", ns, newObj, 0, 0, fromMigrate );
return UpdateResult( 0 , 1 , 1 , newObj );
}
uassert( 10159 , "multi update only works with $ operators" , ! multi );
checkNoMods( updateobj );
debug.upsert = true;
BSONObj no = updateobj;
theDataFileMgr.insertWithObjMod(ns, no, false, su);
if ( logop )
logOp( "i", ns, no, 0, 0, fromMigrate );
return UpdateResult( 0 , 0 , 1 , no );
}
return UpdateResult( 0 , isOperatorUpdate , 0 , BSONObj() );
}
Example 6: computeProperties
void IndexScanNode::computeProperties() {
_sorts.clear();
BSONObj sortPattern;
{
BSONObjBuilder sortBob;
BSONObj normalizedIndexKeyPattern(LiteParsedQuery::normalizeSortOrder(indexKeyPattern));
BSONObjIterator it(normalizedIndexKeyPattern);
while (it.more()) {
BSONElement elt = it.next();
// Zero is returned if elt is not a number. This happens when elt is hashed or
// 2dsphere, our two projection indices. We want to drop those from the sort
// pattern.
int val = elt.numberInt() * direction;
if (0 != val) {
sortBob.append(elt.fieldName(), val);
}
}
sortPattern = sortBob.obj();
}
_sorts.insert(sortPattern);
const int nFields = sortPattern.nFields();
if (nFields > 1) {
// We're sorted not only by sortPattern but also by all prefixes of it.
for (int i = 0; i < nFields; ++i) {
// Make obj out of fields [0,i]
BSONObjIterator it(sortPattern);
BSONObjBuilder prefixBob;
for (int j = 0; j <= i; ++j) {
prefixBob.append(it.next());
}
_sorts.insert(prefixBob.obj());
}
}
// If we are using the index {a:1, b:1} to answer the predicate {a: 10}, it's sorted
// both by the index key pattern and by the pattern {b: 1}.
// See if there are any fields with equalities for bounds. We can drop these
// from any sort orders created.
set<string> equalityFields;
if (!bounds.isSimpleRange) {
// Figure out how many fields are point intervals.
for (size_t i = 0; i < bounds.fields.size(); ++i) {
const OrderedIntervalList& oil = bounds.fields[i];
if (oil.intervals.size() != 1) {
continue;
}
const Interval& ival = oil.intervals[0];
if (!ival.isPoint()) {
continue;
}
equalityFields.insert(oil.name);
}
}
if (equalityFields.empty()) {
return;
}
// TODO: Each field in equalityFields could be dropped from the sort order since it is
// a point interval. The full set of sort orders is as follows:
// For each sort in _sorts:
// For each drop in powerset(equalityFields):
// Remove fields in 'drop' from 'sort' and add resulting sort to output.
// Since this involves a powerset, we only remove point intervals that the prior sort
// planning code removed, namely the contiguous prefix of the key pattern.
BSONObjIterator it(sortPattern);
BSONObjBuilder prefixBob;
while (it.more()) {
BSONElement elt = it.next();
// TODO: string slowness. fix when bounds are stringdata not string.
if (equalityFields.end() == equalityFields.find(string(elt.fieldName()))) {
prefixBob.append(elt);
// This field isn't a point interval, can't drop.
break;
}
}
while (it.more()) {
prefixBob.append(it.next());
}
// If we have an index {a:1} and an equality on 'a' don't append an empty sort order.
BSONObj filterPointsObj = prefixBob.obj();
if (!filterPointsObj.isEmpty()) {
_sorts.insert(filterPointsObj);
}
}
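The prefix loop in the example above is the piece that leans on nFields: the field count of the normalized sort pattern drives the emission of every prefix as an additional sort order. A standalone sketch of just that loop (a hypothetical helper written for this article, assuming only the core BSONObj, BSONObjBuilder and BSONObjIterator APIs) might look like this:
#include <vector>
#include "mongo/db/jsobj.h"  // assumed include for BSONObj, BSONObjBuilder, BSONObjIterator

using namespace mongo;

// For a pattern such as {a: 1, b: 1, c: 1} this returns {a: 1}, {a: 1, b: 1}, {a: 1, b: 1, c: 1}.
std::vector<BSONObj> sortPrefixes(const BSONObj& sortPattern) {
    std::vector<BSONObj> prefixes;
    const int nFields = sortPattern.nFields();
    for (int i = 0; i < nFields; ++i) {
        BSONObjIterator it(sortPattern);
        BSONObjBuilder prefixBob;
        // Copy fields [0, i] into the prefix object.
        for (int j = 0; j <= i; ++j) {
            prefixBob.append(it.next());
        }
        prefixes.push_back(prefixBob.obj());
    }
    return prefixes;
}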
Example 7: it
ProjectionExec::ProjectionExec(const BSONObj& spec, const MatchExpression* queryExpression)
: _include(true),
_special(false),
_source(spec),
_includeID(true),
_skip(0),
_limit(-1),
_arrayOpType(ARRAY_OP_NORMAL),
_hasNonSimple(false),
_hasDottedField(false),
_queryExpression(queryExpression),
_hasReturnKey(false) {
// Are we including or excluding fields?
// -1 when we haven't initialized it.
// 1 when we're including
// 0 when we're excluding.
int include_exclude = -1;
BSONObjIterator it(_source);
while (it.more()) {
BSONElement e = it.next();
if (!e.isNumber() && !e.isBoolean()) {
_hasNonSimple = true;
}
if (Object == e.type()) {
BSONObj obj = e.embeddedObject();
verify(1 == obj.nFields());
BSONElement e2 = obj.firstElement();
if (mongoutils::str::equals(e2.fieldName(), "$slice")) {
if (e2.isNumber()) {
int i = e2.numberInt();
if (i < 0) {
add(e.fieldName(), i, -i); // limit is now positive
}
else {
add(e.fieldName(), 0, i);
}
}
else {
verify(e2.type() == Array);
BSONObj arr = e2.embeddedObject();
verify(2 == arr.nFields());
BSONObjIterator it(arr);
int skip = it.next().numberInt();
int limit = it.next().numberInt();
verify(limit > 0);
add(e.fieldName(), skip, limit);
}
}
else if (mongoutils::str::equals(e2.fieldName(), "$elemMatch")) {
_arrayOpType = ARRAY_OP_ELEM_MATCH;
// Create a MatchExpression for the elemMatch.
BSONObj elemMatchObj = e.wrap();
verify(elemMatchObj.isOwned());
_elemMatchObjs.push_back(elemMatchObj);
StatusWithMatchExpression swme = MatchExpressionParser::parse(elemMatchObj);
verify(swme.isOK());
// And store it in _matchers.
_matchers[mongoutils::str::before(e.fieldName(), '.').c_str()]
= swme.getValue();
add(e.fieldName(), true);
}
else if (mongoutils::str::equals(e2.fieldName(), "$meta")) {
verify(String == e2.type());
if (mongoutils::str::equals(e2.valuestr(), "text")) {
_meta[e.fieldName()] = META_TEXT;
}
else if (mongoutils::str::equals(e2.valuestr(), "diskloc")) {
_meta[e.fieldName()] = META_DISKLOC;
}
else if (mongoutils::str::equals(e2.valuestr(), "indexKey")) {
_hasReturnKey = true;
// The index key clobbers everything so just stop parsing here.
return;
}
else {
// This shouldn't happen, should be caught by parsing.
verify(0);
}
}
else {
verify(0);
}
}
else if (mongoutils::str::equals(e.fieldName(), "_id") && !e.trueValue()) {
_includeID = false;
}
else {
add(e.fieldName(), e.trueValue());
// Projections of dotted fields aren't covered.
//......... some code omitted here .........
Example 8: init
void Projection::init( const BSONObj& o ) {
massert( 10371 , "can only add to Projection once", _source.isEmpty());
_source = o;
BSONObjIterator i( o );
int true_false = -1;
while ( i.more() ) {
BSONElement e = i.next();
if ( ! e.isNumber() )
_hasNonSimple = true;
if (e.type() == Object) {
BSONObj obj = e.embeddedObject();
BSONElement e2 = obj.firstElement();
if ( mongoutils::str::equals( e2.fieldName(), "$slice" ) ) {
if (e2.isNumber()) {
int i = e2.numberInt();
if (i < 0)
add(e.fieldName(), i, -i); // limit is now positive
else
add(e.fieldName(), 0, i);
}
else if (e2.type() == Array) {
BSONObj arr = e2.embeddedObject();
uassert(13099, "$slice array wrong size", arr.nFields() == 2 );
BSONObjIterator it(arr);
int skip = it.next().numberInt();
int limit = it.next().numberInt();
uassert(13100, "$slice limit must be positive", limit > 0 );
add(e.fieldName(), skip, limit);
}
else {
uassert(13098, "$slice only supports numbers and [skip, limit] arrays", false);
}
}
else if ( mongoutils::str::equals( e2.fieldName(), "$elemMatch" ) ) {
// validate $elemMatch arguments and dependencies
uassert( 16342, "elemMatch: invalid argument. object required.",
e2.type() == Object );
uassert( 16343, "Cannot specify positional operator and $elemMatch"
" (currently unsupported).",
_arrayOpType != ARRAY_OP_POSITIONAL );
uassert( 16344, "Cannot use $elemMatch projection on a nested field"
" (currently unsupported).",
! mongoutils::str::contains( e.fieldName(), '.' ) );
_arrayOpType = ARRAY_OP_ELEM_MATCH;
// initialize new Matcher object(s)
_matchers.insert( make_pair( mongoutils::str::before( e.fieldName(), '.' ),
boost::make_shared<Matcher>( e.wrap(), true ) ) );
add( e.fieldName(), true );
}
else {
uasserted(13097, string("Unsupported projection option: ") +
obj.firstElementFieldName() );
}
}
else if (!strcmp(e.fieldName(), "_id") && !e.trueValue()) {
_includeID = false;
}
else {
add( e.fieldName(), e.trueValue() );
// validate input
if (true_false == -1) {
true_false = e.trueValue();
_include = !e.trueValue();
}
else {
uassert( 10053 , "You cannot currently mix including and excluding fields. "
"Contact us if this is an issue." ,
(bool)true_false == e.trueValue() );
}
}
if ( mongoutils::str::contains( e.fieldName(), ".$" ) ) {
// positional op found; verify dependencies
uassert( 16345, "Cannot exclude array elements with the positional operator"
" (currently unsupported).", e.trueValue() );
uassert( 16346, "Cannot specify more than one positional array element per query"
" (currently unsupported).", _arrayOpType != ARRAY_OP_POSITIONAL );
uassert( 16347, "Cannot specify positional operator and $elemMatch"
" (currently unsupported).", _arrayOpType != ARRAY_OP_ELEM_MATCH );
_arrayOpType = ARRAY_OP_POSITIONAL;
}
}
}
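One detail worth calling out in the $slice branch above (an illustration written for this article, assuming the BSON/BSON_ARRAY builder macros from mongo/db/jsobj.h): a BSON array is itself an object whose keys are "0", "1", ..., so arr.nFields() counts the array's elements, which is exactly how the [skip, limit] form is enforced.
#include "mongo/db/jsobj.h"  // assumed include for the BSON/BSON_ARRAY builder macros

using namespace mongo;

// Inner array has 2 elements, so arr.nFields() == 2 and the spec is accepted.
BSONObj okSpec  = BSON("comments" << BSON("$slice" << BSON_ARRAY(20 << 10)));

// Inner array has 3 elements, so arr.nFields() == 3 and uassert 13099 fires.
BSONObj badSpec = BSON("comments" << BSON("$slice" << BSON_ARRAY(20 << 10 << 5)));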
Example 9: _update
void _update( Request& r , DbMessage& d, ChunkManagerPtr manager ){
int flags = d.pullInt();
BSONObj query = d.nextJsObj();
uassert( 10201 , "invalid update" , d.moreJSObjs() );
BSONObj toupdate = d.nextJsObj();
BSONObj chunkFinder = query;
bool upsert = flags & UpdateOption_Upsert;
bool multi = flags & UpdateOption_Multi;
if ( multi )
uassert( 10202 , "can't mix multi and upsert and sharding" , ! upsert );
if ( upsert && !(manager->hasShardKey(toupdate) ||
(toupdate.firstElement().fieldName()[0] == '$' && manager->hasShardKey(query))))
{
throw UserException( 8012 , "can't upsert something without shard key" );
}
bool save = false;
if ( ! manager->hasShardKey( query ) ){
if ( multi ){
}
else if ( query.nFields() != 1 || strcmp( query.firstElement().fieldName() , "_id" ) ){
throw UserException( 8013 , "can't do update with query that doesn't have the shard key" );
}
else {
save = true;
chunkFinder = toupdate;
}
}
if ( ! save ){
if ( toupdate.firstElement().fieldName()[0] == '$' ){
BSONObjIterator ops(toupdate);
while(ops.more()){
BSONElement op(ops.next());
if (op.type() != Object)
continue;
BSONObjIterator fields(op.embeddedObject());
while(fields.more()){
const string field = fields.next().fieldName();
uassert(13123, "Can't modify shard key's value", ! manager->getShardKey().partOfShardKey(field));
}
}
} else if ( manager->hasShardKey( toupdate ) ){
uassert( 8014, "change would move shards!", manager->getShardKey().compare( query , toupdate ) == 0 );
} else {
uasserted(12376, "shard key must be in update object");
}
}
if ( multi ){
vector<shared_ptr<ChunkRange> > chunks;
manager->getChunksForQuery( chunks , chunkFinder );
set<Shard> seen;
for ( vector<shared_ptr<ChunkRange> >::iterator i=chunks.begin(); i!=chunks.end(); i++){
shared_ptr<ChunkRange> c = *i;
if ( seen.count( c->getShard() ) )
continue;
doWrite( dbUpdate , r , c->getShard() );
seen.insert( c->getShard() );
}
}
else {
ChunkPtr c = manager->findChunk( chunkFinder );
doWrite( dbUpdate , r , c->getShard() );
c->splitIfShould( d.msg().header()->dataLen() );
}
}
Example 10: isValidStatusMemberObject
// static
bool WorkingSetCommon::isValidStatusMemberObject(const BSONObj& obj) {
return obj.nFields() == 3 && obj.hasField("ok") && obj.hasField("code") &&
obj.hasField("errmsg");
}
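As a quick illustration of what that predicate accepts (a sketch written for this article, using only the BSONObj calls shown above): the object must carry exactly the three fields ok, code and errmsg, so either an extra or a missing field makes the nFields() check fail.
#include "mongo/db/jsobj.h"  // assumed include for BSONObj and the BSON builder macro

using namespace mongo;

// Same shape test as WorkingSetCommon::isValidStatusMemberObject above.
bool looksLikeStatusMember(const BSONObj& obj) {
    return obj.nFields() == 3 && obj.hasField("ok") && obj.hasField("code") &&
        obj.hasField("errmsg");
}

// looksLikeStatusMember(BSON("ok" << 0.0 << "code" << 50 << "errmsg" << "exceeded time limit"))
//     returns true: exactly three fields with the expected names.
// looksLikeStatusMember(BSON("ok" << 0.0 << "code" << 50))
//     returns false: nFields() is 2, so the field-count check fails.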
Example 11: init
void Projection::init( const BSONObj& o ) {
massert( 10371 , "can only add to Projection once", _source.isEmpty());
_source = o;
BSONObjIterator i( o );
int true_false = -1;
while ( i.more() ) {
BSONElement e = i.next();
if ( ! e.isNumber() )
_hasNonSimple = true;
if (e.type() == Object) {
BSONObj obj = e.embeddedObject();
BSONElement e2 = obj.firstElement();
if ( strcmp(e2.fieldName(), "$slice") == 0 ) {
if (e2.isNumber()) {
int i = e2.numberInt();
if (i < 0)
add(e.fieldName(), i, -i); // limit is now positive
else
add(e.fieldName(), 0, i);
}
else if (e2.type() == Array) {
BSONObj arr = e2.embeddedObject();
uassert(13099, "$slice array wrong size", arr.nFields() == 2 );
BSONObjIterator it(arr);
int skip = it.next().numberInt();
int limit = it.next().numberInt();
uassert(13100, "$slice limit must be positive", limit > 0 );
add(e.fieldName(), skip, limit);
}
else {
uassert(13098, "$slice only supports numbers and [skip, limit] arrays", false);
}
}
else {
uassert(13097, string("Unsupported projection option: ") + obj.firstElementFieldName(), false);
}
}
else if (!strcmp(e.fieldName(), "_id") && !e.trueValue()) {
_includeID = false;
}
else {
add (e.fieldName(), e.trueValue());
// validate input
if (true_false == -1) {
true_false = e.trueValue();
_include = !e.trueValue();
}
else {
uassert( 10053 , "You cannot currently mix including and excluding fields. Contact us if this is an issue." ,
(bool)true_false == e.trueValue() );
}
}
}
}
Example 12: pSort
intrusive_ptr<DocumentSourceSort> DocumentSourceSort::create(
const intrusive_ptr<ExpressionContext>& pExpCtx,
BSONObj sortOrder,
long long limit,
boost::optional<uint64_t> maxMemoryUsageBytes) {
intrusive_ptr<DocumentSourceSort> pSort(new DocumentSourceSort(pExpCtx));
pSort->_maxMemoryUsageBytes = maxMemoryUsageBytes
? *maxMemoryUsageBytes
: internalDocumentSourceSortMaxBlockingSortBytes.load();
pSort->_rawSort = sortOrder.getOwned();
for (auto&& keyField : sortOrder) {
auto fieldName = keyField.fieldNameStringData();
SortPatternPart patternPart;
if (keyField.type() == Object) {
BSONObj metaDoc = keyField.Obj();
// this restriction is due to needing to figure out sort direction
uassert(17312,
"$meta is the only expression supported by $sort right now",
metaDoc.firstElement().fieldNameStringData() == "$meta");
uassert(ErrorCodes::FailedToParse,
"Cannot have additional keys in a $meta sort specification",
metaDoc.nFields() == 1);
VariablesParseState vps = pExpCtx->variablesParseState;
patternPart.expression = ExpressionMeta::parse(pExpCtx, metaDoc.firstElement(), vps);
// If sorting by textScore, sort highest scores first. If sorting by randVal, order
// doesn't matter, so just always use descending.
patternPart.isAscending = false;
pSort->_sortPattern.push_back(std::move(patternPart));
continue;
}
uassert(15974,
"$sort key ordering must be specified using a number or {$meta: 'textScore'}",
keyField.isNumber());
int sortOrder = keyField.numberInt();
uassert(15975,
"$sort key ordering must be 1 (for ascending) or -1 (for descending)",
((sortOrder == 1) || (sortOrder == -1)));
patternPart.fieldPath = FieldPath{fieldName};
patternPart.isAscending = (sortOrder > 0);
pSort->_paths.insert(patternPart.fieldPath->fullPath());
pSort->_sortPattern.push_back(std::move(patternPart));
}
uassert(15976, "$sort stage must have at least one sort key", !pSort->_sortPattern.empty());
pSort->_sortKeyGen = SortKeyGenerator{
// The SortKeyGenerator expects the expressions to be serialized in order to detect a sort
// by a metadata field.
pSort->sortKeyPattern(SortKeySerialization::kForPipelineSerialization).toBson(),
pExpCtx->getCollator()};
if (limit > 0) {
pSort->setLimitSrc(DocumentSourceLimit::create(pExpCtx, limit));
}
return pSort;
}
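For context on the nFields call in this example (an illustration written for this article, assuming the BSON builder macros from mongo/db/jsobj.h): the metaDoc.nFields() == 1 check is what rejects extra keys inside a $meta sort term.
#include "mongo/db/jsobj.h"  // assumed include for the BSON builder macro

using namespace mongo;

// Passes both checks above: the inner $meta document has exactly one field.
BSONObj accepted = BSON("score" << BSON("$meta" << "textScore"));

// Fails uassert(ErrorCodes::FailedToParse, ...): the inner document has two fields.
BSONObj rejected = BSON("score" << BSON("$meta" << "textScore" << "dir" << 1));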
Example 13: toval
jsval toval( const BSONElement& e ) {
switch( e.type() ) {
case EOO:
case jstNULL:
case Undefined:
return JSVAL_NULL;
case NumberDouble:
case NumberInt:
return toval( e.number() );
case Symbol: // TODO: should we make a special class for this
case String:
return toval( e.valuestr() );
case Bool:
return e.boolean() ? JSVAL_TRUE : JSVAL_FALSE;
case Object: {
BSONObj embed = e.embeddedObject().getOwned();
return toval( &embed );
}
case Array: {
BSONObj embed = e.embeddedObject().getOwned();
if ( embed.isEmpty() ) {
return OBJECT_TO_JSVAL( JS_NewArrayObject( _context , 0 , 0 ) );
}
int n = embed.nFields();
JSObject * array = JS_NewArrayObject( _context , n , 0 );
assert( array );
jsval myarray = OBJECT_TO_JSVAL( array );
for ( int i=0; i<n; i++ ) {
jsval v = toval( embed[i] );
assert( JS_SetElement( _context , array , i , &v ) );
}
return myarray;
}
case jstOID: {
OID oid = e.__oid();
JSObject * o = JS_NewObject( _context , &object_id_class , 0 , 0 );
setProperty( o , "str" , toval( oid.str().c_str() ) );
return OBJECT_TO_JSVAL( o );
}
case RegEx: {
const char * flags = e.regexFlags();
uintN flagNumber = 0;
while ( *flags ) {
switch ( *flags ) {
case 'g':
flagNumber |= JSREG_GLOB;
break;
case 'i':
flagNumber |= JSREG_FOLD;
break;
case 'm':
flagNumber |= JSREG_MULTILINE;
break;
//case 'y': flagNumber |= JSREG_STICKY; break;
default:
log() << "warning: unknown regex flag:" << *flags << endl;
}
flags++;
}
JSObject * r = JS_NewRegExpObject( _context , (char*)e.regex() , strlen( e.regex() ) , flagNumber );
assert( r );
return OBJECT_TO_JSVAL( r );
}
case Code: {
JSFunction * func = compileFunction( e.valuestr() );
return OBJECT_TO_JSVAL( JS_GetFunctionObject( func ) );
}
case CodeWScope: {
JSFunction * func = compileFunction( e.codeWScopeCode() );
BSONObj extraScope = e.codeWScopeObject();
if ( ! extraScope.isEmpty() ) {
log() << "warning: CodeWScope doesn't transfer to db.eval" << endl;
}
return OBJECT_TO_JSVAL( JS_GetFunctionObject( func ) );
}
case Date:
return OBJECT_TO_JSVAL( js_NewDateObjectMsec( _context , (jsdouble) e.date().millis ) );
case MinKey:
return OBJECT_TO_JSVAL( JS_NewObject( _context , &minkey_class , 0 , 0 ) );
case MaxKey:
return OBJECT_TO_JSVAL( JS_NewObject( _context , &maxkey_class , 0 , 0 ) );
case Timestamp: {
JSObject * o = JS_NewObject( _context , &timestamp_class , 0 , 0 );
setProperty( o , "t" , toval( (double)(e.timestampTime()) ) );
setProperty( o , "i" , toval( (double)(e.timestampInc()) ) );
//......... some code omitted here .........
Example 14: skipToNextKey
int IndexCursor::skipToNextKey( const BSONObj &currentKey ) {
int skipPrefixIndex = _boundsIterator->advance( currentKey );
if ( skipPrefixIndex == -2 ) {
// We are done iterating completely.
_ok = false;
return -2;
}
else if ( skipPrefixIndex == -1 ) {
// We should skip nothing.
return -1;
}
// We should skip to a further key, efficiently.
//
// If after(), skip to the first key greater/less than the key comprised
// of the first "skipPrefixIndex" elements of currentKey, and the rest
// set to MaxKey/MinKey for direction > 0 and direction < 0 respectively.
// eg: skipPrefixIndex = 1, currKey {a:1, b:2, c:1}, direction > 0, so we skip
// to the first key greater than {a:1, b:maxkey, c:maxkey}
//
// If after() is false, we use the same key prefix but set the remaining
// elements to the elements described by cmp(), in order.
// eg: skipPrefixIndex = 1, currKey {a:1, b:2, c:1} and cmp() [b:5, c:11],
// so we skip to {a:1, b:5, c:11}, also noting direction.
if ( _boundsIterator->after() ) {
skipPrefix( currentKey, skipPrefixIndex );
} else {
BSONObjBuilder b(currentKey.objsize());
BSONObjIterator it = currentKey.begin();
const vector<const BSONElement *> &endKeys = _boundsIterator->cmp();
const int nFields = currentKey.nFields();
for ( int i = 0; i < nFields; i++ ) {
if ( i < skipPrefixIndex ) {
verify( it.more() );
b.append( it.next() );
} else {
b.appendAs( *endKeys[i] , "" );
}
}
findKey( b.done() );
// Skip past key prefixes that are not supposed to be inclusive
// as described by _boundsIterator->inc() and endKeys
//
// We'll spend at worst nFields^2 time ensuring all key elements
// are properly set if all the inclusive bits are false and we
// keep landing on keys where the ith element of curr == endkeys[i].
//
// This complexity is usually ok, since this skipping is supposed to
// save us from really big linear scans across the key space in
// some pathological cases. It's not clear whether or not small
// cases are hurt too badly by this algorithm.
bool allInclusive = true;
const vector<bool> &inclusive = _boundsIterator->inc();
for ( int i = 0; i < nFields; i++ ) {
if ( !inclusive[i] ) {
allInclusive = false;
break;
}
}
again: while ( !allInclusive && ok() ) {
BSONObj key = _currKey;
it = key.begin();
dassert( nFields == key.nFields() );
for ( int i = 0; i < nFields; i++ ) {
const BSONElement e = it.next();
if ( i >= skipPrefixIndex && !inclusive[i] && e.valuesEqual(*endKeys[i]) ) {
// The ith element equals the ith endKey but it's not supposed to be inclusive.
// Skipping to the next value for the ith element involves skipping a prefix
// with i + 1 elements.
skipPrefix( key, i + 1 );
goto again;
}
}
break;
}
}
return 0;
}
Example 15: invariant
void ExpressionKeysPrivate::getS2Keys(const BSONObj& obj,
const BSONObj& keyPattern,
const S2IndexingParams& params,
BSONObjSet* keys,
MultikeyPaths* multikeyPaths) {
BSONObjSet keysToAdd = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
// Does one of our documents have a geo field?
bool haveGeoField = false;
if (multikeyPaths) {
invariant(multikeyPaths->empty());
multikeyPaths->resize(keyPattern.nFields());
}
size_t posInIdx = 0;
// We output keys in the same order as the fields we index.
for (const auto keyElem : keyPattern) {
// First, we get the keys that this field adds. Either they're added literally from
// the value of the field, or they're transformed if the field is geo.
BSONElementSet fieldElements;
const bool expandArrayOnTrailingField = false;
std::set<size_t>* arrayComponents = multikeyPaths ? &(*multikeyPaths)[posInIdx] : nullptr;
dps::extractAllElementsAlongPath(
obj, keyElem.fieldName(), fieldElements, expandArrayOnTrailingField, arrayComponents);
// Trailing array values aren't being expanded, so we still need to determine whether the
// last component of the indexed path 'keyElem.fieldName()' causes the index to be multikey.
// We say that it does if
// (a) the last component of the indexed path ever refers to an array value (regardless of
// the number of array elements)
// (b) the last component of the indexed path ever refers to GeoJSON data that requires
// multiple cells for its covering.
bool lastPathComponentCausesIndexToBeMultikey;
BSONObjSet keysForThisField = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
if (IndexNames::GEO_2DSPHERE == keyElem.valuestr()) {
if (params.indexVersion >= S2_INDEX_VERSION_2) {
// For >= V2,
// geo: null,
// geo: undefined
// geo: []
// should all behave like there is no geo field. So we look for these cases and
// throw out the field elements if we find them.
if (1 == fieldElements.size()) {
BSONElement elt = *fieldElements.begin();
// Get the :null and :undefined cases.
if (elt.isNull() || Undefined == elt.type()) {
fieldElements.clear();
} else if (elt.isABSONObj()) {
// And this is the :[] case.
BSONObj obj = elt.Obj();
if (0 == obj.nFields()) {
fieldElements.clear();
}
}
}
// >= V2 2dsphere indices require that at least one geo field to be present in a
// document in order to index it.
if (fieldElements.size() > 0) {
haveGeoField = true;
}
}
lastPathComponentCausesIndexToBeMultikey =
getS2GeoKeys(obj, fieldElements, params, &keysForThisField);
} else {
lastPathComponentCausesIndexToBeMultikey =
getS2LiteralKeys(fieldElements, params.collator, &keysForThisField);
}
// We expect there to be the missing field element present in the keys if data is
// missing. So, this should be non-empty.
verify(!keysForThisField.empty());
if (multikeyPaths && lastPathComponentCausesIndexToBeMultikey) {
const size_t pathLengthOfThisField = FieldRef{keyElem.fieldNameStringData()}.numParts();
invariant(pathLengthOfThisField > 0);
(*multikeyPaths)[posInIdx].insert(pathLengthOfThisField - 1);
}
// We take the Cartesian product of all of the keys. This requires that we have
// some keys to take the Cartesian product with. If keysToAdd.empty(), we
// initialize it.
if (keysToAdd.empty()) {
keysToAdd = keysForThisField;
++posInIdx;
continue;
}
BSONObjSet updatedKeysToAdd = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
for (BSONObjSet::const_iterator it = keysToAdd.begin(); it != keysToAdd.end(); ++it) {
for (BSONObjSet::const_iterator newIt = keysForThisField.begin();
newIt != keysForThisField.end();
++newIt) {
BSONObjBuilder b;
b.appendElements(*it);
b.append(newIt->firstElement());
updatedKeysToAdd.insert(b.obj());
//......... some code omitted here .........