This article collects typical usage examples of the C++ method MatchDetails::elemMatchKey. If you are wondering what MatchDetails::elemMatchKey does, how to use it, or where to find examples of it in use, the curated code samples below should help. You can also read further about its containing class, MatchDetails.
The following shows 14 code examples of MatchDetails::elemMatchKey, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
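Before diving into the examples, here is a minimal, self-contained sketch of the calling pattern they all share: request the key before matching, check hasElemMatchKey() after a successful match, then read the matched array index back as a string. MatchDetailsSketch and matchesAnyElement below are hypothetical stand-ins invented for this illustration; they are not the MongoDB API, but they mirror how MatchDetails is driven in the real tests.
// Hypothetical stand-in types, written only to illustrate the calling pattern
// of requestElemMatchKey() / hasElemMatchKey() / elemMatchKey().
#include <cassert>
#include <cstddef>
#include <optional>
#include <string>
#include <vector>
struct MatchDetailsSketch {
    bool requested = false;
    std::optional<std::string> key;
    void requestElemMatchKey() { requested = true; }
    bool hasElemMatchKey() const { return key.has_value(); }
    const std::string& elemMatchKey() const { return *key; }
};
// Matches if any element of 'values' equals 'target'; when the caller has
// requested it, records the index of the first matching element as a string.
bool matchesAnyElement(const std::vector<int>& values, int target, MatchDetailsSketch* details) {
    for (std::size_t i = 0; i < values.size(); ++i) {
        if (values[i] == target) {
            if (details && details->requested) details->key = std::to_string(i);
            return true;
        }
    }
    return false;
}
int main() {
    MatchDetailsSketch details;
    details.requestElemMatchKey();
    assert(!details.hasElemMatchKey());
    assert(matchesAnyElement({5, 7, 9}, 7, &details));
    // The element at index 1 matched, so elemMatchKey() reports "1".
    assert(details.hasElemMatchKey());
    assert(details.elemMatchKey() == "1");
    return 0;
}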
Example 1: run
void run() {
CollatorInterface* collator = nullptr;
M matcher(BSON("a.b" << 1), ExtensionsCallbackDisallowExtensions(), collator);
MatchDetails details;
details.requestElemMatchKey();
ASSERT(!details.hasElemMatchKey());
ASSERT(matcher.matches(fromjson("{ a:[ { b:1 } ] }"), &details));
// The '0' entry of the 'a' array is matched.
ASSERT(details.hasElemMatchKey());
ASSERT_EQUALS(string("0"), details.elemMatchKey());
}
Example 2: run
void run() {
M matcher(BSON("a.b" << 1),
MatchExpressionParser::WhereCallback());
MatchDetails details;
details.requestElemMatchKey();
ASSERT( !details.hasElemMatchKey() );
ASSERT( matcher.matches( fromjson( "{ a:[ { b:1 } ] }" ), &details ) );
// The '0' entry of the 'a' array is matched.
ASSERT( details.hasElemMatchKey() );
ASSERT_EQUALS( string( "0" ), details.elemMatchKey() );
}
Example 3: run
void run() {
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
M matcher(BSON("a.b" << 1), expCtx);
MatchDetails details;
details.requestElemMatchKey();
ASSERT(!details.hasElemMatchKey());
ASSERT(matcher.matches(fromjson("{ a:[ { b:1 } ] }"), &details));
// The '0' entry of the 'a' array is matched.
ASSERT(details.hasElemMatchKey());
ASSERT_EQUALS(string("0"), details.elemMatchKey());
}
Example 4: run
void run() {
client().insert( ns(), fromjson( "{ a:[ {}, { b:1 } ] }" ) );
Client::ReadContext context( ns() );
CoveredIndexMatcher matcher( BSON( "a.b" << 1 ), BSON( "$natural" << 1 ) );
MatchDetails details;
details.requestElemMatchKey();
boost::shared_ptr<Cursor> cursor = NamespaceDetailsTransient::getCursor( ns(), BSONObj() );
// Verify that the cursor is unindexed.
ASSERT_EQUALS( "BasicCursor", cursor->toString() );
ASSERT( matcher.matchesCurrent( cursor.get(), &details ) );
// The '1' entry of the 'a' array is matched.
ASSERT( details.hasElemMatchKey() );
ASSERT_EQUALS( string( "1" ), details.elemMatchKey() );
}
Example 5: ASSERT
TEST( SizeMatchExpression, ElemMatchKey ) {
SizeMatchExpression size;
ASSERT( size.init( "a.b", 3 ).isOK() );
MatchDetails details;
details.requestElemMatchKey();
ASSERT( !size.matches( BSON( "a" << 1 ), &details ) );
ASSERT( !details.hasElemMatchKey() );
ASSERT( size.matches( BSON( "a" << BSON( "b" << BSON_ARRAY( 1 << 2 << 3 ) ) ), &details ) );
ASSERT( !details.hasElemMatchKey() );
ASSERT( size.matches( BSON( "a" <<
BSON_ARRAY( 2 <<
BSON( "b" << BSON_ARRAY( 1 << 2 << 3 ) ) ) ),
&details ) );
ASSERT( details.hasElemMatchKey() );
ASSERT_EQUALS( "1", details.elemMatchKey() );
}
Example 6: run
void run() {
client().insert( ns(), fromjson( "{ a:[ {}, { b:1 } ] }" ) );
Client::Transaction transaction(DB_SERIALIZABLE);
Client::ReadContext context( ns(), mongo::unittest::EMPTY_STRING );
CoveredIndexMatcher matcher( BSON( "a.b" << 1 ), BSON( "$natural" << 1 ) );
MatchDetails details;
details.requestElemMatchKey();
boost::shared_ptr<Cursor> cursor = getOptimizedCursor( ns(), BSONObj() );
// Verify that the cursor is unindexed.
ASSERT_EQUALS( "BasicCursor", cursor->toString() );
ASSERT( matcher.matchesCurrent( cursor.get(), &details ) );
// The '1' entry of the 'a' array is matched.
ASSERT( details.hasElemMatchKey() );
ASSERT_EQUALS( string( "1" ), details.elemMatchKey() );
transaction.commit();
}
Example 7: _updateObjects
//......... part of the code is omitted here .........
}
}
/* Look for $inc etc. Note: as listed here, all fields to inc must be this type; you can't set some
regular ones at the moment. */
if ( isOperatorUpdate ) {
if ( multi ) {
// go to next record in case this one moves
c->advance();
// Update operations are deduped for cursors that implement their own
// deduplication. In particular, some geo cursors are excluded.
if ( autoDedup ) {
if ( seenObjects.count( loc ) ) {
continue;
}
// SERVER-5198 Advance past the document to be modified, provided
// deduplication is enabled, but see SERVER-5725.
while( c->ok() && loc == c->currLoc() ) {
c->advance();
}
}
}
const BSONObj& onDisk = loc.obj();
ModSet* useMods = mods.get();
auto_ptr<ModSet> mymodset;
if ( details.hasElemMatchKey() && mods->hasDynamicArray() ) {
useMods = mods->fixDynamicArray( details.elemMatchKey() );
mymodset.reset( useMods );
}
auto_ptr<ModSetState> mss = useMods->prepare( onDisk,
false /* not an insertion */ );
bool willAdvanceCursor = multi && c->ok() && ( modsIsIndexed || ! mss->canApplyInPlace() );
if ( willAdvanceCursor ) {
if ( cc.get() ) {
cc->setDoingDeletes( true );
}
c->prepareToTouchEarlierIterate();
}
// If we've made it this far, "ns" must contain a valid collection name, and so
// is of the form "db.collection". Therefore, the following expression must
// always be valid. "system.users" updates must never be done in place, in
// order to ensure that they are validated inside DataFileMgr::updateRecord(.).
bool isSystemUsersMod = (NamespaceString(ns).coll == "system.users");
BSONObj newObj;
if ( !mss->isUpdateIndexed() && mss->canApplyInPlace() && !isSystemUsersMod ) {
mss->applyModsInPlace( true );// const_cast<BSONObj&>(onDisk) );
DEBUGUPDATE( "\t\t\t doing in place update" );
if ( !multi )
debug.fastmod = true;
if ( modsIsIndexed ) {
seenObjects.insert( loc );
}
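In Example 7 above (and again in Example 14 later on), details.elemMatchKey() is handed to mods->fixDynamicArray() so that a dynamic-array ("positional") update path can be resolved against the array element that actually matched. The helper below is a deliberately simplified, hypothetical illustration of that idea, resolving a path like "a.$.b" to "a.1.b" given the matched index; it is not the MongoDB implementation.
// Hypothetical sketch: substitute the matched array index (as returned by
// MatchDetails::elemMatchKey()) for the '$' placeholder in an update path.
#include <cassert>
#include <string>
std::string resolvePositionalPath(std::string path, const std::string& elemMatchKey) {
    const std::string::size_type pos = path.find(".$.");
    if (pos != std::string::npos) {
        path.replace(pos + 1, 1, elemMatchKey);  // "a.$.b" -> "a.1.b"
    }
    return path;
}
int main() {
    // Suppose the query matched the element at index 1 of the 'a' array.
    assert(resolvePositionalPath("a.$.b", "1") == "a.1.b");
    return 0;
}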
Example 8: update
//......... part of the code is omitted here .........
// though. (See SERVER-5725, if curious, but "stage" based $or will make that
// ticket moot).
while( cursor->ok() && loc == cursor->currLoc() ) {
cursor->advance();
}
}
// For some (unfortunate) historical reasons, not all cursors would be valid after
// a write simply because we advanced them to a document not affected by the write.
// To protect in those cases, not only did we engage in the advance() logic above, but
// we also tell the cursor we're about to write a document that we've just seen.
// prepareToTouchEarlierIterate() requires a later call to
// recoverFromTouchingEarlierIterate(), so we make a note here to do so.
bool touchPreviousDoc = request.isMulti() && cursor->ok();
if ( touchPreviousDoc ) {
if ( clientCursor.get() )
clientCursor->setDoingDeletes( true );
cursor->prepareToTouchEarlierIterate();
}
// Found a matching document
numMatched++;
// Ask the driver to apply the mods. It may be that the driver can apply those "in
// place", that is, some values of the old document just get adjusted without any
// change to the binary layout on the bson layer. It may be that a whole new
// document is needed to accommodate the new bson layout of the resulting document.
doc.reset( oldObj, mutablebson::Document::kInPlaceEnabled );
BSONObj logObj;
// If there was a matched field, obtain it.
string matchedField;
if (matchDetails.hasElemMatchKey())
matchedField = matchDetails.elemMatchKey();
Status status = driver->update( matchedField, &doc, &logObj );
if ( !status.isOK() ) {
uasserted( 16837, status.reason() );
}
// If the driver applied the mods in place, we can ask the mutable for what
// changed. We call those changes "damages". :) We use the damages to inform the
// journal what was changed, and then apply them to the original document
// ourselves. If, however, the driver applied the mods out of place, we ask it to
// generate a new, modified document for us. In that case, the file manager will
// take care of the journaling details for us.
//
// This code flow is admittedly odd. But, right now, journaling is baked into the file
// manager. And if we aren't using the file manager, we have to do journaling
// ourselves.
bool objectWasChanged = false;
BSONObj newObj;
const char* source = NULL;
bool inPlace = doc.getInPlaceUpdates(&damages, &source);
if ( inPlace && !driver->modsAffectIndices() ) {
// If a set of modifiers were all no-ops, we are still 'in place', but there is
// no work to do, in which case we want to consider the object unchanged.
if (!damages.empty() ) {
nsDetails->paddingFits();
// All updates were in place. Apply them via durability and writing pointer.
mutablebson::DamageVector::const_iterator where = damages.begin();
const mutablebson::DamageVector::const_iterator end = damages.end();
for( ; where != end; ++where ) {
const char* sourcePtr = source + where->sourceOffset;
void* targetPtr = getDur().writingPtr(
Example 9: _updateObjectsNEW
//......... part of the code is omitted here .........
// possible that the last document of the previous child is the same as the
// first document of the next (see SERVER-5198 and jstests/orp.js).
//
// So we advance the cursor here until we see a new diskloc.
//
// Note that we won't be yielding, and we may not do so for a while if we find
// a particularly duplicated sequence of loc's. That is highly unlikely,
// though. (See SERVER-5725, if curious, but "stage" based $or will make that
// ticket moot).
while( cursor->ok() && loc == cursor->currLoc() ) {
cursor->advance();
}
}
// For some (unfortunate) historical reasons, not all cursors would be valid after
// a write simply because we advanced them to a document not affected by the write.
// To protect in those cases, not only did we engage in the advance() logic above, but
// we also tell the cursor we're about to write a document that we've just seen.
// prepareToTouchEarlierIterate() requires a later call to
// recoverFromTouchingEarlierIterate(), so we make a note here to do so.
bool touchPreviousDoc = multi && cursor->ok();
if ( touchPreviousDoc ) {
clientCursor->setDoingDeletes( true );
cursor->prepareToTouchEarlierIterate();
}
// Ask the driver to apply the mods. It may be that the driver can apply those "in
// place", that is, some values of the old document just get adjusted without any
// change to the binary layout on the bson layer. It may be that a whole new
// document is needed to accommodate the new bson layout of the resulting document.
mutablebson::Document doc( oldObj, mutablebson::Document::kInPlaceEnabled );
BSONObj logObj;
StringData matchedField = matchDetails.hasElemMatchKey() ?
matchDetails.elemMatchKey():
StringData();
status = driver.update( matchedField, &doc, &logObj );
if ( !status.isOK() ) {
uasserted( 16837, status.reason() );
}
// If the driver applied the mods in place, we can ask the mutable for what
// changed. We call those changes "damages". :) We use the damages to inform the
// journal what was changed, and then apply them to the original document
// ourselves. If, however, the driver applied the mods out of place, we ask it to
// generate a new, modified document for us. In that case, the file manager will
// take care of the journaling details for us.
//
// This code flow is admittedly odd. But, right now, journaling is baked into the file
// manager. And if we aren't using the file manager, we have to do journaling
// ourselves.
BSONObj newObj;
const char* source = NULL;
mutablebson::DamageVector damages;
bool inPlace = doc.getInPlaceUpdates(&damages, &source);
if ( inPlace && !driver.modsAffectIndices() ) {
// All updates were in place. Apply them via durability and writing pointer.
mutablebson::DamageVector::const_iterator where = damages.begin();
const mutablebson::DamageVector::const_iterator end = damages.end();
for( ; where != end; ++where ) {
const char* sourcePtr = source + where->sourceOffset;
void* targetPtr = getDur().writingPtr(
const_cast<char*>(oldObj.objdata()) + where->targetOffset,
where->size);
std::memcpy(targetPtr, sourcePtr, where->size);
}
Example 10: update
//......... part of the code is omitted here .........
// that reason.
// TODO: Do we want to pull this out of the underlying query plan?
opDebug->nscanned++;
// Found a matching document
opDebug->nscannedObjects++;
numMatched++;
// Ask the driver to apply the mods. It may be that the driver can apply those "in
// place", that is, some values of the old document just get adjusted without any
// change to the binary layout on the bson layer. It may be that a whole new
// document is needed to accommodate the new bson layout of the resulting document.
doc.reset(oldObj, mutablebson::Document::kInPlaceEnabled);
BSONObj logObj;
FieldRefSet updatedFields;
Status status = Status::OK();
if (!driver->needMatchDetails()) {
// If we don't need match details, avoid doing the rematch
status = driver->update(StringData(), &doc, &logObj, &updatedFields);
}
else {
// If there was a matched field, obtain it.
MatchDetails matchDetails;
matchDetails.requestElemMatchKey();
dassert(cq);
verify(cq->root()->matchesBSON(oldObj, &matchDetails));
string matchedField;
if (matchDetails.hasElemMatchKey())
matchedField = matchDetails.elemMatchKey();
// TODO: Right now, each mod checks in 'prepare' that if it needs positional
// data, that a non-empty StringData() was provided. In principle, we could do
// that check here in an else clause to the above conditional and remove the
// checks from the mods.
status = driver->update(matchedField, &doc, &logObj, &updatedFields);
}
if (!status.isOK()) {
uasserted(16837, status.reason());
}
// Ensure _id exists and is first
uassertStatusOK(ensureIdAndFirst(doc));
// If the driver applied the mods in place, we can ask the mutable for what
// changed. We call those changes "damages". :) We use the damages to inform the
// journal what was changed, and then apply them to the original document
// ourselves. If, however, the driver applied the mods out of place, we ask it to
// generate a new, modified document for us. In that case, the file manager will
// take care of the journaling details for us.
//
// This code flow is admittedly odd. But, right now, journaling is baked into the file
// manager. And if we aren't using the file manager, we have to do journaling
// ourselves.
bool docWasModified = false;
BSONObj newObj;
const char* source = NULL;
bool inPlace = doc.getInPlaceUpdates(&damages, &source);
// If something changed in the document, verify that no immutable fields were changed
Example 11: transformAndUpdate
BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, RecordId& recordId) {
const UpdateRequest* request = _params.request;
UpdateDriver* driver = _params.driver;
CanonicalQuery* cq = _params.canonicalQuery;
UpdateLifecycle* lifecycle = request->getLifecycle();
// If asked to return new doc, default to the oldObj, in case nothing changes.
BSONObj newObj = oldObj.value();
// Ask the driver to apply the mods. It may be that the driver can apply those "in
// place", that is, some values of the old document just get adjusted without any
// change to the binary layout on the bson layer. It may be that a whole new document
// is needed to accommodate the new bson layout of the resulting document. In any event,
// only enable in-place mutations if the underlying storage engine offers support for
// writing damage events.
_doc.reset(oldObj.value(),
(_collection->updateWithDamagesSupported()
? mutablebson::Document::kInPlaceEnabled
: mutablebson::Document::kInPlaceDisabled));
BSONObj logObj;
bool docWasModified = false;
Status status = Status::OK();
const bool validateForStorage = getOpCtx()->writesAreReplicated() && _enforceOkForStorage;
FieldRefSet immutablePaths;
if (getOpCtx()->writesAreReplicated() && !request->isFromMigration()) {
if (lifecycle) {
auto immutablePathsVector =
getImmutableFields(getOpCtx(), request->getNamespaceString());
if (immutablePathsVector) {
immutablePaths.fillFrom(
transitional_tools_do_not_use::unspool_vector(*immutablePathsVector));
}
}
immutablePaths.keepShortest(&idFieldRef);
}
if (!driver->needMatchDetails()) {
// If we don't need match details, avoid doing the rematch
status = driver->update(
StringData(), &_doc, validateForStorage, immutablePaths, &logObj, &docWasModified);
} else {
// If there was a matched field, obtain it.
MatchDetails matchDetails;
matchDetails.requestElemMatchKey();
dassert(cq);
verify(cq->root()->matchesBSON(oldObj.value(), &matchDetails));
string matchedField;
if (matchDetails.hasElemMatchKey())
matchedField = matchDetails.elemMatchKey();
status = driver->update(
matchedField, &_doc, validateForStorage, immutablePaths, &logObj, &docWasModified);
}
if (!status.isOK()) {
uasserted(16837, status.reason());
}
// Skip adding _id field if the collection is capped (since capped collection documents can
// neither grow nor shrink).
const auto createIdField = !_collection->isCapped();
// Ensure if _id exists it is first
status = ensureIdFieldIsFirst(&_doc);
if (status.code() == ErrorCodes::InvalidIdField) {
// Create ObjectId _id field if we are doing that
if (createIdField) {
addObjectIDIdField(&_doc);
}
} else {
uassertStatusOK(status);
}
// See if the changes were applied in place
const char* source = NULL;
const bool inPlace = _doc.getInPlaceUpdates(&_damages, &source);
if (inPlace && _damages.empty()) {
// An interesting edge case. A modifier didn't notice that it was really a no-op
// during its 'prepare' phase. That represents a missed optimization, but we still
// shouldn't do any real work. Toggle 'docWasModified' to 'false'.
//
// Currently, an example of this is '{ $push : { x : {$each: [], $sort: 1} } }' when the 'x'
// array exists and is already sorted.
docWasModified = false;
}
if (docWasModified) {
// Prepare to write back the modified document
WriteUnitOfWork wunit(getOpCtx());
RecordId newRecordId;
OplogUpdateEntryArgs args;
if (!request->isExplain()) {
invariant(_collection);
//......... part of the code is omitted here .........
Example 12: transformAndUpdate
void UpdateStage::transformAndUpdate(BSONObj& oldObj, DiskLoc& loc) {
const UpdateRequest* request = _params.request;
UpdateDriver* driver = _params.driver;
CanonicalQuery* cq = _params.canonicalQuery;
UpdateLifecycle* lifecycle = request->getLifecycle();
// Ask the driver to apply the mods. It may be that the driver can apply those "in
// place", that is, some values of the old document just get adjusted without any
// change to the binary layout on the bson layer. It may be that a whole new
// document is needed to accommodate the new bson layout of the resulting document.
_doc.reset(oldObj, mutablebson::Document::kInPlaceEnabled);
BSONObj logObj;
FieldRefSet updatedFields;
Status status = Status::OK();
if (!driver->needMatchDetails()) {
// If we don't need match details, avoid doing the rematch
status = driver->update(StringData(), &_doc, &logObj, &updatedFields);
}
else {
// If there was a matched field, obtain it.
MatchDetails matchDetails;
matchDetails.requestElemMatchKey();
dassert(cq);
verify(cq->root()->matchesBSON(oldObj, &matchDetails));
string matchedField;
if (matchDetails.hasElemMatchKey())
matchedField = matchDetails.elemMatchKey();
// TODO: Right now, each mod checks in 'prepare' that if it needs positional
// data, that a non-empty StringData() was provided. In principle, we could do
// that check here in an else clause to the above conditional and remove the
// checks from the mods.
status = driver->update(matchedField, &_doc, &logObj, &updatedFields);
}
if (!status.isOK()) {
uasserted(16837, status.reason());
}
// Ensure _id exists and is first
uassertStatusOK(ensureIdAndFirst(_doc));
// If the driver applied the mods in place, we can ask the mutable for what
// changed. We call those changes "damages". :) We use the damages to inform the
// journal what was changed, and then apply them to the original document
// ourselves. If, however, the driver applied the mods out of place, we ask it to
// generate a new, modified document for us. In that case, the file manager will
// take care of the journaling details for us.
//
// This code flow is admittedly odd. But, right now, journaling is baked into the file
// manager. And if we aren't using the file manager, we have to do journaling
// ourselves.
bool docWasModified = false;
BSONObj newObj;
const char* source = NULL;
bool inPlace = _doc.getInPlaceUpdates(&_damages, &source);
// If something changed in the document, verify that no immutable fields were changed
// and data is valid for storage.
if ((!inPlace || !_damages.empty()) ) {
if (!(request->isFromReplication() || request->isFromMigration())) {
const std::vector<FieldRef*>* immutableFields = NULL;
if (lifecycle)
immutableFields = lifecycle->getImmutableFields();
uassertStatusOK(validate(oldObj,
updatedFields,
_doc,
immutableFields,
driver->modOptions()) );
}
}
// Save state before making changes
saveState();
{
WriteUnitOfWork wunit(request->getOpCtx());
if (inPlace && !driver->modsAffectIndices()) {
// If a set of modifiers were all no-ops, we are still 'in place', but there
// is no work to do, in which case we want to consider the object unchanged.
if (!_damages.empty() ) {
// Don't actually do the write if this is an explain.
if (!request->isExplain()) {
invariant(_collection);
const RecordData oldRec(oldObj.objdata(), oldObj.objsize());
_collection->updateDocumentWithDamages(request->getOpCtx(), loc,
oldRec, source, _damages);
}
docWasModified = true;
_specificStats.fastmod = true;
}
//......... part of the code is omitted here .........
Example 13: transform
Status ProjectionExec::transform(const BSONObj& in,
BSONObjBuilder* bob,
const MatchDetails* details) const {
const ArrayOpType& arrayOpType = _arrayOpType;
BSONObjIterator it(in);
while (it.more()) {
BSONElement elt = it.next();
// Case 1: _id
if (mongoutils::str::equals("_id", elt.fieldName())) {
if (_includeID) {
bob->append(elt);
}
continue;
}
// Case 2: no array projection for this field.
Matchers::const_iterator matcher = _matchers.find(elt.fieldName());
if (_matchers.end() == matcher) {
Status s = append(bob, elt, details, arrayOpType);
if (!s.isOK()) {
return s;
}
continue;
}
// Case 3: field has array projection with $elemMatch specified.
if (ARRAY_OP_ELEM_MATCH != arrayOpType) {
return Status(ErrorCodes::BadValue, "Matchers are only supported for $elemMatch");
}
MatchDetails arrayDetails;
arrayDetails.requestElemMatchKey();
if (matcher->second->matchesBSON(in, &arrayDetails)) {
FieldMap::const_iterator fieldIt = _fields.find(elt.fieldName());
if (_fields.end() == fieldIt) {
return Status(ErrorCodes::BadValue,
"$elemMatch specified, but projection field not found.");
}
BSONArrayBuilder arrBuilder;
BSONObjBuilder subBob;
if (in.getField(elt.fieldName()).eoo()) {
return Status(ErrorCodes::InternalError,
"$elemMatch called on document element with eoo");
}
if (in.getField(elt.fieldName()).Obj().getField(arrayDetails.elemMatchKey()).eoo()) {
return Status(ErrorCodes::InternalError,
"$elemMatch called on array element with eoo");
}
arrBuilder.append(
in.getField(elt.fieldName()).Obj().getField(arrayDetails.elemMatchKey()));
subBob.appendArray(matcher->first, arrBuilder.arr());
Status status = append(bob, subBob.done().firstElement(), details, arrayOpType);
if (!status.isOK()) {
return status;
}
}
}
return Status::OK();
}
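Example 13 works because BSON stores array elements as sub-fields named "0", "1", "2", and so on, which is why the index string returned by elemMatchKey() can be passed straight to getField() to pull out the matched element. The snippet below mimics that lookup with an ordinary std::map standing in for a BSON array; it is only a sketch of the idea, not the BSONObj API.
// Hypothetical sketch: a BSON array like [ 10, 20, 30 ] is encoded with the
// index strings "0", "1", "2" as field names, so an elemMatchKey value can be
// used directly as a lookup key.
#include <cassert>
#include <map>
#include <string>
int main() {
    const std::map<std::string, int> bsonArrayLike = {{"0", 10}, {"1", 20}, {"2", 30}};
    const std::string elemMatchKey = "1";  // index reported by the matcher
    assert(bsonArrayLike.at(elemMatchKey) == 20);
    return 0;
}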
Example 14: _updateObjects
//......... part of the code is omitted here .........
continue;
}
BSONObj currentObj = c->current();
BSONObj pattern = patternOrig;
if ( logop ) {
BSONObjBuilder idPattern;
BSONElement id;
// NOTE: If the matching object lacks an id, we'll log
// with the original pattern. This isn't replay-safe.
// It might make sense to suppress the log instead
// if there's no id.
if ( currentObj.getObjectID( id ) ) {
idPattern.append( id );
pattern = idPattern.obj();
}
else {
uassert( 10157 , "multi-update requires all modified objects to have an _id" , ! multi );
}
}
/* Look for $inc etc. Note: as listed here, all fields to inc must be this type; you can't set some
regular ones at the moment. */
struct LogOpUpdateDetails loud;
loud.logop = logop;
loud.ns = ns;
loud.fromMigrate = fromMigrate;
if ( isOperatorUpdate ) {
if ( multi ) {
// Make our own copies of the currPK and currentObj before we invalidate
// them by advancing the cursor.
currPK = currPK.copy();
currentObj = currentObj.copy();
// Advance past the document to be modified. This used to be because of SERVER-5198,
// but TokuMX does it because we want to avoid needing to do manual deduplication
// of this PK on the next iteration if the current update modifies the next
// entry in the index. For example, an index scan over a:1 with mod {$inc: {a:1}}
// would cause every other key read to be a duplicate if we didn't advance here.
while ( c->ok() && currPK == c->currPK() ) {
c->advance();
}
// Multi updates need to do their own deduplication because updates may modify the
// keys the cursor is in the process of scanning over.
if ( seenObjects.count( currPK ) ) {
continue;
} else {
seenObjects.insert( currPK );
}
}
ModSet* useMods = mods.get();
auto_ptr<ModSet> mymodset;
if ( details.hasElemMatchKey() && mods->hasDynamicArray() ) {
useMods = mods->fixDynamicArray( details.elemMatchKey() );
mymodset.reset( useMods );
}
auto_ptr<ModSetState> mss = useMods->prepare( currentObj );
updateUsingMods( d, currPK, currentObj, *mss, &loud );
numModded++;
if ( ! multi )
return UpdateResult( 1 , 1 , numModded , BSONObj() );
continue;
} // end if operator is update
uassert( 10158 , "multi update only works with $ operators" , ! multi );
updateNoMods( d, currPK, currentObj, updateobj, &loud );
return UpdateResult( 1 , 0 , 1 , BSONObj() );
} while ( c->ok() );
} // endif
if ( numModded )
return UpdateResult( 1 , 1 , numModded , BSONObj() );
if ( upsert ) {
BSONObj newObj = updateobj;
if ( updateobj.firstElementFieldName()[0] == '$' ) {
// upsert of an $operation. build a default object
BSONObj newObj = mods->createNewFromQuery( patternOrig );
debug.fastmodinsert = true;
insertAndLog( ns, d, newObj, logop, fromMigrate );
return UpdateResult( 0 , 1 , 1 , newObj );
}
uassert( 10159 , "multi update only works with $ operators" , ! multi );
debug.upsert = true;
insertAndLog( ns, d, newObj, logop, fromMigrate );
return UpdateResult( 0 , 0 , 1 , newObj );
}
return UpdateResult( 0 , isOperatorUpdate , 0 , BSONObj() );
}