This article collects typical usage examples of the C++ method MatchDetails::hasElemMatchKey: what the method does, how it is called, and what real calls look like. The curated examples below should help; you can also explore further usage examples of the enclosing class, MatchDetails.
The following presents 15 code examples of MatchDetails::hasElemMatchKey, sorted by popularity by default.
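Before the examples, here is a minimal sketch of the usual call sequence, distilled from the tests below: request the elem match key before matching, and read elemMatchKey() only after hasElemMatchKey() reports that a key was recorded. The sketch reuses the legacy single-argument Matcher constructor from Example 3, so treat the exact matcher construction as version-dependent.
Matcher matcher( BSON( "a.b" << 1 ) );                  // query { "a.b": 1 }
MatchDetails details;
details.requestElemMatchKey();                          // ask the matcher to record the matching array index
if ( matcher.matches( fromjson( "{ a:[ { b:1 } ] }" ), &details ) &&
     details.hasElemMatchKey() ) {
    // elemMatchKey() is only meaningful once hasElemMatchKey() returns true.
    std::string key = details.elemMatchKey();           // "0" -- index of the matching element of 'a'
}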
Example 1: BSON
TEST( ElemMatchValueMatchExpression, ElemMatchKey ) {
    BSONObj baseOperand = BSON( "$gt" << 6 );
    auto_ptr<ComparisonMatchExpression> gt( new ComparisonMatchExpression() );
    ASSERT( gt->init( "", ComparisonMatchExpression::GT, baseOperand[ "$gt" ] ).isOK() );
    ElemMatchValueMatchExpression op;
    ASSERT( op.init( "a.b", gt.release() ).isOK() );
    MatchDetails details;
    details.requestElemMatchKey();
    ASSERT( !op.matches( BSONObj(), &details ) );
    ASSERT( !details.hasElemMatchKey() );
    ASSERT( !op.matches( BSON( "a" << BSON( "b" << BSON_ARRAY( 2 ) ) ),
                         &details ) );
    ASSERT( !details.hasElemMatchKey() );
    ASSERT( op.matches( BSON( "a" << BSON( "b" << BSON_ARRAY( 3 << 7 ) ) ),
                        &details ) );
    ASSERT( details.hasElemMatchKey() );
    // The entry within the $elemMatch array is reported.
    ASSERT_EQUALS( "1", details.elemMatchKey() );
    ASSERT( op.matches( BSON( "a" <<
                              BSON_ARRAY( 1 << 2 <<
                                          BSON( "b" << BSON_ARRAY( 3 << 7 ) ) ) ),
                        &details ) );
    ASSERT( details.hasElemMatchKey() );
    // The entry within a parent of the $elemMatch array is reported.
    ASSERT_EQUALS( "2", details.elemMatchKey() );
}
Example 2: BSON
TEST(AndOp, ElemMatchKey) {
    BSONObj baseOperand1 = BSON("a" << 1);
    BSONObj baseOperand2 = BSON("b" << 2);
    unique_ptr<ComparisonMatchExpression> sub1(new EqualityMatchExpression());
    ASSERT(sub1->init("a", baseOperand1["a"]).isOK());
    unique_ptr<ComparisonMatchExpression> sub2(new EqualityMatchExpression());
    ASSERT(sub2->init("b", baseOperand2["b"]).isOK());
    AndMatchExpression andOp;
    andOp.add(sub1.release());
    andOp.add(sub2.release());
    MatchDetails details;
    details.requestElemMatchKey();
    ASSERT(!andOp.matchesBSON(BSON("a" << BSON_ARRAY(1)), &details));
    ASSERT(!details.hasElemMatchKey());
    ASSERT(!andOp.matchesBSON(BSON("b" << BSON_ARRAY(2)), &details));
    ASSERT(!details.hasElemMatchKey());
    ASSERT(andOp.matchesBSON(BSON("a" << BSON_ARRAY(1) << "b" << BSON_ARRAY(1 << 2)), &details));
    ASSERT(details.hasElemMatchKey());
    // The elem match key for the second $and clause is recorded.
    ASSERT_EQUALS("1", details.elemMatchKey());
}
Example 3: run
void run() {
    Matcher matcher( BSON( "a.b" << 1 ) );
    MatchDetails details;
    details.requestElemMatchKey();
    ASSERT( !details.hasElemMatchKey() );
    ASSERT( matcher.matches( fromjson( "{ a:[ { b:1 } ] }" ), &details ) );
    // The '0' entry of the 'a' array is matched.
    ASSERT( details.hasElemMatchKey() );
    ASSERT_EQUALS( string( "0" ), details.elemMatchKey() );
}
Example 4: run
void run() {
    CollatorInterface* collator = nullptr;
    M matcher(BSON("a.b" << 1), ExtensionsCallbackDisallowExtensions(), collator);
    MatchDetails details;
    details.requestElemMatchKey();
    ASSERT(!details.hasElemMatchKey());
    ASSERT(matcher.matches(fromjson("{ a:[ { b:1 } ] }"), &details));
    // The '0' entry of the 'a' array is matched.
    ASSERT(details.hasElemMatchKey());
    ASSERT_EQUALS(string("0"), details.elemMatchKey());
}
Example 5: run
void run() {
    boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
    M matcher(BSON("a.b" << 1), expCtx);
    MatchDetails details;
    details.requestElemMatchKey();
    ASSERT(!details.hasElemMatchKey());
    ASSERT(matcher.matches(fromjson("{ a:[ { b:1 } ] }"), &details));
    // The '0' entry of the 'a' array is matched.
    ASSERT(details.hasElemMatchKey());
    ASSERT_EQUALS(string("0"), details.elemMatchKey());
}
Example 6: ASSERT
TEST( SizeMatchExpression, ElemMatchKey ) {
    SizeMatchExpression size;
    ASSERT( size.init( "a.b", 3 ).isOK() );
    MatchDetails details;
    details.requestElemMatchKey();
    ASSERT( !size.matches( BSON( "a" << 1 ), &details ) );
    ASSERT( !details.hasElemMatchKey() );
    ASSERT( size.matches( BSON( "a" << BSON( "b" << BSON_ARRAY( 1 << 2 << 3 ) ) ), &details ) );
    ASSERT( !details.hasElemMatchKey() );
    ASSERT( size.matches( BSON( "a" <<
                                BSON_ARRAY( 2 <<
                                            BSON( "b" << BSON_ARRAY( 1 << 2 << 3 ) ) ) ),
                          &details ) );
    ASSERT( details.hasElemMatchKey() );
    ASSERT_EQUALS( "1", details.elemMatchKey() );
}
Example 7: BSON
TEST(OrOp, ElemMatchKey) {
    BSONObj baseOperand1 = BSON("a" << 1);
    BSONObj baseOperand2 = BSON("b" << 2);
    const CollatorInterface* collator = nullptr;
    unique_ptr<ComparisonMatchExpression> sub1(new EqualityMatchExpression(collator));
    ASSERT(sub1->init("a", baseOperand1["a"]).isOK());
    unique_ptr<ComparisonMatchExpression> sub2(new EqualityMatchExpression(collator));
    ASSERT(sub2->init("b", baseOperand2["b"]).isOK());
    OrMatchExpression orOp;
    orOp.add(sub1.release());
    orOp.add(sub2.release());
    MatchDetails details;
    details.requestElemMatchKey();
    ASSERT(!orOp.matchesBSON(BSONObj(), &details));
    ASSERT(!details.hasElemMatchKey());
    ASSERT(!orOp.matchesBSON(BSON("a" << BSON_ARRAY(10) << "b" << BSON_ARRAY(10)), &details));
    ASSERT(!details.hasElemMatchKey());
    ASSERT(orOp.matchesBSON(BSON("a" << BSON_ARRAY(1) << "b" << BSON_ARRAY(1 << 2)), &details));
    // The elem match key feature is not implemented for $or.
    ASSERT(!details.hasElemMatchKey());
}
Example 8: TEST
TEST( OrOp, ElemMatchKey ) {
    BSONObj baseOperand1 = BSON( "a" << 1 );
    BSONObj baseOperand2 = BSON( "b" << 2 );
    auto_ptr<ComparisonMatchExpression> sub1( new ComparisonMatchExpression() );
    ASSERT( sub1->init( "a", ComparisonMatchExpression::EQ, baseOperand1[ "a" ] ).isOK() );
    auto_ptr<ComparisonMatchExpression> sub2( new ComparisonMatchExpression() );
    ASSERT( sub2->init( "b", ComparisonMatchExpression::EQ, baseOperand2[ "b" ] ).isOK() );
    OrMatchExpression orOp;
    orOp.add( sub1.release() );
    orOp.add( sub2.release() );
    MatchDetails details;
    details.requestElemMatchKey();
    ASSERT( !orOp.matches( BSONObj(), &details ) );
    ASSERT( !details.hasElemMatchKey() );
    ASSERT( !orOp.matches( BSON( "a" << BSON_ARRAY( 10 ) << "b" << BSON_ARRAY( 10 ) ),
                           &details ) );
    ASSERT( !details.hasElemMatchKey() );
    ASSERT( orOp.matches( BSON( "a" << BSON_ARRAY( 1 ) << "b" << BSON_ARRAY( 1 << 2 ) ),
                          &details ) );
    // The elem match key feature is not implemented for $or.
    ASSERT( !details.hasElemMatchKey() );
}
Example 9: run
void run() {
    client().insert( ns(), fromjson( "{ a:[ {}, { b:1 } ] }" ) );
    Client::Transaction transaction(DB_SERIALIZABLE);
    Client::ReadContext context( ns(), mongo::unittest::EMPTY_STRING );
    CoveredIndexMatcher matcher( BSON( "a.b" << 1 ), BSON( "$natural" << 1 ) );
    MatchDetails details;
    details.requestElemMatchKey();
    boost::shared_ptr<Cursor> cursor = getOptimizedCursor( ns(), BSONObj() );
    // Verify that the cursor is unindexed.
    ASSERT_EQUALS( "BasicCursor", cursor->toString() );
    ASSERT( matcher.matchesCurrent( cursor.get(), &details ) );
    // The '1' entry of the 'a' array is matched.
    ASSERT( details.hasElemMatchKey() );
    ASSERT_EQUALS( string( "1" ), details.elemMatchKey() );
    transaction.commit();
}
Example 10: transformAndUpdate
void UpdateStage::transformAndUpdate(BSONObj& oldObj, DiskLoc& loc) {
const UpdateRequest* request = _params.request;
UpdateDriver* driver = _params.driver;
CanonicalQuery* cq = _params.canonicalQuery;
UpdateLifecycle* lifecycle = request->getLifecycle();
// Ask the driver to apply the mods. It may be that the driver can apply those "in
// place", that is, some values of the old document just get adjusted without any
// change to the binary layout on the bson layer. It may be that a whole new
document is needed to accommodate the new bson layout of the resulting document.
_doc.reset(oldObj, mutablebson::Document::kInPlaceEnabled);
BSONObj logObj;
FieldRefSet updatedFields;
Status status = Status::OK();
if (!driver->needMatchDetails()) {
// If we don't need match details, avoid doing the rematch
status = driver->update(StringData(), &_doc, &logObj, &updatedFields);
}
else {
// If there was a matched field, obtain it.
MatchDetails matchDetails;
matchDetails.requestElemMatchKey();
dassert(cq);
verify(cq->root()->matchesBSON(oldObj, &matchDetails));
string matchedField;
if (matchDetails.hasElemMatchKey())
matchedField = matchDetails.elemMatchKey();
// TODO: Right now, each mod checks in 'prepare' that if it needs positional
// data, that a non-empty StringData() was provided. In principle, we could do
// that check here in an else clause to the above conditional and remove the
// checks from the mods.
status = driver->update(matchedField, &_doc, &logObj, &updatedFields);
}
if (!status.isOK()) {
uasserted(16837, status.reason());
}
// Ensure _id exists and is first
uassertStatusOK(ensureIdAndFirst(_doc));
// If the driver applied the mods in place, we can ask the mutable for what
// changed. We call those changes "damages". :) We use the damages to inform the
// journal what was changed, and then apply them to the original document
// ourselves. If, however, the driver applied the mods out of place, we ask it to
// generate a new, modified document for us. In that case, the file manager will
// take care of the journaling details for us.
//
// This code flow is admittedly odd. But, right now, journaling is baked in the file
manager. And if we aren't using the file manager, we have to do journaling
// ourselves.
bool docWasModified = false;
BSONObj newObj;
const char* source = NULL;
bool inPlace = _doc.getInPlaceUpdates(&_damages, &source);
// If something changed in the document, verify that no immutable fields were changed
// and data is valid for storage.
if ((!inPlace || !_damages.empty()) ) {
if (!(request->isFromReplication() || request->isFromMigration())) {
const std::vector<FieldRef*>* immutableFields = NULL;
if (lifecycle)
immutableFields = lifecycle->getImmutableFields();
uassertStatusOK(validate(oldObj,
updatedFields,
_doc,
immutableFields,
driver->modOptions()) );
}
}
// Save state before making changes
saveState();
{
WriteUnitOfWork wunit(request->getOpCtx());
if (inPlace && !driver->modsAffectIndices()) {
// If a set of modifiers were all no-ops, we are still 'in place', but there
// is no work to do, in which case we want to consider the object unchanged.
if (!_damages.empty() ) {
// Don't actually do the write if this is an explain.
if (!request->isExplain()) {
invariant(_collection);
const RecordData oldRec(oldObj.objdata(), oldObj.objsize());
_collection->updateDocumentWithDamages(request->getOpCtx(), loc,
oldRec, source, _damages);
}
docWasModified = true;
_specificStats.fastmod = true;
}
//......... some of this code has been omitted .........
Example 11: _updateObjects
//......... some of this code has been omitted .........
continue;
}
BSONObj currentObj = c->current();
BSONObj pattern = patternOrig;
if ( logop ) {
BSONObjBuilder idPattern;
BSONElement id;
// NOTE: If the matching object lacks an id, we'll log
// with the original pattern. This isn't replay-safe.
// It might make sense to suppress the log instead
// if there's no id.
if ( currentObj.getObjectID( id ) ) {
idPattern.append( id );
pattern = idPattern.obj();
}
else {
uassert( 10157 , "multi-update requires all modified objects to have an _id" , ! multi );
}
}
/* look for $inc etc. note as listed here, all fields to inc must be this type, you can't set some
regular ones at the moment. */
struct LogOpUpdateDetails loud;
loud.logop = logop;
loud.ns = ns;
loud.fromMigrate = fromMigrate;
if ( isOperatorUpdate ) {
if ( multi ) {
// Make our own copies of the currPK and currentObj before we invalidate
// them by advancing the cursor.
currPK = currPK.copy();
currentObj = currentObj.copy();
// Advance past the document to be modified. This used to be because of SERVER-5198,
// but TokuMX does it because we want to avoid needing to do manual deduplication
// of this PK on the next iteration if the current update modifies the next
// entry in the index. For example, an index scan over a:1 with mod {$inc: {a:1}}
// would cause every other key read to be a duplicate if we didn't advance here.
while ( c->ok() && currPK == c->currPK() ) {
c->advance();
}
// Multi updates need to do their own deduplication because updates may modify the
// keys the cursor is in the process of scanning over.
if ( seenObjects.count( currPK ) ) {
continue;
} else {
seenObjects.insert( currPK );
}
}
ModSet* useMods = mods.get();
auto_ptr<ModSet> mymodset;
if ( details.hasElemMatchKey() && mods->hasDynamicArray() ) {
useMods = mods->fixDynamicArray( details.elemMatchKey() );
mymodset.reset( useMods );
}
auto_ptr<ModSetState> mss = useMods->prepare( currentObj );
updateUsingMods( d, currPK, currentObj, *mss, &loud );
numModded++;
if ( ! multi )
return UpdateResult( 1 , 1 , numModded , BSONObj() );
continue;
} // end if operator is update
uassert( 10158 , "multi update only works with $ operators" , ! multi );
updateNoMods( d, currPK, currentObj, updateobj, &loud );
return UpdateResult( 1 , 0 , 1 , BSONObj() );
} while ( c->ok() );
} // endif
if ( numModded )
return UpdateResult( 1 , 1 , numModded , BSONObj() );
if ( upsert ) {
BSONObj newObj = updateobj;
if ( updateobj.firstElementFieldName()[0] == '$' ) {
// upsert of an $operation. build a default object
BSONObj newObj = mods->createNewFromQuery( patternOrig );
debug.fastmodinsert = true;
insertAndLog( ns, d, newObj, logop, fromMigrate );
return UpdateResult( 0 , 1 , 1 , newObj );
}
uassert( 10159 , "multi update only works with $ operators" , ! multi );
debug.upsert = true;
insertAndLog( ns, d, newObj, logop, fromMigrate );
return UpdateResult( 0 , 0 , 1 , newObj );
}
return UpdateResult( 0 , isOperatorUpdate , 0 , BSONObj() );
}
Example 12: _updateObjects
//......... some of this code has been omitted .........
uassert( 10157 , "multi-update requires all modified objects to have an _id" , ! multi );
}
}
/* look for $inc etc. note as listed here, all fields to inc must be this type, you can't set some
regular ones at the moment. */
if ( isOperatorUpdate ) {
if ( multi ) {
// go to next record in case this one moves
c->advance();
// Update operations are deduped for cursors that implement their own
// deduplication. In particular, some geo cursors are excluded.
if ( autoDedup ) {
if ( seenObjects.count( loc ) ) {
continue;
}
// SERVER-5198 Advance past the document to be modified, provided
// deduplication is enabled, but see SERVER-5725.
while( c->ok() && loc == c->currLoc() ) {
c->advance();
}
}
}
const BSONObj& onDisk = loc.obj();
ModSet* useMods = mods.get();
auto_ptr<ModSet> mymodset;
if ( details.hasElemMatchKey() && mods->hasDynamicArray() ) {
useMods = mods->fixDynamicArray( details.elemMatchKey() );
mymodset.reset( useMods );
}
auto_ptr<ModSetState> mss = useMods->prepare( onDisk,
false /* not an insertion */ );
bool willAdvanceCursor = multi && c->ok() && ( modsIsIndexed || ! mss->canApplyInPlace() );
if ( willAdvanceCursor ) {
if ( cc.get() ) {
cc->setDoingDeletes( true );
}
c->prepareToTouchEarlierIterate();
}
// If we've made it this far, "ns" must contain a valid collection name, and so
// is of the form "db.collection". Therefore, the following expression must
// always be valid. "system.users" updates must never be done in place, in
// order to ensure that they are validated inside DataFileMgr::updateRecord(.).
bool isSystemUsersMod = (NamespaceString(ns).coll == "system.users");
BSONObj newObj;
if ( !mss->isUpdateIndexed() && mss->canApplyInPlace() && !isSystemUsersMod ) {
mss->applyModsInPlace( true );// const_cast<BSONObj&>(onDisk) );
DEBUGUPDATE( "\t\t\t doing in place update" );
if ( !multi )
debug.fastmod = true;
if ( modsIsIndexed ) {
seenObjects.insert( loc );
Example 13: update
//......... some of this code has been omitted .........
// a particularly duplicated sequence of loc's. That is highly unlikely,
// though. (See SERVER-5725, if curious, but "stage" based $or will make that
// ticket moot).
while( cursor->ok() && loc == cursor->currLoc() ) {
cursor->advance();
}
}
// For some (unfortunate) historical reasons, not all cursors would be valid after
// a write simply because we advanced them to a document not affected by the write.
// To protect in those cases, not only we engaged in the advance() logic above, but
// we also tell the cursor we're about to write a document that we've just seen.
// prepareToTouchEarlierIterate() requires calling later
// recoverFromTouchingEarlierIterate(), so we make a note here to do so.
bool touchPreviousDoc = request.isMulti() && cursor->ok();
if ( touchPreviousDoc ) {
if ( clientCursor.get() )
clientCursor->setDoingDeletes( true );
cursor->prepareToTouchEarlierIterate();
}
// Found a matching document
numMatched++;
// Ask the driver to apply the mods. It may be that the driver can apply those "in
// place", that is, some values of the old document just get adjusted without any
// change to the binary layout on the bson layer. It may be that a whole new
document is needed to accommodate the new bson layout of the resulting document.
doc.reset( oldObj, mutablebson::Document::kInPlaceEnabled );
BSONObj logObj;
// If there was a matched field, obtain it.
string matchedField;
if (matchDetails.hasElemMatchKey())
matchedField = matchDetails.elemMatchKey();
Status status = driver->update( matchedField, &doc, &logObj );
if ( !status.isOK() ) {
uasserted( 16837, status.reason() );
}
// If the driver applied the mods in place, we can ask the mutable for what
// changed. We call those changes "damages". :) We use the damages to inform the
// journal what was changed, and then apply them to the original document
// ourselves. If, however, the driver applied the mods out of place, we ask it to
// generate a new, modified document for us. In that case, the file manager will
// take care of the journaling details for us.
//
// This code flow is admittedly odd. But, right now, journaling is baked in the file
manager. And if we aren't using the file manager, we have to do journaling
// ourselves.
bool objectWasChanged = false;
BSONObj newObj;
const char* source = NULL;
bool inPlace = doc.getInPlaceUpdates(&damages, &source);
if ( inPlace && !driver->modsAffectIndices() ) {
// If a set of modifiers were all no-ops, we are still 'in place', but there is
// no work to do, in which case we want to consider the object unchanged.
if (!damages.empty() ) {
nsDetails->paddingFits();
// All updates were in place. Apply them via durability and writing pointer.
mutablebson::DamageVector::const_iterator where = damages.begin();
const mutablebson::DamageVector::const_iterator end = damages.end();
for( ; where != end; ++where ) {
const char* sourcePtr = source + where->sourceOffset;
Example 14: _updateObjectsNEW
//......... some of this code has been omitted .........
// one of $or child and started consuming the next one. In that case, it is
// possible that the last document of the previous child is the same as the
// first document of the next (see SERVER-5198 and jstests/orp.js).
//
// So we advance the cursor here until we see a new diskloc.
//
// Note that we won't be yielding, and we may not do so for a while if we find
// a particularly duplicated sequence of loc's. That is highly unlikely,
// though. (See SERVER-5725, if curious, but "stage" based $or will make that
// ticket moot).
while( cursor->ok() && loc == cursor->currLoc() ) {
cursor->advance();
}
}
// For some (unfortunate) historical reasons, not all cursors would be valid after
// a write simply because we advanced them to a document not affected by the write.
// To protect in those cases, not only we engaged in the advance() logic above, but
// we also tell the cursor we're about to write a document that we've just seen.
// prepareToTouchEarlierIterate() requires calling later
// recoverFromTouchingEarlierIterate(), so we make a note here to do so.
bool touchPreviousDoc = multi && cursor->ok();
if ( touchPreviousDoc ) {
clientCursor->setDoingDeletes( true );
cursor->prepareToTouchEarlierIterate();
}
// Ask the driver to apply the mods. It may be that the driver can apply those "in
// place", that is, some values of the old document just get adjusted without any
// change to the binary layout on the bson layer. It may be that a whole new
document is needed to accommodate the new bson layout of the resulting document.
mutablebson::Document doc( oldObj, mutablebson::Document::kInPlaceEnabled );
BSONObj logObj;
StringData matchedField = matchDetails.hasElemMatchKey() ?
matchDetails.elemMatchKey():
StringData();
status = driver.update( matchedField, &doc, &logObj );
if ( !status.isOK() ) {
uasserted( 16837, status.reason() );
}
// If the driver applied the mods in place, we can ask the mutable for what
// changed. We call those changes "damages". :) We use the damages to inform the
// journal what was changed, and then apply them to the original document
// ourselves. If, however, the driver applied the mods out of place, we ask it to
// generate a new, modified document for us. In that case, the file manager will
// take care of the journaling details for us.
//
// This code flow is admittedly odd. But, right now, journaling is baked in the file
manager. And if we aren't using the file manager, we have to do journaling
// ourselves.
BSONObj newObj;
const char* source = NULL;
mutablebson::DamageVector damages;
bool inPlace = doc.getInPlaceUpdates(&damages, &source);
if ( inPlace && !driver.modsAffectIndices() ) {
// All updates were in place. Apply them via durability and writing pointer.
mutablebson::DamageVector::const_iterator where = damages.begin();
const mutablebson::DamageVector::const_iterator end = damages.end();
for( ; where != end; ++where ) {
const char* sourcePtr = source + where->sourceOffset;
void* targetPtr = getDur().writingPtr(
const_cast<char*>(oldObj.objdata()) + where->targetOffset,
where->size);
std::memcpy(targetPtr, sourcePtr, where->size);
Example 15: update
//......... some of this code has been omitted .........
// deemed duplicated. The final 'numMatched' and 'nscanned' numbers may differ for
// that reason.
// TODO: Do we want to pull this out of the underlying query plan?
opDebug->nscanned++;
// Found a matching document
opDebug->nscannedObjects++;
numMatched++;
// Ask the driver to apply the mods. It may be that the driver can apply those "in
// place", that is, some values of the old document just get adjusted without any
// change to the binary layout on the bson layer. It may be that a whole new
document is needed to accommodate the new bson layout of the resulting document.
doc.reset(oldObj, mutablebson::Document::kInPlaceEnabled);
BSONObj logObj;
FieldRefSet updatedFields;
Status status = Status::OK();
if (!driver->needMatchDetails()) {
// If we don't need match details, avoid doing the rematch
status = driver->update(StringData(), &doc, &logObj, &updatedFields);
}
else {
// If there was a matched field, obtain it.
MatchDetails matchDetails;
matchDetails.requestElemMatchKey();
dassert(cq);
verify(cq->root()->matchesBSON(oldObj, &matchDetails));
string matchedField;
if (matchDetails.hasElemMatchKey())
matchedField = matchDetails.elemMatchKey();
// TODO: Right now, each mod checks in 'prepare' that if it needs positional
// data, that a non-empty StringData() was provided. In principle, we could do
// that check here in an else clause to the above conditional and remove the
// checks from the mods.
status = driver->update(matchedField, &doc, &logObj, &updatedFields);
}
if (!status.isOK()) {
uasserted(16837, status.reason());
}
// Ensure _id exists and is first
uassertStatusOK(ensureIdAndFirst(doc));
// If the driver applied the mods in place, we can ask the mutable for what
// changed. We call those changes "damages". :) We use the damages to inform the
// journal what was changed, and then apply them to the original document
// ourselves. If, however, the driver applied the mods out of place, we ask it to
// generate a new, modified document for us. In that case, the file manager will
// take care of the journaling details for us.
//
// This code flow is admittedly odd. But, right now, journaling is baked in the file
manager. And if we aren't using the file manager, we have to do journaling
// ourselves.
bool docWasModified = false;
BSONObj newObj;
const char* source = NULL;
bool inPlace = doc.getInPlaceUpdates(&damages, &source);