本文整理汇总了C++中WorkingSetMember::hasRecordId方法的典型用法代码示例。如果您正苦于以下问题:C++ WorkingSetMember::hasRecordId方法的具体用法?C++ WorkingSetMember::hasRecordId怎么用?C++ WorkingSetMember::hasRecordId使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 WorkingSetMember 的用法示例。
在下文中一共展示了WorkingSetMember::hasRecordId方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: run
// Test: a DeleteStage configured with returnDeleted=true must delete the
// matching document and hand it back as an OWNED_OBJ WorkingSetMember that
// carries no RecordId (the record is gone, so the member must own its copy).
void run() {
// Various variables we'll need.
dbtests::WriteContextForTests ctx(&_opCtx, nss.ns());
Collection* coll = ctx.getCollection();
ASSERT(coll);
const int targetDocIndex = 0;
const BSONObj query = BSON("foo" << BSON("$gte" << targetDocIndex));
const auto ws = make_unique<WorkingSet>();
const unique_ptr<CanonicalQuery> cq(canonicalize(query));
// Get the RecordIds that would be returned by an in-order scan.
vector<RecordId> recordIds;
getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds);
// Configure a QueuedDataStage to pass the first object in the collection back in a
// RID_AND_OBJ state.
auto qds = make_unique<QueuedDataStage>(&_opCtx, ws.get());
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
member->recordId = recordIds[targetDocIndex];
const BSONObj oldDoc = BSON("_id" << targetDocIndex << "foo" << targetDocIndex);
member->obj = Snapshotted<BSONObj>(SnapshotId(), oldDoc);
ws->transitionToRecordIdAndObj(id);
qds->pushBack(id);
// Configure the delete; returnDeleted asks the stage to return the doc it removed.
auto deleteParams = std::make_unique<DeleteStageParams>();
deleteParams->returnDeleted = true;
deleteParams->canonicalQuery = cq.get();
// The DeleteStage takes ownership of the queued-data child (qds.release()).
const auto deleteStage = make_unique<DeleteStage>(
&_opCtx, std::move(deleteParams), ws.get(), coll, qds.release());
const DeleteStats* stats = static_cast<const DeleteStats*>(deleteStage->getSpecificStats());
// Should return advanced.
id = WorkingSet::INVALID_ID;
PlanStage::StageState state = deleteStage->work(&id);
ASSERT_EQUALS(PlanStage::ADVANCED, state);
// Make sure the returned value is what we expect it to be.
// Should give us back a valid id.
ASSERT_TRUE(WorkingSet::INVALID_ID != id);
WorkingSetMember* resultMember = ws->get(id);
// With an owned copy of the object, with no RecordId.
ASSERT_TRUE(resultMember->hasOwnedObj());
ASSERT_FALSE(resultMember->hasRecordId());
ASSERT_EQUALS(resultMember->getState(), WorkingSetMember::OWNED_OBJ);
ASSERT_TRUE(resultMember->obj.value().isOwned());
// Should be the old value.
ASSERT_BSONOBJ_EQ(resultMember->obj.value(), oldDoc);
// Should have done the delete.
ASSERT_EQUALS(stats->docsDeleted, 1U);
// That should be it.
id = WorkingSet::INVALID_ID;
ASSERT_EQUALS(PlanStage::IS_EOF, deleteStage->work(&id));
}
示例2: workBackwardsScan
// Works the backwards collection scan one step, looking for the first
// document (in reverse order) that falls outside '_filter'.
PlanStage::StageState OplogStart::workBackwardsScan(WorkingSetID* out) {
    const PlanStage::StageState childState = child()->work(out);

    // EOF while scanning backwards: the whole oplog is inside the filter, so
    // the caller should just start from the beginning.
    if (PlanStage::IS_EOF == childState) {
        _done = true;
        return childState;
    }

    // Pass through NEED_TIME / NEED_YIELD / FAILURE untouched.
    if (PlanStage::ADVANCED != childState) {
        return childState;
    }

    WorkingSetMember* wsm = _workingSet->get(*out);
    verify(wsm->hasObj());
    verify(wsm->hasRecordId());

    if (_filter->matchesBSON(wsm->obj.value())) {
        // Still matching the filter; discard this result and keep scanning.
        _workingSet->free(*out);
        return PlanStage::NEED_TIME;
    }

    // First non-matching document found — this is our starting position.
    // Its RecordId is returned in *out.
    _done = true;
    return PlanStage::ADVANCED;
}
示例3: readFirstChild
// Reads one result from child 0 into the hash table keyed on RecordId.
// Only called while '_currentChild' is still 0 (the table-building phase).
PlanStage::StageState AndHashStage::readFirstChild(WorkingSetID* out) {
    verify(_currentChild == 0);

    WorkingSetID wsid = WorkingSet::INVALID_ID;
    const StageState childStatus = workChild(0, &wsid);

    switch (childStatus) {
        case PlanStage::ADVANCED: {
            WorkingSetMember* member = _ws->get(wsid);

            // The child must give us a WorkingSetMember with a record id, since we
            // intersect index keys based on the record id. The planner ensures that
            // the child stage can never produce a WSM with no record id.
            invariant(member->hasRecordId());

            const bool inserted = _dataMap.insert(std::make_pair(member->recordId, wsid)).second;
            if (!inserted) {
                // Already had this RecordId in the map; this should only happen when a
                // more recent snapshot produces a newer copy of the same doc. Keep the
                // buffered copy and throw the newer one away.
                _ws->free(wsid);
                return PlanStage::NEED_TIME;
            }

            // Own the underlying BSONObj so a yield cannot invalidate it.
            member->makeObjOwnedIfNeeded();

            // Update memory stats.
            _memUsage += member->getMemUsage();
            return PlanStage::NEED_TIME;
        }
        case PlanStage::IS_EOF:
            // Done reading child 0.
            _currentChild = 1;

            // An empty first child means the intersection is empty; stop early.
            if (_dataMap.empty()) {
                _hashingChildren = false;
                return PlanStage::IS_EOF;
            }

            _specificStats.mapAfterChild.push_back(_dataMap.size());
            return PlanStage::NEED_TIME;
        case PlanStage::FAILURE:
        case PlanStage::DEAD:
            // The failing stage is responsible for allocating a working set member
            // with the error details.
            invariant(WorkingSet::INVALID_ID != wsid);
            *out = wsid;
            return childStatus;
        case PlanStage::NEED_YIELD:
            *out = wsid;
            return childStatus;
        default:
            // NEED_TIME and anything else: just propagate.
            return childStatus;
    }
}
示例4: doInvalidate
// Protects buffered trial-period results from an invalidated RecordId by
// force-fetching any member that still references it.
void CachedPlanStage::doInvalidate(OperationContext* txn,
                                   const RecordId& dl,
                                   InvalidationType type) {
    for (const WorkingSetID resultId : _results) {
        WorkingSetMember* member = _ws->get(resultId);
        if (member->hasRecordId() && member->recordId == dl) {
            WorkingSetCommon::fetchAndInvalidateRecordId(txn, member, _collection);
        }
    }
}
示例5: doInvalidate
// It's possible that the recordId getting invalidated is the one we're about
// to fetch ('_idRetrying'). In that case do a "forced fetch" now and put the
// WSM in owned-object state so it no longer depends on the RecordId.
void FetchStage::doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) {
    if (WorkingSet::INVALID_ID == _idRetrying) {
        return;  // Nothing pending; nothing to protect.
    }
    WorkingSetMember* retryMember = _ws->get(_idRetrying);
    if (retryMember->hasRecordId() && (retryMember->recordId == dl)) {
        // Fetch it now and kill the recordId.
        WorkingSetCommon::fetchAndInvalidateRecordId(opCtx, retryMember, _collection);
    }
}
示例6: doInvalidate
// Handles invalidation of the RecordId currently being paged in, if any.
void IDHackStage::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
    // Since updates can't mutate the '_id' field, we can ignore mutation
    // invalidations entirely.
    if (INVALIDATION_MUTATION == type) {
        return;
    }

    // Nothing is being paged in, so there is nothing to protect.
    if (WorkingSet::INVALID_ID == _idBeingPagedIn) {
        return;
    }

    // The RecordId being invalidated may be the one we're about to fetch; do a
    // "forced fetch" now and leave the WSM in owned-object state.
    WorkingSetMember* pagedMember = _workingSet->get(_idBeingPagedIn);
    if (pagedMember->hasRecordId() && (pagedMember->recordId == dl)) {
        WorkingSetCommon::fetchAndInvalidateRecordId(txn, pagedMember, _collection);
    }
}
示例7: fetch
// static
// Fetches the document for a WSM that currently holds only a RecordId,
// transitioning it to RID_AND_OBJ. Returns false when the record no longer
// exists, or when a "suspicious" member's re-read document no longer
// generates the index keys the plan relied on — in either case the caller
// should discard the member.
bool WorkingSetCommon::fetch(OperationContext* txn,
WorkingSet* workingSet,
WorkingSetID id,
unowned_ptr<SeekableRecordCursor> cursor) {
WorkingSetMember* member = workingSet->get(id);
// The RecordFetcher should already have been transferred out of the WSM and used.
invariant(!member->hasFetcher());
// We should have a RecordId but need to retrieve the obj. Get the obj now and reset all WSM
// state appropriately.
invariant(member->hasRecordId());
member->obj.reset();
auto record = cursor->seekExact(member->recordId);
if (!record) {
// The record was deleted out from under us; the member is unusable.
return false;
}
// Attach the freshly-read document, stamped with the current snapshot.
member->obj = {txn->recoveryUnit()->getSnapshotId(), record->data.releaseToBson()};
if (member->isSuspicious) {
// Make sure that all of the keyData is still valid for this copy of the document.
// This ensures both that index-provided filters and sort orders still hold.
// TODO provide a way for the query planner to opt out of this checking if it is
// unneeded due to the structure of the plan.
invariant(!member->keyData.empty());
for (size_t i = 0; i < member->keyData.size(); i++) {
BSONObjSet keys;
// There's no need to compute the prefixes of the indexed fields that cause the index to
// be multikey when ensuring the keyData is still valid.
MultikeyPaths* multikeyPaths = nullptr;
member->keyData[i].index->getKeys(member->obj.value(), &keys, multikeyPaths);
if (!keys.count(member->keyData[i].keyData)) {
// document would no longer be at this position in the index.
return false;
}
}
member->isSuspicious = false;
}
// Index key data is no longer needed once we have the full document.
member->keyData.clear();
workingSet->transitionToRecordIdAndObj(id);
return true;
}
示例8: getRecordIds
// Runs a bare collection scan in the given direction and appends the
// RecordId of every document it produces, in scan order, to 'out'.
void getRecordIds(Collection* collection,
                  CollectionScanParams::Direction direction,
                  vector<RecordId>* out) {
    WorkingSet ws;

    CollectionScanParams params;
    params.direction = direction;
    params.tailable = false;

    unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, collection, params, &ws, NULL));
    while (!scan->isEOF()) {
        WorkingSetID wsid = WorkingSet::INVALID_ID;
        PlanStage::StageState state = scan->work(&wsid);
        if (state != PlanStage::ADVANCED) {
            continue;  // NEED_TIME etc.; keep working the scan.
        }
        WorkingSetMember* member = ws.get(wsid);
        verify(member->hasRecordId());
        out->push_back(member->recordId);
    }
}
示例9: doInvalidate
// Protects buffered (not-yet-returned) merge results from an invalidated
// RecordId, and updates the dedup set on deletions.
void MergeSortStage::doInvalidate(OperationContext* txn,
                                  const RecordId& dl,
                                  InvalidationType type) {
    // Any buffered member still pointing at the invalidated RecordId must be
    // force-fetched so its data survives the invalidation.
    for (StageWithValue& valueEntry : _mergingData) {
        WorkingSetMember* member = _ws->get(valueEntry.id);
        if (member->hasRecordId() && (dl == member->recordId)) {
            WorkingSetCommon::fetchAndInvalidateRecordId(txn, member, _collection);
            ++_specificStats.forcedFetches;
        }
    }

    // If we see the deleted RecordId again it is not the same record as it
    // once was, so we still want to return it: forget it for dedup purposes.
    if (_dedup && INVALIDATION_DELETION == type) {
        _seen.erase(dl);
    }
}
示例10: doWork
// Merge-sort driver. Phase 1: make sure every non-EOF child has contributed
// exactly one buffered result (optionally deduping by RecordId). Phase 2:
// pop the smallest buffered result from the priority queue and return it.
// NOTE(review): the source elides the function's tail after the final
// return — trailing code (if any) is not visible here.
PlanStage::StageState MergeSortStage::doWork(WorkingSetID* out) {
if (isEOF()) {
return PlanStage::IS_EOF;
}
if (!_noResultToMerge.empty()) {
// We have some child that we don't have a result from. Each child must have a result
// in order to pick the minimum result among all our children. Work a child.
PlanStage* child = _noResultToMerge.front();
WorkingSetID id = WorkingSet::INVALID_ID;
StageState code = child->work(&id);
if (PlanStage::ADVANCED == code) {
WorkingSetMember* member = _ws->get(id);
// If we're deduping...
if (_dedup) {
if (!member->hasRecordId()) {
// Can't dedup data unless there's a RecordId. We go ahead and use its
// result.
_noResultToMerge.pop();
} else {
++_specificStats.dupsTested;
// ...and there's a RecordId and we've seen the RecordId before
if (_seen.end() != _seen.find(member->recordId)) {
// ...drop it.
_ws->free(id);
++_specificStats.dupsDropped;
return PlanStage::NEED_TIME;
} else {
// Otherwise, note that we've seen it.
_seen.insert(member->recordId);
// We're going to use the result from the child, so we remove it from
// the queue of children without a result.
_noResultToMerge.pop();
}
}
} else {
// Not deduping. We use any result we get from the child. Remove the child
// from the queue of things without a result.
_noResultToMerge.pop();
}
// Store the result in our list.
StageWithValue value;
value.id = id;
value.stage = child;
// Ensure that the BSONObj underlying the WorkingSetMember is owned in case we yield.
member->makeObjOwnedIfNeeded();
_mergingData.push_front(value);
// Insert the result (indirectly) into our priority queue.
_merging.push(_mergingData.begin());
return PlanStage::NEED_TIME;
} else if (PlanStage::IS_EOF == code) {
// There are no more results possible from this child. Don't bother with it
// anymore.
_noResultToMerge.pop();
return PlanStage::NEED_TIME;
} else if (PlanStage::FAILURE == code || PlanStage::DEAD == code) {
*out = id;
// If a stage fails, it may create a status WSM to indicate why it
// failed, in which case 'id' is valid. If ID is invalid, we
// create our own error message.
if (WorkingSet::INVALID_ID == id) {
mongoutils::str::stream ss;
ss << "merge sort stage failed to read in results from child";
Status status(ErrorCodes::InternalError, ss);
*out = WorkingSetCommon::allocateStatusMember(_ws, status);
}
return code;
} else {
if (PlanStage::NEED_YIELD == code) {
*out = id;
}
return code;
}
}
// If we're here, for each non-EOF child, we have a valid WSID.
verify(!_merging.empty());
// Get the 'min' WSID. _merging is a priority queue so its top is the smallest.
MergingRef top = _merging.top();
_merging.pop();
// Since we're returning the WSID that came from top->stage, we need to work(...) it again
// to get a new result.
_noResultToMerge.push(top->stage);
// Save the ID that we're returning and remove the returned result from our data.
WorkingSetID idToTest = top->id;
_mergingData.erase(top);
// Return the min.
*out = idToTest;
return PlanStage::ADVANCED;
//.........这里部分代码省略.........
示例11: getNextImpl
// Core result-production loop: handles the failpoint, kill checks, stashed
// results, yielding, and translation of the root stage's StageState into a
// PlanExecutor ExecState. 'objOut' and 'dlOut' are optional out-params for
// the document and RecordId respectively.
// NOTE(review): the source elides the tail of the loop (handling after the
// ADVANCED branch, e.g. NEED_YIELD / NEED_TIME / IS_EOF) — not visible here.
PlanExecutor::ExecState PlanExecutor::getNextImpl(Snapshotted<BSONObj>* objOut, RecordId* dlOut) {
if (MONGO_FAIL_POINT(planExecutorAlwaysFails)) {
Status status(ErrorCodes::OperationFailed,
str::stream() << "PlanExecutor hit planExecutorAlwaysFails fail point");
*objOut =
Snapshotted<BSONObj>(SnapshotId(), WorkingSetCommon::buildMemberStatusObject(status));
return PlanExecutor::FAILURE;
}
invariant(_currentState == kUsable);
if (isMarkedAsKilled()) {
// Report the kill reason to the caller via a status object, if requested.
if (NULL != objOut) {
Status status(ErrorCodes::OperationFailed,
str::stream() << "Operation aborted because: " << *_killReason);
*objOut = Snapshotted<BSONObj>(SnapshotId(),
WorkingSetCommon::buildMemberStatusObject(status));
}
return PlanExecutor::DEAD;
}
// Serve previously stashed results before working the plan tree again.
if (!_stash.empty()) {
invariant(objOut && !dlOut);
*objOut = {SnapshotId(), _stash.front()};
_stash.pop();
return PlanExecutor::ADVANCED;
}
// When a stage requests a yield for document fetch, it gives us back a RecordFetcher*
// to use to pull the record into memory. We take ownership of the RecordFetcher here,
// deleting it after we've had a chance to do the fetch. For timing-based yields, we
// just pass a NULL fetcher.
unique_ptr<RecordFetcher> fetcher;
// Incremented on every writeConflict, reset to 0 on any successful call to _root->work.
size_t writeConflictsInARow = 0;
for (;;) {
// These are the conditions which can cause us to yield:
// 1) The yield policy's timer elapsed, or
// 2) some stage requested a yield due to a document fetch, or
// 3) we need to yield and retry due to a WriteConflictException.
// In all cases, the actual yielding happens here.
if (_yieldPolicy->shouldYield()) {
if (!_yieldPolicy->yield(fetcher.get())) {
// A return of false from a yield should only happen if we've been killed during the
// yield.
invariant(isMarkedAsKilled());
if (NULL != objOut) {
Status status(ErrorCodes::OperationFailed,
str::stream() << "Operation aborted because: " << *_killReason);
*objOut = Snapshotted<BSONObj>(
SnapshotId(), WorkingSetCommon::buildMemberStatusObject(status));
}
return PlanExecutor::DEAD;
}
}
// We're done using the fetcher, so it should be freed. We don't want to
// use the same RecordFetcher twice.
fetcher.reset();
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState code = _root->work(&id);
// Any state other than NEED_YIELD means the work call made progress.
if (code != PlanStage::NEED_YIELD)
writeConflictsInARow = 0;
if (PlanStage::ADVANCED == code) {
WorkingSetMember* member = _workingSet->get(id);
bool hasRequestedData = true;
if (NULL != objOut) {
if (WorkingSetMember::RID_AND_IDX == member->getState()) {
// Covered plans: the index key stands in for the document, but
// only when exactly one key is present.
if (1 != member->keyData.size()) {
_workingSet->free(id);
hasRequestedData = false;
} else {
// TODO: currently snapshot ids are only associated with documents, and
// not with index keys.
*objOut = Snapshotted<BSONObj>(SnapshotId(), member->keyData[0].keyData);
}
} else if (member->hasObj()) {
*objOut = member->obj;
} else {
// Caller wants a document but this member has none; discard it.
_workingSet->free(id);
hasRequestedData = false;
}
}
if (NULL != dlOut) {
if (member->hasRecordId()) {
*dlOut = member->recordId;
} else {
// Caller wants a RecordId but this member has none; discard it.
_workingSet->free(id);
hasRequestedData = false;
}
}
//.........这里部分代码省略.........
示例12: doWork
// Pulls one result from the child (or resumes '_idRetrying' after a yield),
// fetches the full document for RID_AND_IDX members, and forwards it through
// returnIfMatches. May request a NEED_YIELD for an async fetch or on a
// WriteConflictException.
PlanStage::StageState FetchStage::doWork(WorkingSetID* out) {
if (isEOF()) {
return PlanStage::IS_EOF;
}
// Either retry the last WSM we worked on or get a new one from our child.
WorkingSetID id;
StageState status;
if (_idRetrying == WorkingSet::INVALID_ID) {
status = child()->work(&id);
} else {
// Resume the member whose fetch we deferred; clear the retry marker so we
// don't loop on it.
status = ADVANCED;
id = _idRetrying;
_idRetrying = WorkingSet::INVALID_ID;
}
if (PlanStage::ADVANCED == status) {
WorkingSetMember* member = _ws->get(id);
// If there's an obj there, there is no fetching to perform.
if (member->hasObj()) {
++_specificStats.alreadyHasObj;
} else {
// We need a valid RecordId to fetch from and this is the only state that has one.
verify(WorkingSetMember::RID_AND_IDX == member->getState());
verify(member->hasRecordId());
try {
// Lazily open the collection cursor on first use.
if (!_cursor)
_cursor = _collection->getCursor(getOpCtx());
if (auto fetcher = _cursor->fetcherForId(member->recordId)) {
// There's something to fetch. Hand the fetcher off to the WSM, and pass up
// a fetch request.
_idRetrying = id;
member->setFetcher(fetcher.release());
*out = id;
return NEED_YIELD;
}
// The doc is already in memory, so go ahead and grab it. Now we have a RecordId
// as well as an unowned object
if (!WorkingSetCommon::fetch(getOpCtx(), _ws, id, _cursor)) {
// The record vanished (or its index entry is stale); drop the member.
_ws->free(id);
return NEED_TIME;
}
} catch (const WriteConflictException&) {
// Ensure that the BSONObj underlying the WorkingSetMember is owned because it may
// be freed when we yield.
member->makeObjOwnedIfNeeded();
_idRetrying = id;
*out = WorkingSet::INVALID_ID;
return NEED_YIELD;
}
}
return returnIfMatches(member, id, out);
} else if (PlanStage::FAILURE == status || PlanStage::DEAD == status) {
// The stage which produces a failure is responsible for allocating a working set member
// with error details.
invariant(WorkingSet::INVALID_ID != id);
*out = id;
return status;
} else if (PlanStage::NEED_YIELD == status) {
*out = id;
}
return status;
}
示例13: doWork
//.........这里部分代码省略.........
// NOTE(review): this is the visible tail of a doWork implementation
// (apparently AndHashStage) — the source elides the lines above, so this
// fragment begins mid-function inside the look-ahead setup loop.
for (size_t j = 0; j < kLookAheadWorks; ++j) {
// Cache the result in _lookAheadResults[i].
StageState childStatus = child->work(&_lookAheadResults[i]);
if (PlanStage::IS_EOF == childStatus) {
// A child went right to EOF. Bail out.
_hashingChildren = false;
_dataMap.clear();
return PlanStage::IS_EOF;
} else if (PlanStage::ADVANCED == childStatus) {
// Ensure that the BSONObj underlying the WorkingSetMember is owned in case we
// yield.
_ws->get(_lookAheadResults[i])->makeObjOwnedIfNeeded();
break; // Stop looking at this child.
} else if (PlanStage::FAILURE == childStatus || PlanStage::DEAD == childStatus) {
// The stage which produces a failure is responsible for allocating a working
// set member with error details.
invariant(WorkingSet::INVALID_ID != _lookAheadResults[i]);
*out = _lookAheadResults[i];
_hashingChildren = false;
_dataMap.clear();
return childStatus;
}
// We ignore NEED_TIME. TODO: what do we want to do if we get NEED_YIELD here?
}
}
// We did a bunch of work above, return NEED_TIME to be fair.
return PlanStage::NEED_TIME;
}
// An AND is either reading the first child into the hash table, probing against the hash
// table with subsequent children, or checking the last child's results to see if they're
// in the hash table.
// We read the first child into our hash table.
if (_hashingChildren) {
// Check memory usage of previously hashed results.
if (_memUsage > _maxMemUsage) {
mongoutils::str::stream ss;
ss << "hashed AND stage buffered data usage of " << _memUsage
<< " bytes exceeds internal limit of " << kDefaultMaxMemUsageBytes << " bytes";
Status status(ErrorCodes::Overflow, ss);
*out = WorkingSetCommon::allocateStatusMember(_ws, status);
return PlanStage::FAILURE;
}
if (0 == _currentChild) {
return readFirstChild(out);
} else if (_currentChild < _children.size() - 1) {
return hashOtherChildren(out);
} else {
_hashingChildren = false;
// We don't hash our last child. Instead, we probe the table created from the
// previous children, returning results in the order of the last child.
// Fall through to below.
}
}
// Returning results. We read from the last child and return the results that are in our
// hash map.
// We should be EOF if we're not hashing results and the dataMap is empty.
verify(!_dataMap.empty());
// We probe _dataMap with the last child.
verify(_currentChild == _children.size() - 1);
// Get the next result for the (_children.size() - 1)-th child.
StageState childStatus = workChild(_children.size() - 1, out);
if (PlanStage::ADVANCED != childStatus) {
return childStatus;
}
// We know that we've ADVANCED. See if the WSM is in our table.
WorkingSetMember* member = _ws->get(*out);
// The child must give us a WorkingSetMember with a record id, since we intersect index keys
// based on the record id. The planner ensures that the child stage can never produce an WSM
// with no record id.
invariant(member->hasRecordId());
DataMap::iterator it = _dataMap.find(member->recordId);
if (_dataMap.end() == it) {
// Child's output wasn't in every previous child. Throw it out.
_ws->free(*out);
return PlanStage::NEED_TIME;
} else {
// Child's output was in every previous child. Merge any key data in
// the child's output and free the child's just-outputted WSM.
WorkingSetID hashID = it->second;
_dataMap.erase(it);
AndCommon::mergeFrom(_ws, hashID, *member);
_ws->free(*out);
*out = hashID;
return PlanStage::ADVANCED;
}
}
示例14: run
// Test: merge-sorts 20 single-document indexes by "foo", then invalidates
// one buffered RecordId mid-iteration and verifies the stage force-fetched
// the document (returned with an object but no RecordId).
// NOTE(review): the source elides the end of this function — the final loop
// body continues past what is visible here.
void run() {
OldClientWriteContext ctx(&_txn, ns());
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
// Create the collection on first use.
if (!coll) {
WriteUnitOfWork wuow(&_txn);
coll = db->createCollection(&_txn, ns());
wuow.commit();
}
WorkingSet ws;
// Sort by foo:1
MergeSortStageParams msparams;
msparams.pattern = BSON("foo" << 1);
auto ms = make_unique<MergeSortStage>(&_txn, msparams, &ws, coll);
// One index scan per index; full-range bounds so each scan returns its doc.
IndexScanParams params;
params.bounds.isSimpleRange = true;
params.bounds.startKey = objWithMinKey(1);
params.bounds.endKey = objWithMaxKey(1);
params.bounds.endKeyInclusive = true;
params.direction = 1;
// Index 'a'+i has foo equal to 'i'.
int numIndices = 20;
for (int i = 0; i < numIndices; ++i) {
// 'a', 'b', ...
string index(1, 'a' + i);
insert(BSON(index << 1 << "foo" << i));
BSONObj indexSpec = BSON(index << 1 << "foo" << 1);
addIndex(indexSpec);
params.descriptor = getIndex(indexSpec, coll);
ms->addChild(new IndexScan(&_txn, params, &ws, NULL));
}
set<RecordId> recordIds;
getRecordIds(&recordIds, coll);
set<RecordId>::iterator it = recordIds.begin();
// Get 10 results. Should be getting results in order of 'recordIds'.
int count = 0;
while (!ms->isEOF() && count < 10) {
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState status = ms->work(&id);
if (PlanStage::ADVANCED != status) {
continue;
}
WorkingSetMember* member = ws.get(id);
ASSERT_EQUALS(member->recordId, *it);
BSONElement elt;
string index(1, 'a' + count);
ASSERT(member->getFieldDotted(index, &elt));
ASSERT_EQUALS(1, elt.numberInt());
ASSERT(member->getFieldDotted("foo", &elt));
ASSERT_EQUALS(count, elt.numberInt());
++count;
++it;
}
// Invalidate recordIds[11]. Should force a fetch and return the deleted document.
ms->saveState();
ms->invalidate(&_txn, *it, INVALIDATION_DELETION);
ms->restoreState();
// Make sure recordIds[11] was fetched for us.
{
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState status;
do {
status = ms->work(&id);
} while (PlanStage::ADVANCED != status);
WorkingSetMember* member = ws.get(id);
// Force-fetched member: owns its document, no longer tied to a RecordId.
ASSERT(!member->hasRecordId());
ASSERT(member->hasObj());
string index(1, 'a' + count);
BSONElement elt;
ASSERT_TRUE(member->getFieldDotted(index, &elt));
ASSERT_EQUALS(1, elt.numberInt());
ASSERT(member->getFieldDotted("foo", &elt));
ASSERT_EQUALS(count, elt.numberInt());
++it;
++count;
}
// And get the rest.
while (!ms->isEOF()) {
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState status = ms->work(&id);
if (PlanStage::ADVANCED != status) {
continue;
}
WorkingSetMember* member = ws.get(id);
ASSERT_EQUALS(member->recordId, *it);
//.........这里部分代码省略.........
示例15: doWork
// Blocking sort: buffers every child result (with its precomputed sort key),
// sorts once the child is exhausted, then drains the sorted buffer.
PlanStage::StageState SortStage::doWork(WorkingSetID* out) {
    // Fail the whole operation if buffered data exceeds the blocking-sort budget.
    const size_t maxBytes = static_cast<size_t>(internalQueryExecMaxBlockingSortBytes.load());
    if (_memUsage > maxBytes) {
        str::stream ss;
        ss << "Sort operation used more than the maximum " << maxBytes
           << " bytes of RAM. Add an index, or specify a smaller limit.";
        Status status(ErrorCodes::OperationFailed, ss);
        *out = WorkingSetCommon::allocateStatusMember(_ws, status);
        return PlanStage::FAILURE;
    }

    if (isEOF()) {
        return PlanStage::IS_EOF;
    }

    if (_sorted) {
        // Drain phase: hand out buffered results in sorted order.
        verify(_resultIterator != _data.end());
        verify(_sorted);
        *out = _resultIterator->wsid;
        _resultIterator++;
        return PlanStage::ADVANCED;
    }

    // Fill phase: still reading in results to sort.
    WorkingSetID id = WorkingSet::INVALID_ID;
    StageState code = child()->work(&id);

    switch (code) {
        case PlanStage::ADVANCED: {
            WorkingSetMember* member = _ws->get(id);

            SortableDataItem item;
            item.wsid = id;

            // We extract the sort key from the WSM's computed data. This must have
            // been generated by a SortKeyGeneratorStage descendent in the execution
            // tree.
            auto sortKeyComputedData =
                static_cast<const SortKeyComputedData*>(member->getComputed(WSM_SORT_KEY));
            item.sortKey = sortKeyComputedData->getSortKey();

            if (member->hasRecordId()) {
                // The RecordId breaks ties when sorting two WSMs with the same sort key.
                item.recordId = member->recordId;
            }

            addToBuffer(item);
            return PlanStage::NEED_TIME;
        }
        case PlanStage::IS_EOF:
            // TODO: We don't need the lock for this. We could ask for a yield and do
            // this work unlocked. Also, this is performing a lot of work for one call
            // to work(...)
            sortBuffer();
            _resultIterator = _data.begin();
            _sorted = true;
            return PlanStage::NEED_TIME;
        case PlanStage::FAILURE:
            // The stage which produces a failure is responsible for allocating a
            // working set member with error details.
            invariant(WorkingSet::INVALID_ID != id);
            *out = id;
            return code;
        case PlanStage::NEED_YIELD:
            *out = id;
            return code;
        default:
            return code;
    }
}