This article collects typical usage examples of the C++ method PlanExecutor::saveState. If you are wondering what PlanExecutor::saveState does in practice, or how to call it, the hand-picked code examples below may help. You can also explore further usage examples of the class it belongs to, PlanExecutor.
The following shows 8 code examples of the PlanExecutor::saveState method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
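Before the examples, here is a minimal sketch of the save/restore pattern they all rely on: saveState() detaches the PlanExecutor from its storage-engine resources before the caller gives up its locks, and restoreState() re-establishes that state before the executor is used again. This sketch is not taken from the MongoDB source; the helpers dropLocksAndWait() and reacquireLocks() are hypothetical placeholders for whatever yielding mechanism the caller uses, and only the PlanExecutor calls themselves (getNext, saveState, restoreState) appear in the real examples below.
// Minimal sketch (assumes the MongoDB query headers); NOT from the MongoDB source.
void yieldAndContinue(OperationContext* txn, PlanExecutor* exec) {
    BSONObj obj;
    PlanExecutor::ExecState state = exec->getNext(&obj, NULL);
    if (PlanExecutor::IS_EOF == state) {
        // Detach the executor from its storage-engine resources before the caller
        // releases its locks (e.g. to block on a capped-insert notifier, or to hand
        // the executor over to a ClientCursor for a later getMore).
        exec->saveState();
        dropLocksAndWait(txn);   // hypothetical: release locks and wait for new data
        reacquireLocks(txn);     // hypothetical: reacquire the collection locks
        // Re-establish the executor's state before producing more results.
        exec->restoreState();
        exec->getNext(&obj, NULL);
    }
}
The same pairing shows up when an executor is stashed in a ClientCursor: saveState() is called before the cursor is cached, and restoreState() runs on the next getMore.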
Example 1: getMore
//......... part of the code omitted here .........
}
if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) {
// Propagate this error to caller.
if (PlanExecutor::FAILURE == state) {
scoped_ptr<PlanStageStats> stats(exec->getStats());
error() << "Plan executor error, stats: "
<< Explain::statsToBSON(*stats);
uasserted(17406, "getMore executor error: " +
WorkingSetCommon::toStatusString(obj));
}
// In the old system tailable capped cursors would be killed off at the
// cursorid level. If a tailable capped cursor is nuked the cursorid
// would vanish.
//
// In the new system they die and are cleaned up later (or time out).
// So this is where we get to remove the cursorid.
if (0 == numResults) {
resultFlags = ResultFlag_CursorNotFound;
}
}
const bool shouldSaveCursor =
shouldSaveCursorGetMore(state, exec, isCursorTailable(cc));
// In order to deregister a cursor we must be holding the DB + collection lock;
// aggregation cursors dropped these locks earlier, so reacquire them here.
if (cc->isAggCursor()) {
invariant(NULL == ctx.get());
unpinDBLock.reset(new Lock::DBLock(txn->lockState(), nss.db(), MODE_IS));
unpinCollLock.reset(new Lock::CollectionLock(txn->lockState(), nss.ns(), MODE_IS));
}
// Our two possible ClientCursorPin cleanup paths are:
// 1) If the cursor is not going to be saved, we call deleteUnderlying() on the pin.
// 2) If the cursor is going to be saved, we simply let the pin go out of scope. In
// this case, the pin's destructor will be invoked, which will call release() on the
// pin. Because our ClientCursorPin is declared after our lock is declared, this
// will happen under the lock.
if (!shouldSaveCursor) {
ruSwapper.reset();
ccPin.deleteUnderlying();
// cc is now invalid, as is the executor
cursorid = 0;
cc = NULL;
curop.debug().cursorExhausted = true;
LOG(5) << "getMore NOT saving client cursor, ended with state "
<< PlanExecutor::statestr(state)
<< endl;
}
else {
// Continue caching the ClientCursor.
cc->incPos(numResults);
exec->saveState();
LOG(5) << "getMore saving client cursor ended with state "
<< PlanExecutor::statestr(state)
<< endl;
if (PlanExecutor::IS_EOF == state && (queryOptions & QueryOption_CursorTailable)) {
if (!txn->getClient()->isInDirectClient()) {
// Don't stash the RU. Get a new one on the next getMore.
ruSwapper->dismiss();
}
if ((queryOptions & QueryOption_AwaitData)
&& (numResults == 0)
&& (pass < 1000)) {
// Bubble up to the AwaitData handling code in receivedGetMore which will
// try again.
return NULL;
}
}
// Possibly note slave's position in the oplog.
if ((queryOptions & QueryOption_OplogReplay) && !slaveReadTill.isNull()) {
cc->slaveReadTill(slaveReadTill);
}
exhaust = (queryOptions & QueryOption_Exhaust);
// If the getmore had a time limit, remaining time is "rolled over" back to the
// cursor (for use by future getmore ops).
cc->setLeftoverMaxTimeMicros( curop.getRemainingMaxTimeMicros() );
}
}
QueryResult::View qr = bb.buf();
qr.msgdata().setLen(bb.len());
qr.msgdata().setOperation(opReply);
qr.setResultFlags(resultFlags);
qr.setCursorId(cursorid);
qr.setStartingFrom(startingResult);
qr.setNReturned(numResults);
bb.decouple();
LOG(5) << "getMore returned " << numResults << " results\n";
return qr;
}
Example 2: newGetMore
//......... part of the code omitted here .........
// another getmore. If we receive an EOF or an error, or 'exec' is dead, then we know
// that we will not be producing more results. We indicate that the cursor is closed by
// sending a cursorId of 0 back to the client.
//
// On the other hand, if we retrieve all results necessary for this batch, then
// 'saveClientCursor' is true and we send a valid cursorId back to the client. In
// this case, there may or may not actually be more results (for example, the next call
// to getNext(...) might just return EOF).
bool saveClientCursor = false;
if (PlanExecutor::DEAD == state || PlanExecutor::EXEC_ERROR == state) {
// Propagate this error to caller.
if (PlanExecutor::EXEC_ERROR == state) {
scoped_ptr<PlanStageStats> stats(exec->getStats());
error() << "Plan executor error, stats: "
<< Explain::statsToBSON(*stats);
uasserted(17406, "getMore executor error: " +
WorkingSetCommon::toStatusString(obj));
}
// If we're dead there's no way to get more results.
saveClientCursor = false;
// In the old system tailable capped cursors would be killed off at the
// cursorid level. If a tailable capped cursor is nuked the cursorid
// would vanish.
//
// In the new system they die and are cleaned up later (or time out).
// So this is where we get to remove the cursorid.
if (0 == numResults) {
resultFlags = ResultFlag_CursorNotFound;
}
}
else if (PlanExecutor::IS_EOF == state) {
// EOF is also end of the line unless it's tailable.
saveClientCursor = queryOptions & QueryOption_CursorTailable;
}
else {
verify(PlanExecutor::ADVANCED == state);
saveClientCursor = true;
}
if (!saveClientCursor) {
ruSwapper.reset();
ccPin.deleteUnderlying();
// cc is now invalid, as is the executor
cursorid = 0;
cc = NULL;
QLOG() << "getMore NOT saving client cursor, ended with state "
<< PlanExecutor::statestr(state)
<< endl;
}
else {
// Continue caching the ClientCursor.
cc->incPos(numResults);
exec->saveState();
QLOG() << "getMore saving client cursor ended with state "
<< PlanExecutor::statestr(state)
<< endl;
if (PlanExecutor::IS_EOF == state && (queryOptions & QueryOption_CursorTailable)) {
if (!fromDBDirectClient) {
// Don't stash the RU. Get a new one on the next getMore.
ruSwapper.reset();
delete cc->releaseOwnedRecoveryUnit();
}
if ((queryOptions & QueryOption_AwaitData)
&& (numResults == 0)
&& (pass < 1000)) {
// Bubble up to the AwaitData handling code in receivedGetMore which will
// try again.
return NULL;
}
}
// Possibly note slave's position in the oplog.
if ((queryOptions & QueryOption_OplogReplay) && !slaveReadTill.isNull()) {
cc->slaveReadTill(slaveReadTill);
}
exhaust = (queryOptions & QueryOption_Exhaust);
// If the getmore had a time limit, remaining time is "rolled over" back to the
// cursor (for use by future getmore ops).
cc->setLeftoverMaxTimeMicros( curop.getRemainingMaxTimeMicros() );
}
}
QueryResult::View qr = bb.buf();
qr.msgdata().setLen(bb.len());
qr.msgdata().setOperation(opReply);
qr.setResultFlags(resultFlags);
qr.setCursorId(cursorid);
qr.setStartingFrom(startingResult);
qr.setNReturned(numResults);
bb.decouple();
QLOG() << "getMore returned " << numResults << " results\n";
return qr;
}
Example 3: run
//......... part of the code omitted here .........
if (!shardingState->getVersion(nss.ns()).isWriteCompatibleWith(shardingVersionAtStart)) {
// Version changed while retrieving a PlanExecutor. Terminate the operation,
// signaling that mongos should retry.
throw SendStaleConfigException(nss.ns(),
"version changed during find command",
shardingVersionAtStart,
shardingState->getVersion(nss.ns()));
}
if (!collection) {
// No collection. Just fill out curop indicating that there were zero results and
// there is no ClientCursor id, and then return.
const long long numResults = 0;
const CursorId cursorId = 0;
endQueryOp(txn, *exec, dbProfilingLevel, numResults, cursorId);
appendCursorResponseObject(cursorId, nss.ns(), BSONArray(), &result);
return true;
}
const LiteParsedQuery& pq = exec->getCanonicalQuery()->getParsed();
// 4) If possible, register the execution plan inside a ClientCursor, and pin that
// cursor. In this case, ownership of the PlanExecutor is transferred to the
// ClientCursor, and 'exec' becomes null.
//
// First unregister the PlanExecutor so it can be re-registered with ClientCursor.
exec->deregisterExec();
// Create a ClientCursor containing this plan executor. We don't have to worry
// about leaking it as it's inserted into a global map by its ctor.
ClientCursor* cursor =
new ClientCursor(collection->getCursorManager(),
exec.release(),
nss.ns(),
txn->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(),
pq.getOptions(),
pq.getFilter());
CursorId cursorId = cursor->cursorid();
ClientCursorPin ccPin(collection->getCursorManager(), cursorId);
// On early return, get rid of the cursor.
ScopeGuard cursorFreer = MakeGuard(&ClientCursorPin::deleteUnderlying, ccPin);
invariant(!exec);
PlanExecutor* cursorExec = cursor->getExecutor();
// 5) Stream query results, adding them to a BSONArray as we go.
BSONArrayBuilder firstBatch;
BSONObj obj;
PlanExecutor::ExecState state;
long long numResults = 0;
while (!enoughForFirstBatch(pq, numResults, firstBatch.len()) &&
PlanExecutor::ADVANCED == (state = cursorExec->getNext(&obj, NULL))) {
// If adding this object will cause us to exceed the BSON size limit, then we stash
// it for later.
if (firstBatch.len() + obj.objsize() > BSONObjMaxUserSize && numResults > 0) {
cursorExec->enqueue(obj);
break;
}
// Add result to output buffer.
firstBatch.append(obj);
numResults++;
}
// Throw an assertion if query execution fails for any reason.
if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
const std::unique_ptr<PlanStageStats> stats(cursorExec->getStats());
error() << "Plan executor error during find command: " << PlanExecutor::statestr(state)
<< ", stats: " << Explain::statsToBSON(*stats);
return appendCommandStatus(result,
Status(ErrorCodes::OperationFailed,
str::stream()
<< "Executor error during find command: "
<< WorkingSetCommon::toStatusString(obj)));
}
// 6) Set up the cursor for getMore.
if (shouldSaveCursor(txn, collection, state, cursorExec)) {
// State will be restored on getMore.
cursorExec->saveState();
cursorExec->detachFromOperationContext();
cursor->setLeftoverMaxTimeMicros(CurOp::get(txn)->getRemainingMaxTimeMicros());
cursor->setPos(numResults);
} else {
cursorId = 0;
}
// Fill out curop based on the results.
endQueryOp(txn, *cursorExec, dbProfilingLevel, numResults, cursorId);
// 7) Generate the response object to send to the client.
appendCursorResponseObject(cursorId, nss.ns(), firstBatch.arr(), &result);
if (cursorId) {
cursorFreer.Dismiss();
}
return true;
}
Example 4: getMore
//......... part of the code omitted here .........
// If we're replaying the oplog, we save the last time that we read.
Timestamp slaveReadTill;
// What number result are we starting at? Used to fill out the reply.
startingResult = cc->pos();
uint64_t notifierVersion = 0;
std::shared_ptr<CappedInsertNotifier> notifier;
if (isCursorAwaitData(cc)) {
invariant(ctx->getCollection()->isCapped());
// Retrieve the notifier which we will wait on until new data arrives. We make sure
// to do this in the lock because once we drop the lock it is possible for the
// collection to become invalid. The notifier itself will outlive the collection if
// the collection is dropped, as we keep a shared_ptr to it.
notifier = ctx->getCollection()->getCappedInsertNotifier();
// Must get the version before we call generateBatch in case a write comes in after
// that call and before we call wait on the notifier.
notifierVersion = notifier->getVersion();
}
PlanExecutor* exec = cc->getExecutor();
exec->reattachToOperationContext(txn);
exec->restoreState();
PlanExecutor::ExecState state;
generateBatch(ntoreturn, cc, &bb, &numResults, &slaveReadTill, &state);
// If this is an await data cursor, and we hit EOF without generating any results, then
// we block waiting for new data to arrive.
if (isCursorAwaitData(cc) && state == PlanExecutor::IS_EOF && numResults == 0) {
// Save the PlanExecutor and drop our locks.
exec->saveState();
ctx.reset();
// Block waiting for data for up to 1 second.
Seconds timeout(1);
notifier->wait(notifierVersion, timeout);
notifier.reset();
// Set expected latency to match wait time. This makes sure the logs aren't spammed
// by awaitData queries that exceed slowms due to blocking on the CappedInsertNotifier.
curop.setExpectedLatencyMs(durationCount<Milliseconds>(timeout));
// Reacquiring locks.
ctx = make_unique<AutoGetCollectionForRead>(txn, nss);
exec->restoreState();
// We woke up because either the timed_wait expired, or there was more data. Either
// way, attempt to generate another batch of results.
generateBatch(ntoreturn, cc, &bb, &numResults, &slaveReadTill, &state);
}
// We have to do this before re-acquiring locks in the agg case because
// shouldSaveCursorGetMore() can make a network call for agg cursors.
//
// TODO: Getting rid of PlanExecutor::isEOF() in favor of PlanExecutor::IS_EOF would mean
// that this network operation is no longer necessary.
const bool shouldSaveCursor = shouldSaveCursorGetMore(state, exec, isCursorTailable(cc));
// In order to deregister a cursor we must be holding the DB + collection lock;
// aggregation cursors dropped these locks earlier, so reacquire them here.
if (cc->isAggCursor()) {
invariant(NULL == ctx.get());
unpinDBLock = make_unique<Lock::DBLock>(txn->lockState(), nss.db(), MODE_IS);
Example 5: run
//......... part of the code omitted here .........
}
std::unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
if (!collection) {
// No collection. Just fill out curop indicating that there were zero results and
// there is no ClientCursor id, and then return.
const long long numResults = 0;
const CursorId cursorId = 0;
endQueryOp(txn, collection, *exec, dbProfilingLevel, numResults, cursorId);
appendCursorResponseObject(cursorId, nss.ns(), BSONArray(), &result);
return true;
}
const LiteParsedQuery& pq = exec->getCanonicalQuery()->getParsed();
// Stream query results, adding them to a BSONArray as we go.
BSONArrayBuilder firstBatch;
BSONObj obj;
PlanExecutor::ExecState state = PlanExecutor::ADVANCED;
long long numResults = 0;
while (!FindCommon::enoughForFirstBatch(pq, numResults, firstBatch.len()) &&
PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
// If adding this object will cause us to exceed the BSON size limit, then we stash
// it for later.
if (firstBatch.len() + obj.objsize() > BSONObjMaxUserSize && numResults > 0) {
exec->enqueue(obj);
break;
}
// Add result to output buffer.
firstBatch.append(obj);
numResults++;
}
// Throw an assertion if query execution fails for any reason.
if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
const std::unique_ptr<PlanStageStats> stats(exec->getStats());
error() << "Plan executor error during find command: " << PlanExecutor::statestr(state)
<< ", stats: " << Explain::statsToBSON(*stats);
return appendCommandStatus(result,
Status(ErrorCodes::OperationFailed,
str::stream()
<< "Executor error during find command: "
<< WorkingSetCommon::toStatusString(obj)));
}
// TODO: Currently, chunk ranges are kept around until all ClientCursors created while the
// chunk belonged on this node are gone. Separating chunk lifetime management from
// ClientCursor should allow this check to go away.
if (!shardingState->getVersion(nss.ns()).isWriteCompatibleWith(shardingVersionAtStart)) {
// Version changed while retrieving a PlanExecutor. Terminate the operation,
// signaling that mongos should retry.
throw SendStaleConfigException(nss.ns(),
"version changed during find command",
shardingVersionAtStart,
shardingState->getVersion(nss.ns()));
}
// Set up the cursor for getMore.
CursorId cursorId = 0;
if (shouldSaveCursor(txn, collection, state, exec.get())) {
// Register the execution plan inside a ClientCursor. Ownership of the PlanExecutor is
// transferred to the ClientCursor.
//
// First unregister the PlanExecutor so it can be re-registered with ClientCursor.
exec->deregisterExec();
// Create a ClientCursor containing this plan executor. We don't have to worry about
// leaking it as it's inserted into a global map by its ctor.
ClientCursor* cursor =
new ClientCursor(collection->getCursorManager(),
exec.release(),
nss.ns(),
txn->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(),
pq.getOptions(),
pq.getFilter());
cursorId = cursor->cursorid();
invariant(!exec);
PlanExecutor* cursorExec = cursor->getExecutor();
// State will be restored on getMore.
cursorExec->saveState();
cursorExec->detachFromOperationContext();
cursor->setLeftoverMaxTimeMicros(CurOp::get(txn)->getRemainingMaxTimeMicros());
cursor->setPos(numResults);
// Fill out curop based on the results.
endQueryOp(txn, collection, *cursorExec, dbProfilingLevel, numResults, cursorId);
} else {
endQueryOp(txn, collection, *exec, dbProfilingLevel, numResults, cursorId);
}
// Generate the response object to send to the client.
appendCursorResponseObject(cursorId, nss.ns(), firstBatch.arr(), &result);
return true;
}
Example 6: run
//......... part of the code omitted here .........
}
txn->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
if (cursor->isAggCursor()) {
// Agg cursors handle their own locking internally.
ctx.reset(); // unlocks
}
PlanExecutor* exec = cursor->getExecutor();
exec->restoreState(txn);
// If we're tailing a capped collection, retrieve a monotonically increasing insert
// counter.
uint64_t lastInsertCount = 0;
if (isCursorAwaitData(cursor)) {
invariant(ctx->getCollection()->isCapped());
lastInsertCount = ctx->getCollection()->getCappedInsertNotifier()->getCount();
}
CursorId respondWithId = 0;
BSONArrayBuilder nextBatch;
BSONObj obj;
PlanExecutor::ExecState state;
int numResults = 0;
Status batchStatus = generateBatch(cursor, request, &nextBatch, &state, &numResults);
if (!batchStatus.isOK()) {
return appendCommandStatus(result, batchStatus);
}
// If this is an await data cursor, and we hit EOF without generating any results, then
// we block waiting for new oplog data to arrive.
if (isCursorAwaitData(cursor) && state == PlanExecutor::IS_EOF && numResults == 0) {
// Retrieve the notifier which we will wait on until new data arrives. We make sure
// to do this in the lock because once we drop the lock it is possible for the
// collection to become invalid. The notifier itself will outlive the collection if
// the collection is dropped, as we keep a shared_ptr to it.
auto notifier = ctx->getCollection()->getCappedInsertNotifier();
// Save the PlanExecutor and drop our locks.
exec->saveState();
ctx.reset();
// Block waiting for data.
Microseconds timeout(CurOp::get(txn)->getRemainingMaxTimeMicros());
notifier->waitForInsert(lastInsertCount, timeout);
notifier.reset();
ctx.reset(new AutoGetCollectionForRead(txn, request.nss));
exec->restoreState(txn);
// We woke up because either the timed_wait expired, or there was more data. Either
// way, attempt to generate another batch of results.
batchStatus = generateBatch(cursor, request, &nextBatch, &state, &numResults);
if (!batchStatus.isOK()) {
return appendCommandStatus(result, batchStatus);
}
}
if (shouldSaveCursorGetMore(state, exec, isCursorTailable(cursor))) {
respondWithId = request.cursorid;
exec->saveState();
// If maxTimeMS was set directly on the getMore rather than being rolled over
// from a previous find, then don't roll remaining micros over to the next
// getMore.
if (!hasOwnMaxTime) {
cursor->setLeftoverMaxTimeMicros(CurOp::get(txn)->getRemainingMaxTimeMicros());
}
cursor->incPos(numResults);
if (isCursorTailable(cursor) && state == PlanExecutor::IS_EOF) {
// Rather than swapping their existing RU into the client cursor, tailable
// cursors should get a new recovery unit.
ruSwapper.dismiss();
}
}
else {
CurOp::get(txn)->debug().cursorExhausted = true;
}
appendGetMoreResponseObject(respondWithId, request.nss.ns(), nextBatch.arr(), &result);
if (respondWithId) {
cursorFreer.Dismiss();
// If we are operating on an aggregation cursor, then we dropped our collection lock
// earlier and need to reacquire it in order to clean up our ClientCursorPin.
if (cursor->isAggCursor()) {
invariant(NULL == ctx.get());
unpinDBLock.reset(
new Lock::DBLock(txn->lockState(), request.nss.db(), MODE_IS));
unpinCollLock.reset(
new Lock::CollectionLock(txn->lockState(), request.nss.ns(), MODE_IS));
}
}
return true;
}
Example 7: run
//......... part of the code omitted here .........
return appendCommandStatus(result, Status(ErrorCodes::CursorNotFound, str::stream()
<< "Cursor not found, cursor id: " << request.cursorid));
}
if (request.nss.ns() != cursor->ns()) {
return appendCommandStatus(result, Status(ErrorCodes::Unauthorized, str::stream()
<< "Requested getMore on namespace '" << request.nss.ns()
<< "', but cursor belongs to a different namespace"));
}
// On early return, get rid of the cursor.
ScopeGuard cursorFreer = MakeGuard(&ClientCursorPin::deleteUnderlying, ccPin);
if (!cursor->hasRecoveryUnit()) {
// Start using a new RecoveryUnit.
cursor->setOwnedRecoveryUnit(
getGlobalServiceContext()->getGlobalStorageEngine()->newRecoveryUnit());
}
// Swap RecoveryUnit(s) between the ClientCursor and OperationContext.
ScopedRecoveryUnitSwapper ruSwapper(cursor, txn);
// Reset timeout timer on the cursor since the cursor is still in use.
cursor->setIdleTime(0);
// If the operation that spawned this cursor had a time limit set, apply leftover
// time to this getmore.
txn->getCurOp()->setMaxTimeMicros(cursor->getLeftoverMaxTimeMicros());
txn->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
if (cursor->isAggCursor()) {
// Agg cursors handle their own locking internally.
ctx.reset(); // unlocks
}
PlanExecutor* exec = cursor->getExecutor();
exec->restoreState(txn);
// TODO: Handle result sets larger than 16MB.
BSONArrayBuilder nextBatch;
BSONObj obj;
PlanExecutor::ExecState state;
int numResults = 0;
while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
// Add result to output buffer.
nextBatch.append(obj);
numResults++;
if (enoughForGetMore(request.batchSize, numResults, nextBatch.len())) {
break;
}
}
// If we are operating on an aggregation cursor, then we dropped our collection lock
// earlier and need to reacquire it in order to clean up our ClientCursorPin.
//
// TODO: We need to ensure that this relock happens if we release the pin above in
// response to PlanExecutor::getNext() throwing an exception.
if (cursor->isAggCursor()) {
invariant(NULL == ctx.get());
unpinDBLock.reset(new Lock::DBLock(txn->lockState(), request.nss.db(), MODE_IS));
unpinCollLock.reset(
new Lock::CollectionLock(txn->lockState(), request.nss.ns(), MODE_IS));
}
// Fail the command if the PlanExecutor reports execution failure.
if (PlanExecutor::FAILURE == state) {
const std::unique_ptr<PlanStageStats> stats(exec->getStats());
error() << "GetMore executor error, stats: " << Explain::statsToBSON(*stats);
return appendCommandStatus(result,
Status(ErrorCodes::OperationFailed,
str::stream() << "GetMore executor error: "
<< WorkingSetCommon::toStatusString(obj)));
}
CursorId respondWithId = 0;
if (shouldSaveCursorGetMore(state, exec, isCursorTailable(cursor))) {
respondWithId = request.cursorid;
exec->saveState();
cursor->setLeftoverMaxTimeMicros(txn->getCurOp()->getRemainingMaxTimeMicros());
cursor->incPos(numResults);
if (isCursorTailable(cursor) && state == PlanExecutor::IS_EOF) {
// Rather than swapping their existing RU into the client cursor, tailable
// cursors should get a new recovery unit.
ruSwapper.dismiss();
}
}
else {
txn->getCurOp()->debug().cursorExhausted = true;
}
appendGetMoreResponseObject(respondWithId, request.nss.ns(), nextBatch.arr(), &result);
if (respondWithId) {
cursorFreer.Dismiss();
}
return true;
}
Example 8: run
//......... part of the code omitted here .........
getExecutorFind(txn, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO);
if (!statusWithPlanExecutor.isOK()) {
return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
std::unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
{
stdx::lock_guard<Client> lk(*txn->getClient());  // named guard so the lock is held for the scope
CurOp::get(txn)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
}
if (!collection) {
// No collection. Just fill out curop indicating that there were zero results and
// there is no ClientCursor id, and then return.
const long long numResults = 0;
const CursorId cursorId = 0;
endQueryOp(txn, collection, *exec, numResults, cursorId);
appendCursorResponseObject(cursorId, nss.ns(), BSONArray(), &result);
return true;
}
const QueryRequest& originalQR = exec->getCanonicalQuery()->getQueryRequest();
// Stream query results, adding them to a BSONArray as we go.
CursorResponseBuilder firstBatch(/*isInitialResponse*/ true, &result);
BSONObj obj;
PlanExecutor::ExecState state = PlanExecutor::ADVANCED;
long long numResults = 0;
while (!FindCommon::enoughForFirstBatch(originalQR, numResults) &&
PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
// If we can't fit this result inside the current batch, then we stash it for later.
if (!FindCommon::haveSpaceForNext(obj, numResults, firstBatch.bytesUsed())) {
exec->enqueue(obj);
break;
}
// Add result to output buffer.
firstBatch.append(obj);
numResults++;
}
// Throw an assertion if query execution fails for any reason.
if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
firstBatch.abandon();
error() << "Plan executor error during find command: " << PlanExecutor::statestr(state)
<< ", stats: " << Explain::getWinningPlanStats(exec.get());
return appendCommandStatus(result,
Status(ErrorCodes::OperationFailed,
str::stream()
<< "Executor error during find command: "
<< WorkingSetCommon::toStatusString(obj)));
}
// Before saving the cursor, ensure that whatever plan we established happened with the
// expected collection version
auto css = CollectionShardingState::get(txn, nss);
css->checkShardVersionOrThrow(txn);
// Set up the cursor for getMore.
CursorId cursorId = 0;
if (shouldSaveCursor(txn, collection, state, exec.get())) {
// Register the execution plan inside a ClientCursor. Ownership of the PlanExecutor is
// transferred to the ClientCursor.
//
// First unregister the PlanExecutor so it can be re-registered with ClientCursor.
exec->deregisterExec();
// Create a ClientCursor containing this plan executor. We don't have to worry about
// leaking it as it's inserted into a global map by its ctor.
ClientCursor* cursor =
new ClientCursor(collection->getCursorManager(),
exec.release(),
nss.ns(),
txn->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(),
originalQR.getOptions(),
cmdObj.getOwned());
cursorId = cursor->cursorid();
invariant(!exec);
PlanExecutor* cursorExec = cursor->getExecutor();
// State will be restored on getMore.
cursorExec->saveState();
cursorExec->detachFromOperationContext();
cursor->setLeftoverMaxTimeMicros(txn->getRemainingMaxTimeMicros());
cursor->setPos(numResults);
// Fill out curop based on the results.
endQueryOp(txn, collection, *cursorExec, numResults, cursorId);
} else {
endQueryOp(txn, collection, *exec, numResults, cursorId);
}
// Generate the response object to send to the client.
firstBatch.done(cursorId, nss.ns());
return true;
}