This article collects typical usage examples of the C++ PlanCache class. If you are wondering how the C++ PlanCache class is used in practice, the hand-picked code examples on this page may help.
The following shows 15 code examples of the PlanCache class, sorted by popularity by default.
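Most of the examples below follow the same lifecycle: look a plan up in the cache by query shape, run it, and then add to, remove from, or give feedback to the cache. As a quick orientation, here is a minimal standalone sketch of that lifecycle; MiniPlanCache and its members are hypothetical simplifications for illustration only, not MongoDB's actual PlanCache API.

#include <iostream>
#include <map>
#include <optional>
#include <string>

// Hypothetical, simplified stand-in for a plan cache entry.
struct CachedPlan {
    std::string planSummary;  // e.g. which index the winning plan uses
    double score = 0.0;       // refreshed as execution feedback arrives
};

// Hypothetical, simplified stand-in for PlanCache, keyed by query shape.
class MiniPlanCache {
public:
    void add(const std::string& shape, CachedPlan plan) { _entries[shape] = std::move(plan); }
    void remove(const std::string& shape) { _entries.erase(shape); }
    void feedback(const std::string& shape, double score) {
        auto it = _entries.find(shape);
        if (it != _entries.end()) it->second.score = score;
    }
    std::optional<CachedPlan> get(const std::string& shape) const {
        auto it = _entries.find(shape);
        return it == _entries.end() ? std::nullopt : std::optional<CachedPlan>(it->second);
    }
private:
    std::map<std::string, CachedPlan> _entries;
};

int main() {
    MiniPlanCache cache;
    const std::string shape = "{a: {$gte: '?'}}";
    if (!cache.get(shape)) {
        // Cache miss: multi-plan the query and store the winner (cf. Example 14 below).
        cache.add(shape, CachedPlan{"IXSCAN {a: 1}", 1.5});
    }
    // Cache hit on a later run: execute the cached plan, then report how it did
    // (cf. CachedPlanRunner::updateCache in Example 1, CachedPlanStage::updatePlanCache in Example 5).
    cache.feedback(shape, 1.7);
    std::cout << cache.get(shape)->planSummary << std::endl;
    return 0;
}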
Example 1: cc
void CachedPlanRunner::updateCache() {
_updatedCache = true;
if (_killed) {
return;
}
Database* db = cc().database();
// XXX: We need to check for NULL because this is called upon
// destruction of the CachedPlanRunner. In some cases, the db
// or collection could be dropped without kill() being called
// on the runner (for example, timeout of a ClientCursor holding
// the runner).
if (NULL == db) { return; }
Collection* collection = db->getCollection(_canonicalQuery->ns());
if (NULL == collection) { return; }
PlanCache* cache = collection->infoCache()->getPlanCache();
std::auto_ptr<PlanCacheEntryFeedback> feedback(new PlanCacheEntryFeedback());
// XXX: what else can we provide here?
feedback->stats.reset(_exec->getStats());
feedback->score = PlanRanker::scoreTree(feedback->stats.get());
Status fbs = cache->feedback(*_canonicalQuery, feedback.release());
if (!fbs.isOK()) {
QLOG() << _canonicalQuery->ns() << ": Failed to update cache with feedback: "
<< fbs.toString() << " - "
<< "(query: " << _canonicalQuery->getQueryObj()
<< "; sort: " << _canonicalQuery->getParsed().getSort()
<< "; projection: " << _canonicalQuery->getParsed().getProj()
<< ") is no longer in plan cache.";
}
}
Example 2: updateCache
void CachedPlanRunner::updateCache() {
_updatedCache = true;
// We're done. Update the cache.
PlanCache* cache = PlanCache::get(_canonicalQuery->ns());
// TODO: Is this an error?
if (NULL == cache) { return; }
// TODO: How do we decide this?
bool shouldRemovePlan = false;
if (shouldRemovePlan) {
if (!cache->remove(*_canonicalQuery, *_cachedQuery->solution)) {
warning() << "Cached plan runner couldn't remove plan from cache. Maybe"
" somebody else did already?";
return;
}
}
// We're done running. Update cache.
auto_ptr<CachedSolutionFeedback> feedback(new CachedSolutionFeedback());
feedback->stats = _exec->getStats();
cache->feedback(*_canonicalQuery, *_cachedQuery->solution, feedback.release());
}
Example 3: getRunner
/**
* For a given query, get a runner. The runner could be a SingleSolutionRunner, a
* CachedPlanRunner, or a MultiPlanRunner, depending on the cache/query solver/etc.
*/
Status getRunner(QueryMessage& q, Runner** out) {
CanonicalQuery* rawCanonicalQuery = NULL;
// Canonicalize the query and wrap it in an auto_ptr so we don't leak it if something goes
// wrong.
Status status = CanonicalQuery::canonicalize(q, &rawCanonicalQuery);
if (!status.isOK()) { return status; }
verify(rawCanonicalQuery);
auto_ptr<CanonicalQuery> canonicalQuery(rawCanonicalQuery);
// Try to look up a cached solution for the query.
// TODO: Can the cache have negative data about a solution?
PlanCache* localCache = PlanCache::get(canonicalQuery->ns());
CachedSolution* cs = localCache->get(*canonicalQuery);
if (NULL != cs) {
// We have a cached solution. Hand the canonical query and cached solution off to the
// cached plan runner, which takes ownership of both.
WorkingSet* ws;
PlanStage* root;
verify(StageBuilder::build(*cs->solution, &root, &ws));
*out = new CachedPlanRunner(canonicalQuery.release(), cs, root, ws);
return Status::OK();
}
// No entry in cache for the query. We have to solve the query ourselves.
vector<QuerySolution*> solutions;
QueryPlanner::plan(*canonicalQuery, &solutions);
// We cannot figure out how to answer the query. Should this ever happen?
if (0 == solutions.size()) {
return Status(ErrorCodes::BadValue, "Can't create a plan for the canonical query " +
canonicalQuery->toString());
}
if (1 == solutions.size()) {
// Only one possible plan. Run it. Build the stages from the solution.
WorkingSet* ws;
PlanStage* root;
verify(StageBuilder::build(*solutions[0], &root, &ws));
// And, run the plan.
*out = new SingleSolutionRunner(canonicalQuery.release(), solutions[0], root, ws);
return Status::OK();
}
else {
// Many solutions. Let the MultiPlanRunner pick the best, update the cache, and so on.
auto_ptr<MultiPlanRunner> mpr(new MultiPlanRunner(canonicalQuery.release()));
for (size_t i = 0; i < solutions.size(); ++i) {
WorkingSet* ws;
PlanStage* root;
verify(StageBuilder::build(*solutions[i], &root, &ws));
// Takes ownership of all arguments.
mpr->addPlan(solutions[i], root, ws);
}
*out = mpr.release();
return Status::OK();
}
}
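For context, a caller of getRunner() checks the returned Status, takes ownership of the Runner, and then drives it to stream results. The sketch below shows such a caller under stated assumptions: runQuery is a hypothetical wrapper, and the Runner::getNext() signature and RUNNER_ADVANCED state are assumed from this era of the codebase rather than shown in the example above.

Status runQuery(QueryMessage& q) {
    Runner* rawRunner = NULL;
    Status status = getRunner(q, &rawRunner);
    if (!status.isOK()) {
        return status;  // canonicalization or planning failed
    }
    auto_ptr<Runner> runner(rawRunner);  // the caller owns the runner from here on
    BSONObj obj;
    // Assumed iteration API: getNext() yields documents until EOF or an error state.
    while (Runner::RUNNER_ADVANCED == runner->getNext(&obj, NULL)) {
        // stream 'obj' back to the client here
    }
    return Status::OK();
}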
Example 4: run
void run() {
AutoGetCollectionForRead ctx(&_txn, nss.ns());
Collection* collection = ctx.getCollection();
ASSERT(collection);
// Query can be answered by either index on "a" or index on "b".
auto statusWithCQ = CanonicalQuery::canonicalize(nss, fromjson("{a: {$gte: 8}, b: 1}"));
ASSERT_OK(statusWithCQ.getStatus());
const std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
// We shouldn't have anything in the plan cache for this shape yet.
PlanCache* cache = collection->infoCache()->getPlanCache();
ASSERT(cache);
CachedSolution* rawCachedSolution;
ASSERT_NOT_OK(cache->get(*cq, &rawCachedSolution));
// Get planner params.
QueryPlannerParams plannerParams;
fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams);
// Queued data stage will return a failure during the cached plan trial period.
auto mockChild = stdx::make_unique<QueuedDataStage>(&_txn, &_ws);
mockChild->pushBack(PlanStage::FAILURE);
// High enough so that we shouldn't trigger a replan based on works.
const size_t decisionWorks = 50;
CachedPlanStage cachedPlanStage(
&_txn, collection, &_ws, cq.get(), plannerParams, decisionWorks, mockChild.release());
// This should succeed after triggering a replan.
PlanYieldPolicy yieldPolicy(nullptr, PlanExecutor::YIELD_MANUAL);
ASSERT_OK(cachedPlanStage.pickBestPlan(&yieldPolicy));
// Make sure that we get 2 legit results back.
size_t numResults = 0;
PlanStage::StageState state = PlanStage::NEED_TIME;
while (state != PlanStage::IS_EOF) {
WorkingSetID id = WorkingSet::INVALID_ID;
state = cachedPlanStage.work(&id);
ASSERT_NE(state, PlanStage::FAILURE);
ASSERT_NE(state, PlanStage::DEAD);
if (state == PlanStage::ADVANCED) {
WorkingSetMember* member = _ws.get(id);
ASSERT(cq->root()->matchesBSON(member->obj.value()));
numResults++;
}
}
ASSERT_EQ(numResults, 2U);
// Plan cache should still be empty, as we don't write to it when we replan a failed
// query.
ASSERT_NOT_OK(cache->get(*cq, &rawCachedSolution));
}
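The test above exercises the trial period of CachedPlanStage: the cached plan gets a bounded amount of work, and a failure (or lack of progress) during that trial leads to replanning instead of an error for the user. The following standalone sketch illustrates that trial idea only; the names and the budget policy are hypothetical, not MongoDB's actual logic.

#include <cstddef>
#include <functional>
#include <iostream>

// Hypothetical outcome of one unit of work by the cached plan.
enum class WorkResult { kAdvanced, kNeedTime, kFailure, kEof };

// Give the cached plan a bounded trial; signal that a replan is needed if it
// fails or exhausts its budget without finishing. Purely illustrative.
bool cachedPlanStillGood(const std::function<WorkResult()>& workOnce, std::size_t maxWorks) {
    for (std::size_t works = 0; works < maxWorks; ++works) {
        switch (workOnce()) {
            case WorkResult::kEof:
                return true;   // finished within budget: keep the cached plan
            case WorkResult::kFailure:
                return false;  // failed during the trial: replan
            default:
                break;         // kAdvanced / kNeedTime: keep working
        }
    }
    return false;              // budget exhausted without reaching EOF: replan
}

int main() {
    // Simulated child plan that fails during its trial, as the QueuedDataStage above does.
    auto failingPlan = []() { return WorkResult::kFailure; };
    std::cout << (cachedPlanStillGood(failingPlan, 50) ? "keep cached plan" : "replan")
              << std::endl;
    return 0;
}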
Example 5: collection
void CachedPlanStage::updatePlanCache() {
const double score = PlanRanker::scoreTree(getStats()->children[0].get());
PlanCache* cache = collection()->infoCache()->getPlanCache();
Status fbs = cache->feedback(*_canonicalQuery, score);
if (!fbs.isOK()) {
LOG(5) << _canonicalQuery->ns() << ": Failed to update cache with feedback: " << redact(fbs)
<< " - "
<< "(query: " << redact(_canonicalQuery->getQueryObj())
<< "; sort: " << _canonicalQuery->getQueryRequest().getSort()
<< "; projection: " << _canonicalQuery->getQueryRequest().getProj()
<< ") is no longer in plan cache.";
}
}
Example 6: TEST_F
TEST_F(QueryStageCachedPlan, DeactivatesEntriesOnReplan) {
AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
Collection* collection = ctx.getCollection();
ASSERT(collection);
// Never run - just used as a key for the cache's get() functions, since all of the other
// CanonicalQueries created in this test will have this shape.
const auto shapeCq =
canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 123}, b: {$gte: 123}}"));
// Query can be answered by either index on "a" or index on "b".
const auto noResultsCq =
canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 11}, b: {$gte: 11}}"));
// We shouldn't have anything in the plan cache for this shape yet.
PlanCache* cache = collection->infoCache()->getPlanCache();
ASSERT(cache);
ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kNotPresent);
// Run the CachedPlanStage with a long-running child plan. Replanning should be
// triggered and an inactive entry will be added.
forceReplanning(collection, noResultsCq.get());
// Check for an inactive cache entry.
ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kPresentInactive);
// Run the plan again, to create an active entry.
forceReplanning(collection, noResultsCq.get());
// The works value should be 1 for the entry, since the query we ran should not have any results.
ASSERT_EQ(cache->get(*noResultsCq.get()).state, PlanCache::CacheEntryState::kPresentActive);
auto entry = assertGet(cache->getEntry(*shapeCq));
size_t works = 1U;
ASSERT_EQ(entry->works, works);
// Run another query which takes long enough that replanning is triggered while the active cache
// entry is in place. The current cache entry's works value is a very low number. When replanning
// is triggered, the cache entry will be deactivated, but the new plan will not overwrite it, since
// the new plan will have a higher works value. Therefore, we will be left with an inactive entry
// whose works value has been doubled from 1 to 2.
auto highWorksCq =
canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 0}, b: {$gte:0}}"));
forceReplanning(collection, highWorksCq.get());
ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kPresentInactive);
ASSERT_EQ(assertGet(cache->getEntry(*shapeCq))->works, 2U);
// Again, force replanning. This time run the initial query which finds no results. The multi
// planner will choose a plan with works value lower than the existing inactive
// entry. Replanning will thus deactivate the existing entry (it's already
// inactive so this is a noop), then create a new entry with a works value of 1.
forceReplanning(collection, noResultsCq.get());
ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kPresentActive);
ASSERT_EQ(assertGet(cache->getEntry(*shapeCq))->works, 1U);
}
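This test leans on the active/inactive entry mechanism: a freshly created entry starts inactive with the winner's works value, a later winner that is at least as cheap activates (or replaces) it, and a later winner that looks worse only deactivates the entry and doubles its works budget. The sketch below is a simplified, standalone illustration of that growth rule with hypothetical names; it is not MongoDB's implementation.

#include <cstddef>
#include <iostream>

// Hypothetical, simplified cache entry state (cf. PlanCache::CacheEntryState above).
struct MiniEntry {
    bool active = false;
    std::size_t works = 0;
};

// Called when a fresh multi-planning pass produces a winner needing 'newWorks' works.
// Mirrors, in spirit, the grow-or-activate rule the test exercises.
void onNewWinner(MiniEntry& entry, std::size_t newWorks) {
    if (entry.works == 0) {
        entry = {false, newWorks};  // first sighting: create an inactive entry
    } else if (newWorks <= entry.works) {
        entry = {true, newWorks};   // new plan is at least as cheap: activate it
    } else {
        entry.active = false;       // new plan looked worse: deactivate and
        entry.works *= 2;           // double the works budget
    }
}

int main() {
    MiniEntry entry;
    onNewWinner(entry, 1);   // inactive, works = 1   (first replanning in the test)
    onNewWinner(entry, 1);   // active,   works = 1   (second replanning)
    onNewWinner(entry, 50);  // inactive, works = 2   (the "high works" query)
    onNewWinner(entry, 1);   // active,   works = 1   (final replanning)
    std::cout << "active=" << entry.active << " works=" << entry.works << std::endl;
    return 0;
}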
Example 7: getStats
void CachedPlanStage::updatePlanCache() {
std::unique_ptr<PlanCacheEntryFeedback> feedback = stdx::make_unique<PlanCacheEntryFeedback>();
feedback->stats = getStats();
feedback->score = PlanRanker::scoreTree(feedback->stats.get());
PlanCache* cache = _collection->infoCache()->getPlanCache();
Status fbs = cache->feedback(*_canonicalQuery, feedback.release());
if (!fbs.isOK()) {
LOG(5) << _canonicalQuery->ns()
<< ": Failed to update cache with feedback: " << fbs.toString() << " - "
<< "(query: " << _canonicalQuery->getQueryObj()
<< "; sort: " << _canonicalQuery->getParsed().getSort()
<< "; projection: " << _canonicalQuery->getParsed().getProj()
<< ") is no longer in plan cache.";
}
}
Example 8: cc
void CachedPlanRunner::updateCache() {
_updatedCache = true;
Database* db = cc().database();
verify(NULL != db);
Collection* collection = db->getCollection(_canonicalQuery->ns());
verify(NULL != collection);
PlanCache* cache = collection->infoCache()->getPlanCache();
std::auto_ptr<PlanCacheEntryFeedback> feedback(new PlanCacheEntryFeedback());
// XXX: what else can we provide here?
feedback->stats.reset(_exec->getStats());
feedback->score = PlanRanker::scoreTree(feedback->stats.get());
Status fbs = cache->feedback(*_canonicalQuery, feedback.release());
if (!fbs.isOK()) {
// XXX: what should happen here?
warning() << "Failed to update cache with feedback: " << fbs.toString() << endl;
}
}
Example 9: list
// static
Status PlanCacheListPlans::list(OperationContext* opCtx,
const PlanCache& planCache,
const std::string& ns,
const BSONObj& cmdObj,
BSONObjBuilder* bob) {
auto statusWithCQ = canonicalize(opCtx, ns, cmdObj);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
if (!internalQueryCacheListPlansNewOutput.load())
return listPlansOriginalFormat(std::move(statusWithCQ.getValue()), planCache, bob);
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
auto entry = uassertStatusOK(planCache.getEntry(*cq));
// The new output format is in effect (internalQueryCacheListPlansNewOutput is true).
Explain::planCacheEntryToBSON(*entry, bob);
return Status::OK();
}
Example 10: list
// static
Status PlanCacheListPlans::list(const PlanCache& planCache, const std::string& ns,
const BSONObj& cmdObj, BSONObjBuilder* bob) {
CanonicalQuery* cqRaw;
Status status = canonicalize(ns, cmdObj, &cqRaw);
if (!status.isOK()) {
return status;
}
scoped_ptr<CanonicalQuery> cq(cqRaw);
CachedSolution* crRaw;
Status result = planCache.get(*cq, &crRaw);
if (!result.isOK()) {
return result;
}
scoped_ptr<CachedSolution> cr(crRaw);
BSONArrayBuilder plansBuilder(bob->subarrayStart("plans"));
size_t numPlans = cr->plannerData.size();
for (size_t i = 0; i < numPlans; ++i) {
BSONObjBuilder planBob(plansBuilder.subobjStart());
// Create plan details field.
// Currently, a simple string representation of
// SolutionCacheData. Need to revisit format when we
// need to parse user-provided plan details for planCacheAddPlan.
SolutionCacheData* scd = cr->plannerData[i];
BSONObjBuilder detailsBob(planBob.subobjStart("details"));
detailsBob.append("solution", scd->toString());
detailsBob.doneFast();
// XXX: Fix these field values once we have fleshed out cache entries.
// reason should contain initial plan stats and score from ranking process.
// feedback should contain execution stats from running the query to completion.
planBob.append("reason", BSONObj());
planBob.append("feedback", BSONObj());
planBob.append("hint", scd->adminHintApplied);
}
plansBuilder.doneFast();
return Status::OK();
}
Example 11: invariant
// static
Status PlanCacheListQueryShapes::list(const PlanCache& planCache, BSONObjBuilder* bob) {
invariant(bob);
// Fetch all cached solutions from plan cache.
vector<PlanCacheEntry*> solutions = planCache.getAllEntries();
BSONArrayBuilder arrayBuilder(bob->subarrayStart("shapes"));
for (vector<PlanCacheEntry*>::const_iterator i = solutions.begin(); i != solutions.end(); i++) {
PlanCacheEntry* entry = *i;
invariant(entry);
BSONObjBuilder shapeBuilder(arrayBuilder.subobjStart());
shapeBuilder.append("query", entry->query);
shapeBuilder.append("sort", entry->sort);
shapeBuilder.append("projection", entry->projection);
shapeBuilder.doneFast();
// Release resources for cached solution after extracting query shape.
delete entry;
}
arrayBuilder.doneFast();
return Status::OK();
}
Example 12: getRunner
/**
* For a given query, get a runner. The runner could be a SingleSolutionRunner, a
* CachedPlanRunner, or a MultiPlanRunner, depending on the cache/query solver/etc.
*/
Status getRunner(CanonicalQuery* rawCanonicalQuery, Runner** out, size_t plannerOptions) {
verify(rawCanonicalQuery);
auto_ptr<CanonicalQuery> canonicalQuery(rawCanonicalQuery);
// Try to look up a cached solution for the query.
// TODO: Can the cache have negative data about a solution?
PlanCache* localCache = PlanCache::get(canonicalQuery->ns());
if (NULL != localCache) {
CachedSolution* cs = localCache->get(*canonicalQuery);
if (NULL != cs) {
// We have a cached solution. Hand the canonical query and cached solution off to
// the cached plan runner, which takes ownership of both.
WorkingSet* ws;
PlanStage* root;
verify(StageBuilder::build(*cs->solution, &root, &ws));
*out = new CachedPlanRunner(canonicalQuery.release(), cs, root, ws);
return Status::OK();
}
}
// No entry in cache for the query. We have to solve the query ourselves.
// Get the indices that we could possibly use.
Database* db = cc().database();
verify( db );
Collection* collection = db->getCollection( canonicalQuery->ns() );
// This can happen as we're called by internal clients as well.
if (NULL == collection) {
const string& ns = canonicalQuery->ns();
*out = new EOFRunner(canonicalQuery.release(), ns);
return Status::OK();
}
// If we have an _id index we can use the idhack runner.
if (canUseIDHack(*canonicalQuery) && collection->getIndexCatalog()->findIdIndex()) {
*out = new IDHackRunner(collection, canonicalQuery.release());
return Status::OK();
}
// If it's not NULL, we may have indices. Access the catalog and fill out IndexEntry(s)
QueryPlannerParams plannerParams;
for (int i = 0; i < collection->getIndexCatalog()->numIndexesReady(); ++i) {
IndexDescriptor* desc = collection->getIndexCatalog()->getDescriptor( i );
plannerParams.indices.push_back(IndexEntry(desc->keyPattern(),
desc->isMultikey(),
desc->isSparse(),
desc->indexName()));
}
// Tailable: If the query requests tailable the collection must be capped.
if (canonicalQuery->getParsed().hasOption(QueryOption_CursorTailable)) {
if (!collection->isCapped()) {
return Status(ErrorCodes::BadValue,
"tailable cursor requested on non capped collection");
}
// If a sort is specified it must be equal to expectedSort.
const BSONObj expectedSort = BSON("$natural" << 1);
const BSONObj& actualSort = canonicalQuery->getParsed().getSort();
if (!actualSort.isEmpty() && !(actualSort == expectedSort)) {
return Status(ErrorCodes::BadValue,
"invalid sort specified for tailable cursor: "
+ actualSort.toString());
}
}
// Process the planning options.
plannerParams.options = plannerOptions;
if (storageGlobalParams.noTableScan) {
const string& ns = canonicalQuery->ns();
// There are certain cases where we ignore this restriction:
bool ignore = canonicalQuery->getQueryObj().isEmpty()
|| (string::npos != ns.find(".system."))
|| (0 == ns.find("local."));
if (!ignore) {
plannerParams.options |= QueryPlannerParams::NO_TABLE_SCAN;
}
}
if (!(plannerParams.options & QueryPlannerParams::NO_TABLE_SCAN)) {
plannerParams.options |= QueryPlannerParams::INCLUDE_COLLSCAN;
}
// If the caller wants a shard filter, make sure we're actually sharded.
if (plannerParams.options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
CollectionMetadataPtr collMetadata = shardingState.getCollectionMetadata(canonicalQuery->ns());
if (collMetadata) {
plannerParams.shardKey = collMetadata->getKeyPattern();
}
else {
// If there's no metadata don't bother w/the shard filter since we won't know what
// the key pattern is anyway...
plannerParams.options &= ~QueryPlannerParams::INCLUDE_SHARD_FILTER;
}
}
//......... remainder of this example omitted .........
Example 13: Status
Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) {
// We're going to start over with a new plan. Clear out info from our old plan.
_results.clear();
_ws->clear();
_children.clear();
// Use the query planning module to plan the whole query.
std::vector<QuerySolution*> rawSolutions;
Status status = QueryPlanner::plan(*_canonicalQuery, _plannerParams, &rawSolutions);
if (!status.isOK()) {
return Status(ErrorCodes::BadValue,
str::stream() << "error processing query: " << _canonicalQuery->toString()
<< " planner returned error: " << status.reason());
}
OwnedPointerVector<QuerySolution> solutions(rawSolutions);
// We cannot figure out how to answer the query. Perhaps it requires an index
// we do not have?
if (0 == solutions.size()) {
return Status(ErrorCodes::BadValue,
str::stream() << "error processing query: " << _canonicalQuery->toString()
<< " No query solutions");
}
if (1 == solutions.size()) {
// If there's only one solution, it won't get cached. Make sure to evict the existing
// cache entry if requested by the caller.
if (shouldCache) {
PlanCache* cache = _collection->infoCache()->getPlanCache();
cache->remove(*_canonicalQuery);
}
PlanStage* newRoot;
// Only one possible plan. Build the stages from the solution.
verify(StageBuilder::build(
getOpCtx(), _collection, *_canonicalQuery, *solutions[0], _ws, &newRoot));
_children.emplace_back(newRoot);
_replannedQs.reset(solutions.popAndReleaseBack());
LOG(1)
<< "Replanning of query resulted in single query solution, which will not be cached. "
<< _canonicalQuery->toStringShort()
<< " plan summary after replan: " << Explain::getPlanSummary(child().get())
<< " previous cache entry evicted: " << (shouldCache ? "yes" : "no");
return Status::OK();
}
// Many solutions. Create a MultiPlanStage to pick the best, update the cache,
// and so on. The working set will be shared by all candidate plans.
auto cachingMode = shouldCache ? MultiPlanStage::CachingMode::AlwaysCache
: MultiPlanStage::CachingMode::NeverCache;
_children.emplace_back(
new MultiPlanStage(getOpCtx(), _collection, _canonicalQuery, cachingMode));
MultiPlanStage* multiPlanStage = static_cast<MultiPlanStage*>(child().get());
for (size_t ix = 0; ix < solutions.size(); ++ix) {
if (solutions[ix]->cacheData.get()) {
solutions[ix]->cacheData->indexFilterApplied = _plannerParams.indexFiltersApplied;
}
PlanStage* nextPlanRoot;
verify(StageBuilder::build(
getOpCtx(), _collection, *_canonicalQuery, *solutions[ix], _ws, &nextPlanRoot));
// Takes ownership of 'solutions[ix]' and 'nextPlanRoot'.
multiPlanStage->addPlan(solutions.releaseAt(ix), nextPlanRoot, _ws);
}
// Delegate to the MultiPlanStage's plan selection facility.
Status pickBestPlanStatus = multiPlanStage->pickBestPlan(yieldPolicy);
if (!pickBestPlanStatus.isOK()) {
return pickBestPlanStatus;
}
LOG(1) << "Replanning " << _canonicalQuery->toStringShort()
<< " resulted in plan with summary: " << Explain::getPlanSummary(child().get())
<< ", which " << (shouldCache ? "has" : "has not") << " been written to the cache";
return Status::OK();
}
Example 14: workAllPlans
bool MultiPlanRunner::pickBestPlan(size_t* out, BSONObj* objOut) {
static const int timesEachPlanIsWorked = 100;
// Run each plan some number of times.
for (int i = 0; i < timesEachPlanIsWorked; ++i) {
bool moreToDo = workAllPlans(objOut);
if (!moreToDo) { break; }
}
if (_failure || _killed) { return false; }
// After picking best plan, ranking will own plan stats from
// candidate solutions (winner and losers).
std::auto_ptr<PlanRankingDecision> ranking(new PlanRankingDecision);
size_t bestChild = PlanRanker::pickBestPlan(_candidates, ranking.get());
// Copy candidate order. We will need this to sort candidate stats for explain
// after transferring ownership of 'ranking' to plan cache.
std::vector<size_t> candidateOrder = ranking->candidateOrder;
// Run the best plan. Store it.
_bestPlan.reset(new PlanExecutor(_candidates[bestChild].ws,
_candidates[bestChild].root));
_bestPlan->setYieldPolicy(_policy);
_alreadyProduced = _candidates[bestChild].results;
_bestSolution.reset(_candidates[bestChild].solution);
QLOG() << "Winning solution:\n" << _bestSolution->toString() << endl;
size_t backupChild = bestChild;
if (_bestSolution->hasBlockingStage && (0 == _alreadyProduced.size())) {
QLOG() << "Winner has blocking stage, looking for backup plan...\n";
for (size_t i = 0; i < _candidates.size(); ++i) {
if (!_candidates[i].solution->hasBlockingStage) {
QLOG() << "Candidate " << i << " is backup child\n";
backupChild = i;
_backupSolution = _candidates[i].solution;
_backupAlreadyProduced = _candidates[i].results;
_backupPlan = new PlanExecutor(_candidates[i].ws, _candidates[i].root);
_backupPlan->setYieldPolicy(_policy);
break;
}
}
}
// Store the choice we just made in the cache. We do
// not cache the query if:
// 1) The query is of a type that is not safe to cache, or
// 2) the winning plan did not actually produce any results,
// without hitting EOF. In this case, we have no information to
// suggest that this plan is good.
const PlanStageStats* bestStats = ranking->stats.vector()[0];
if (PlanCache::shouldCacheQuery(*_query)
&& (!_alreadyProduced.empty() || bestStats->common.isEOF)) {
Database* db = cc().database();
verify(NULL != db);
Collection* collection = db->getCollection(_query->ns());
verify(NULL != collection);
PlanCache* cache = collection->infoCache()->getPlanCache();
// Create list of candidate solutions for the cache with
// the best solution at the front.
std::vector<QuerySolution*> solutions;
// Generate solutions and ranking decisions sorted by score.
for (size_t orderingIndex = 0;
orderingIndex < candidateOrder.size(); ++orderingIndex) {
// index into candidates/ranking
size_t i = candidateOrder[orderingIndex];
solutions.push_back(_candidates[i].solution);
}
// Check solution cache data. Do not add to cache if
// we have any invalid SolutionCacheData data.
// XXX: One known example is 2D queries
bool validSolutions = true;
for (size_t i = 0; i < solutions.size(); ++i) {
if (NULL == solutions[i]->cacheData.get()) {
QLOG() << "Not caching query because this solution has no cache data: "
<< solutions[i]->toString();
validSolutions = false;
break;
}
}
if (validSolutions) {
cache->add(*_query, solutions, ranking.release());
}
}
// Clear out the candidate plans, leaving only stats as we're all done w/them.
// Traverse candidate plans in order of score
for (size_t orderingIndex = 0;
orderingIndex < candidateOrder.size(); ++orderingIndex) {
// index into candidates/ranking
size_t i = candidateOrder[orderingIndex];
if (i == bestChild) { continue; }
if (i == backupChild) { continue; }
delete _candidates[i].solution;
//......... remainder of this example omitted .........
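Stripped of the bookkeeping, Example 14 implements a simple policy: work every candidate plan a fixed number of times, rank them by score, run the winner, and only write the decision to the cache when the query shape is cacheable, the winner produced results (or reached EOF), and every candidate carries cache data. The standalone sketch below distills just that policy; the names are hypothetical and this is not the MultiPlanRunner API.

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical summary of one candidate plan after its trial runs.
struct Candidate {
    std::string name;
    double score = 0.0;             // analogous to PlanRanker::scoreTree()
    std::size_t resultsProduced = 0;
    bool hitEof = false;
    bool hasCacheData = true;       // analogous to QuerySolution::cacheData being non-NULL
};

// Pick the winner and decide whether the outcome should be written to the cache.
// Purely illustrative of the policy in Example 14, not MongoDB's implementation.
std::size_t pickBestAndMaybeCache(const std::vector<Candidate>& candidates,
                                  bool shapeIsCacheable,
                                  bool* shouldCache) {
    std::size_t best = 0;
    for (std::size_t i = 1; i < candidates.size(); ++i) {
        if (candidates[i].score > candidates[best].score) best = i;
    }
    const Candidate& winner = candidates[best];
    const bool allHaveCacheData = std::all_of(candidates.begin(), candidates.end(),
                                              [](const Candidate& c) { return c.hasCacheData; });
    *shouldCache = shapeIsCacheable && (winner.resultsProduced > 0 || winner.hitEof) && allHaveCacheData;
    return best;
}

int main() {
    std::vector<Candidate> candidates = {
        {"IXSCAN {a: 1}", 1.8, 5, true, true},
        {"IXSCAN {b: 1}", 1.2, 0, false, true},
    };
    bool shouldCache = false;
    std::size_t best = pickBestAndMaybeCache(candidates, /*shapeIsCacheable=*/true, &shouldCache);
    std::cout << "winner: " << candidates[best].name
              << ", cache it: " << std::boolalpha << shouldCache << std::endl;
    return 0;
}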
Example 15: getRunner
/**
* For a given query, get a runner. The runner could be a SingleSolutionRunner, a
* CachedPlanRunner, or a MultiPlanRunner, depending on the cache/query solver/etc.
*/
Status getRunner(CanonicalQuery* rawCanonicalQuery, Runner** out) {
verify(rawCanonicalQuery);
auto_ptr<CanonicalQuery> canonicalQuery(rawCanonicalQuery);
// Try to look up a cached solution for the query.
// TODO: Can the cache have negative data about a solution?
PlanCache* localCache = PlanCache::get(canonicalQuery->ns());
if (NULL != localCache) {
CachedSolution* cs = localCache->get(*canonicalQuery);
if (NULL != cs) {
// We have a cached solution. Hand the canonical query and cached solution off to
// the cached plan runner, which takes ownership of both.
WorkingSet* ws;
PlanStage* root;
verify(StageBuilder::build(*cs->solution, &root, &ws));
*out = new CachedPlanRunner(canonicalQuery.release(), cs, root, ws);
return Status::OK();
}
}
// No entry in cache for the query. We have to solve the query ourselves.
// Get the indices that we could possibly use.
NamespaceDetails* nsd = nsdetails(canonicalQuery->ns().c_str());
// If this is NULL, there is no data but the query is valid. You're allowed to query for
// data on an empty collection and it's not an error. There just isn't any data...
if (NULL == nsd) {
const std::string& ns = canonicalQuery->ns();
*out = new EOFRunner(canonicalQuery.release(), ns);
return Status::OK();
}
// Tailable: If the query requests tailable the collection must be capped.
if (canonicalQuery->getParsed().hasOption(QueryOption_CursorTailable)) {
if (!nsd->isCapped()) {
return Status(ErrorCodes::BadValue,
"tailable cursor requested on non capped collection");
}
// If a sort is specified it must be equal to expectedSort.
const BSONObj expectedSort = BSON("$natural" << 1);
const BSONObj& actualSort = canonicalQuery->getParsed().getSort();
if (!actualSort.isEmpty() && !(actualSort == expectedSort)) {
return Status(ErrorCodes::BadValue,
"invalid sort specified for tailable cursor: "
+ actualSort.toString());
}
}
// If it's not NULL, we may have indices.
vector<IndexEntry> indices;
for (int i = 0; i < nsd->getCompletedIndexCount(); ++i) {
auto_ptr<IndexDescriptor> desc(CatalogHack::getDescriptor(nsd, i));
indices.push_back(IndexEntry(desc->keyPattern(), desc->isMultikey(), desc->isSparse(), desc->indexName()));
}
vector<QuerySolution*> solutions;
size_t options = QueryPlanner::DEFAULT;
if (storageGlobalParams.noTableScan) {
const string& ns = canonicalQuery->ns();
// There are certain cases where we ignore this restriction:
bool ignore = canonicalQuery->getQueryObj().isEmpty()
|| (string::npos != ns.find(".system."))
|| (0 == ns.find("local."));
if (!ignore) {
options |= QueryPlanner::NO_TABLE_SCAN;
}
}
else {
options |= QueryPlanner::INCLUDE_COLLSCAN;
}
QueryPlanner::plan(*canonicalQuery, indices, options, &solutions);
/*
for (size_t i = 0; i < solutions.size(); ++i) {
QLOG() << "solution " << i << " is " << solutions[i]->toString() << endl;
}
*/
// We cannot figure out how to answer the query. Should this ever happen?
if (0 == solutions.size()) {
return Status(ErrorCodes::BadValue, "Can't create a plan for the canonical query " +
canonicalQuery->toString());
}
if (1 == solutions.size()) {
// Only one possible plan. Run it. Build the stages from the solution.
WorkingSet* ws;
PlanStage* root;
verify(StageBuilder::build(*solutions[0], &root, &ws));
// And, run the plan.
*out = new SingleSolutionRunner(canonicalQuery.release(), solutions[0], root, ws);
return Status::OK();
}
//......... remainder of this example omitted .........