This article collects typical usage examples of the C++ method OwnedPointerVector::vector, extracted from real-world source code. If you have been wondering exactly what OwnedPointerVector::vector does and how to use it, the curated examples below may help; you can also explore other uses of its enclosing class, OwnedPointerVector.
The following presents 7 code examples of OwnedPointerVector::vector, sorted by popularity by default.
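Before the examples, here is a minimal sketch of the interface these call sites rely on. It is inferred purely from the usage shown below and is not the real MongoDB header (the actual class lives in mongo/base/owned_pointer_vector.h and has more members): the container owns the raw pointers it holds and deletes them when it goes out of scope; vector() exposes a read-only view, while mutableVector() is used to insert or replace elements.

#include <cstddef>
#include <vector>

// Sketch only: a simplified stand-in for mongo::OwnedPointerVector,
// reconstructed from the call sites in the examples below.
template <typename T>
class OwnedPointerVectorSketch {
public:
    typedef typename std::vector<T*>::const_iterator const_iterator;

    OwnedPointerVectorSketch() {}

    ~OwnedPointerVectorSketch() {
        // Owning container: destroys every element it still holds.
        // Deleting a null slot (see Example 4) is a harmless no-op.
        for (std::size_t i = 0; i < _vector.size(); ++i)
            delete _vector[i];
    }

    // Read-only view, as in "solutions.vector()" in Example 1.
    const std::vector<T*>& vector() const { return _vector; }

    // Mutable view for push_back()/resize(); pointers added here become owned.
    std::vector<T*>& mutableVector() { return _vector; }

    // Convenience members the examples also use.
    std::size_t size() const { return _vector.size(); }
    T* operator[](std::size_t i) const { return _vector[i]; }
    const_iterator begin() const { return _vector.begin(); }
    const_iterator end() const { return _vector.end(); }
    void push_back(T* ptr) { _vector.push_back(ptr); }

private:
    std::vector<T*> _vector;

    // Not copyable: a copy would double-delete the elements.
    OwnedPointerVectorSketch(const OwnedPointerVectorSketch&);
    OwnedPointerVectorSketch& operator=(const OwnedPointerVectorSketch&);
};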
Example 1: clone
PlanCacheEntry* PlanCacheEntry::clone() const {
    OwnedPointerVector<QuerySolution> solutions;
    for (size_t i = 0; i < plannerData.size(); ++i) {
        QuerySolution* qs = new QuerySolution();
        qs->cacheData.reset(plannerData[i]->clone());
        solutions.mutableVector().push_back(qs);
    }
    PlanCacheEntry* entry = new PlanCacheEntry(solutions.vector(), decision->clone());
    entry->backupSoln = backupSoln;

    // Copy query shape.
    entry->query = query.getOwned();
    entry->sort = sort.getOwned();
    entry->projection = projection.getOwned();

    // Copy performance stats.
    for (size_t i = 0; i < feedback.size(); ++i) {
        PlanCacheEntryFeedback* fb = new PlanCacheEntryFeedback();
        fb->stats.reset(feedback[i]->stats->clone());
        fb->score = feedback[i]->score;
        entry->feedback.push_back(fb);
    }
    entry->averageScore = averageScore;
    entry->stddevScore = stddevScore;
    return entry;
}
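A note on the pattern above, with a hedged sketch: building the clones inside an OwnedPointerVector means that if any new QuerySolution() or clone() call threw mid-loop, the solutions already created would still be freed. The local solutions object keeps ownership even after solutions.vector() is passed along, so the PlanCacheEntry constructor is presumably expected to copy what it needs before clone() returns. The sketch below reuses OwnedPointerVectorSketch from above; Widget and consume() are placeholders, not MongoDB APIs.

#include <iostream>

struct Widget {
    int value;
    explicit Widget(int v) : value(v) {}
};

// Stands in for a consumer such as the PlanCacheEntry constructor.
static void consume(const std::vector<Widget*>& widgets) {
    for (std::size_t i = 0; i < widgets.size(); ++i)
        std::cout << widgets[i]->value << '\n';
}

void buildAndConsume() {
    OwnedPointerVectorSketch<Widget> owned;
    for (int i = 0; i < 3; ++i) {
        // If an allocation threw mid-loop, "owned" would still free the
        // widgets already pushed, so the partial build cannot leak.
        owned.mutableVector().push_back(new Widget(i));
    }
    consume(owned.vector());  // read-only view, like solutions.vector()
}   // all three widgets are deleted here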
Example 2: buildApplyOpsCmd
BSONObj buildApplyOpsCmd( const OwnedPointerVector<ChunkType>& chunksToMerge,
                          const ChunkVersion& currShardVersion,
                          const ChunkVersion& newMergedVersion ) {
    BSONObjBuilder applyOpsCmdB;
    BSONArrayBuilder updatesB( applyOpsCmdB.subarrayStart( "applyOps" ) );

    // The chunk we'll be "expanding" is the first chunk
    const ChunkType* chunkToMerge = *chunksToMerge.begin();

    // Fill in details not tracked by metadata
    ChunkType mergedChunk;
    chunkToMerge->cloneTo( &mergedChunk );
    mergedChunk.setName( Chunk::genID( chunkToMerge->getNS(), chunkToMerge->getMin() ) );
    mergedChunk.setMax( ( *chunksToMerge.vector().rbegin() )->getMax() );
    mergedChunk.setVersion( newMergedVersion );

    updatesB.append( buildOpMergeChunk( mergedChunk ) );

    // Don't remove chunk we're expanding
    OwnedPointerVector<ChunkType>::const_iterator it = chunksToMerge.begin();
    for ( ++it; it != chunksToMerge.end(); ++it ) {
        ChunkType* chunkToMerge = *it;
        chunkToMerge->setName( Chunk::genID( chunkToMerge->getNS(), chunkToMerge->getMin() ) );
        updatesB.append( buildOpRemoveChunk( *chunkToMerge ) );
    }
    updatesB.done();

    applyOpsCmdB.append( "preCondition",
                         buildOpPrecond( chunkToMerge->getNS(),
                                         chunkToMerge->getShard(),
                                         currShardVersion ) );
    return applyOpsCmdB.obj();
}
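One detail worth flagging: the merged chunk's max key is read through vector().rbegin(), i.e. from the last chunk in the owned vector. Since vector() returns a const std::vector<ChunkType*>&, the same read could be spelled with back(); both forms assume the vector is non-empty. A tiny sketch, reusing the sketch class above (Item is a placeholder type, not ChunkType):

#include <cassert>

struct Item {
    int maxKey;
    explicit Item(int m) : maxKey(m) {}
};

void lastElementReads() {
    OwnedPointerVectorSketch<Item> items;
    items.mutableVector().push_back(new Item(1));
    items.mutableVector().push_back(new Item(9));
    // Equivalent reads of the last owned element; both require non-empty.
    assert((*items.vector().rbegin())->maxKey == 9);
    assert(items.vector().back()->maxKey == 9);
}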
Example 3: explainStages
// static
void Explain::explainStages(PlanExecutor* exec,
                            ExplainCommon::Verbosity verbosity,
                            BSONObjBuilder* out) {
    //
    // Step 1: run the stages as required by the verbosity level.
    //

    // Inspect the tree to see if there is a MultiPlanStage.
    MultiPlanStage* mps = getMultiPlanStage(exec->getRootStage());

    // Get stats of the winning plan from the trial period, if the verbosity level
    // is high enough and there was a runoff between multiple plans.
    auto_ptr<PlanStageStats> winningStatsTrial;
    if (verbosity >= ExplainCommon::EXEC_ALL_PLANS && NULL != mps) {
        winningStatsTrial.reset(exec->getStats());
        invariant(winningStatsTrial.get());
    }

    // If we need execution stats, then run the plan in order to gather the stats.
    Status executePlanStatus = Status::OK();
    if (verbosity >= ExplainCommon::EXEC_STATS) {
        executePlanStatus = exec->executePlan();
    }

    //
    // Step 2: collect plan stats (which also give the structure of the plan tree).
    //

    // Get stats for the winning plan.
    scoped_ptr<PlanStageStats> winningStats(exec->getStats());

    // Get stats for the rejected plans, if more than one plan was considered.
    OwnedPointerVector<PlanStageStats> allPlansStats;
    if (NULL != mps) {
        allPlansStats = mps->generateCandidateStats();
    }

    //
    // Step 3: use the stats trees to produce explain BSON.
    //

    CanonicalQuery* query = exec->getCanonicalQuery();
    if (verbosity >= ExplainCommon::QUERY_PLANNER) {
        generatePlannerInfo(query, winningStats.get(), allPlansStats.vector(), out);
    }

    if (verbosity >= ExplainCommon::EXEC_STATS) {
        BSONObjBuilder execBob(out->subobjStart("executionStats"));

        // If there is an execution error while running the query, the error is reported under
        // the "executionStats" section and the explain as a whole succeeds.
        execBob.append("executionSuccess", executePlanStatus.isOK());
        if (!executePlanStatus.isOK()) {
            execBob.append("errorMessage", executePlanStatus.reason());
            execBob.append("errorCode", executePlanStatus.code());
        }

        // Generate exec stats BSON for the winning plan.
        OperationContext* opCtx = exec->getOpCtx();
        long long totalTimeMillis = opCtx->getCurOp()->elapsedMillis();
        generateExecStats(winningStats.get(), verbosity, &execBob, totalTimeMillis);

        // Also generate exec stats for all plans, if the verbosity level is high enough.
        // These stats reflect what happened during the trial period that ranked the plans.
        if (verbosity >= ExplainCommon::EXEC_ALL_PLANS) {
            // If we ranked multiple plans against each other, then add stats collected
            // from the trial period of the winning plan. The "allPlansExecution" section
            // will contain an apples-to-apples comparison of the winning plan's stats against
            // all rejected plans' stats collected during the trial period.
            if (NULL != mps) {
                invariant(winningStatsTrial.get());
                allPlansStats.push_back(winningStatsTrial.release());
            }

            BSONArrayBuilder allPlansBob(execBob.subarrayStart("allPlansExecution"));
            for (size_t i = 0; i < allPlansStats.size(); ++i) {
                BSONObjBuilder planBob(allPlansBob.subobjStart());
                generateExecStats(allPlansStats[i], verbosity, &planBob);
                planBob.doneFast();
            }
            allPlansBob.doneFast();
        }

        execBob.doneFast();
    }

    generateServerInfo(out);
}
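Two ownership handoffs above are easy to miss. First, the assignment allPlansStats = mps->generateCandidateStats() transfers the candidate stats into the local owned vector. Second, push_back(winningStatsTrial.release()) moves the pointer out of the auto_ptr into the owned vector, so exactly one owner exists at any moment. A hedged sketch of the release-into-container move, reusing the sketch class above (Stats is a placeholder type):

#include <memory>

struct Stats {
    double score;
    Stats() : score(0.0) {}
};

void handOff() {
    std::auto_ptr<Stats> trial(new Stats());  // sole owner: the auto_ptr
    OwnedPointerVectorSketch<Stats> all;
    // release() gives up the auto_ptr's ownership and the container takes it.
    // Passing trial.get() instead would leave two owners and double-delete.
    all.push_back(trial.release());
}   // the container deletes the Stats object exactly once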
Example 4: updateRecord
/** Note: if the object shrinks a lot, we don't free up space, we leave extra at end of the record.
 */
const DiskLoc DataFileMgr::updateRecord(
        const char *ns,
        Collection* collection,
        Record *toupdate, const DiskLoc& dl,
        const char *_buf, int _len, OpDebug& debug, bool god) {

    dassert( toupdate == dl.rec() );

    BSONObj objOld = BSONObj::make(toupdate);
    BSONObj objNew(_buf);
    DEV verify( objNew.objsize() == _len );
    DEV verify( objNew.objdata() == _buf );

    if( !objNew.hasElement("_id") && objOld.hasElement("_id") ) {
        /* add back the old _id value if the update removes it. Note this implementation is slow
           (copies entire object multiple times), but this shouldn't happen often, so going for simple
           code, not speed.
        */
        BSONObjBuilder b;
        BSONElement e;
        verify( objOld.getObjectID(e) );
        b.append(e); // put _id first, for best performance
        b.appendElements(objNew);
        objNew = b.obj();
    }

    NamespaceString nsstring(ns);
    if (nsstring.coll() == "system.users") {
        V2UserDocumentParser parser;
        uassertStatusOK(parser.checkValidUserDocument(objNew));
    }

    uassert( 13596 , str::stream() << "cannot change _id of a document old:" << objOld << " new:" << objNew,
             objNew["_id"] == objOld["_id"]);

    /* duplicate key check. we descend the btree twice - once for this check, and once for the actual inserts, further
       below. that is suboptimal, but it's pretty complicated to do it the other way without rollbacks...
    */
    OwnedPointerVector<UpdateTicket> updateTickets;
    updateTickets.mutableVector().resize(collection->details()->getTotalIndexCount());
    for (int i = 0; i < collection->details()->getTotalIndexCount(); ++i) {
        auto_ptr<IndexDescriptor> descriptor(CatalogHack::getDescriptor(collection->details(), i));
        auto_ptr<IndexAccessMethod> iam(CatalogHack::getIndex(descriptor.get()));
        InsertDeleteOptions options;
        options.logIfError = false;
        options.dupsAllowed = !(KeyPattern::isIdKeyPattern(descriptor->keyPattern())
                                || descriptor->unique())
                              || ignoreUniqueIndex(descriptor->getOnDisk());
        updateTickets.mutableVector()[i] = new UpdateTicket();
        Status ret = iam->validateUpdate(objOld, objNew, dl, options,
                                         updateTickets.mutableVector()[i]);
        if (Status::OK() != ret) {
            uasserted(ASSERT_ID_DUPKEY, "Update validation failed: " + ret.toString());
        }
    }

    if ( toupdate->netLength() < objNew.objsize() ) {
        // doesn't fit. reallocate -----------------------------------------------------
        moveCounter.increment();
        uassert( 10003,
                 "failing update: objects in a capped ns cannot grow",
                 !(collection && collection->details()->isCapped()));
        collection->details()->paddingTooSmall();
        deleteRecord(ns, toupdate, dl);
        DiskLoc res = insert(ns, objNew.objdata(), objNew.objsize(), false, god);

        if (debug.nmoved == -1) // default of -1 rather than 0
            debug.nmoved = 1;
        else
            debug.nmoved += 1;

        return res;
    }

    collection->infoCache()->notifyOfWriteOp();
    collection->details()->paddingFits();

    debug.keyUpdates = 0;

    for (int i = 0; i < collection->details()->getTotalIndexCount(); ++i) {
        auto_ptr<IndexDescriptor> descriptor(CatalogHack::getDescriptor(collection->details(), i));
        auto_ptr<IndexAccessMethod> iam(CatalogHack::getIndex(descriptor.get()));
        int64_t updatedKeys;
        Status ret = iam->update(*updateTickets.vector()[i], &updatedKeys);
        if (Status::OK() != ret) {
            // This shouldn't happen unless something disastrous occurred.
            massert(16799, "update failed: " + ret.toString(), false);
        }
        debug.keyUpdates += updatedKeys;
    }

    // update in place
    int sz = objNew.objsize();
    memcpy(getDur().writingPtr(toupdate->data(), sz), objNew.objdata(), sz);
    return dl;
}
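The ticket handling above uses a resize-then-assign idiom: mutableVector().resize(n) first fills every slot with a null pointer, and each slot is then assigned a freshly allocated UpdateTicket. This is safe precisely because deleting a null pointer is a no-op, so the container can be destroyed even if the loop exits early (for instance via uasserted). A minimal sketch of the idiom, reusing the sketch class above (Ticket is a placeholder):

#include <cstddef>

struct Ticket {
    bool valid;
    Ticket() : valid(false) {}
};

void presizeAndFill(std::size_t n) {
    OwnedPointerVectorSketch<Ticket> tickets;
    // resize() fills the n slots with null pointers first...
    tickets.mutableVector().resize(n);
    for (std::size_t i = 0; i < n; ++i) {
        // ...and each slot then receives an owned object. If this loop is
        // abandoned early, the unfilled null slots are still safe to delete.
        tickets.mutableVector()[i] = new Ticket();
    }
}   // every non-null ticket is freed here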
Example 5: if
//......... portions of this code omitted .........
    copyStatus = copyFrozenCollection(configLoc,
                                      ChunkType::ConfigNS,
                                      ChunkType::ConfigNS + backupSuffix);
    if (!copyStatus.isOK()) {
        *errMsg = stream() << "could not copy " << ChunkType::ConfigNS << " to "
                           << (ChunkType::ConfigNS + backupSuffix) << causedBy(copyStatus);
        return false;
    }

    //
    // Go through sharded collections one-by-one and add epochs where missing
    //

    for (map<string, CollectionType*>::const_iterator it = collections.begin();
         it != collections.end(); ++it)
    {
        // Create a copy so that we can change the epoch later
        CollectionType collection;
        it->second->cloneTo(&collection);

        log() << "checking epochs for " << collection.getNS() << " collection..." << endl;

        OID epoch = collection.getEpoch();

        //
        // Go through chunks to find epoch if we haven't found it or to verify epoch is the same
        //

        OwnedPointerVector<ChunkType> ownedChunks;
        const vector<ChunkType*>& chunks = ownedChunks.vector();

        Status findChunksStatus = findAllChunks(configLoc, collection.getNS(), &ownedChunks);
        if (!findChunksStatus.isOK()) {
            *errMsg = stream() << "could not read chunks from config server"
                               << causedBy(findChunksStatus);
            return false;
        }

        for (vector<ChunkType*>::const_iterator chunkIt = chunks.begin();
             chunkIt != chunks.end(); ++chunkIt)
        {
            const ChunkType& chunk = *(*chunkIt);

            // If our chunk epoch is set and doesn't match
            if (epoch.isSet() && chunk.getVersion().epoch().isSet()
                && chunk.getVersion().epoch() != epoch)
            {
                *errMsg = stream() << "chunk epoch for " << chunk.toString() << " in "
                                   << collection.getNS() << " does not match found epoch "
                                   << epoch;
                return false;
            }
            else if (!epoch.isSet() && chunk.getVersion().epoch().isSet()) {
                epoch = chunk.getVersion().epoch();
            }
        }
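The alias taken above is worth a comment: the const reference chunks is bound to ownedChunks.vector() before findAllChunks() fills the container through its out-parameter, and the reference then observes every element inserted later, since vector() returns a reference to the underlying storage rather than a copy. A hedged sketch, reusing the sketch class above (Chunk and fillThree() are placeholders for ChunkType and findAllChunks()):

#include <cassert>
#include <vector>

struct Chunk {
    int id;
    explicit Chunk(int i) : id(i) {}
};

static void fillThree(OwnedPointerVectorSketch<Chunk>* out) {
    for (int i = 0; i < 3; ++i)
        out->mutableVector().push_back(new Chunk(i));
}

void aliasThenFill() {
    OwnedPointerVectorSketch<Chunk> owned;
    const std::vector<Chunk*>& chunks = owned.vector();  // alias, not a copy
    fillThree(&owned);
    // The reference tracks the container, so it sees all three chunks.
    assert(chunks.size() == 3);
}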
Example 6: getExtentManager
StatusWith<DiskLoc> Collection::updateDocument( const DiskLoc& oldLocation,
                                                const BSONObj& objNew,
                                                bool enforceQuota,
                                                OpDebug* debug ) {
    Record* oldRecord = getExtentManager()->recordFor( oldLocation );
    BSONObj objOld = BSONObj::make( oldRecord );

    if ( objOld.hasElement( "_id" ) ) {
        BSONElement oldId = objOld["_id"];
        BSONElement newId = objNew["_id"];
        if ( oldId != newId )
            return StatusWith<DiskLoc>( ErrorCodes::InternalError,
                                        "in Collection::updateDocument _id mismatch",
                                        13596 );
    }

    if ( ns().coll() == "system.users" ) {
        // XXX - andy and spencer think this should go away now
        V2UserDocumentParser parser;
        Status s = parser.checkValidUserDocument(objNew);
        if ( !s.isOK() )
            return StatusWith<DiskLoc>( s );
    }

    /* duplicate key check. we descend the btree twice - once for this check, and once for the actual inserts, further
       below. that is suboptimal, but it's pretty complicated to do it the other way without rollbacks...
    */
    OwnedPointerVector<UpdateTicket> updateTickets;
    updateTickets.mutableVector().resize(_indexCatalog.numIndexesTotal());
    for (int i = 0; i < _indexCatalog.numIndexesTotal(); ++i) {
        IndexDescriptor* descriptor = _indexCatalog.getDescriptor( i );
        IndexAccessMethod* iam = _indexCatalog.getIndex( descriptor );
        InsertDeleteOptions options;
        options.logIfError = false;
        options.dupsAllowed =
            !(KeyPattern::isIdKeyPattern(descriptor->keyPattern()) || descriptor->unique())
            || ignoreUniqueIndex(descriptor);
        updateTickets.mutableVector()[i] = new UpdateTicket();
        Status ret = iam->validateUpdate(objOld, objNew, oldLocation, options,
                                         updateTickets.mutableVector()[i]);
        if ( !ret.isOK() ) {
            return StatusWith<DiskLoc>( ret );
        }
    }

    if ( oldRecord->netLength() < objNew.objsize() ) {
        // doesn't fit, have to move to new location

        if ( _details->isCapped() )
            return StatusWith<DiskLoc>( ErrorCodes::InternalError,
                                        "failing update: objects in a capped ns cannot grow",
                                        10003 );

        moveCounter.increment();
        _details->paddingTooSmall();

        // unindex old record, don't delete
        // this way, if inserting new doc fails, we can re-index this one
        ClientCursor::aboutToDelete(_ns.ns(), _details, oldLocation);
        _indexCatalog.unindexRecord( objOld, oldLocation, true );

        if ( debug ) {
            if (debug->nmoved == -1) // default of -1 rather than 0
                debug->nmoved = 1;
            else
                debug->nmoved += 1;
        }

        StatusWith<DiskLoc> loc = insertDocument( objNew, enforceQuota );

        if ( loc.isOK() ) {
            // insert successful, now lets deallocate the old location
            // remember its already unindexed
            _recordStore.deallocRecord( oldLocation, oldRecord );
        }
        else {
            // new doc insert failed, so lets re-index the old document and location
            _indexCatalog.indexRecord( objOld, oldLocation );
        }

        return loc;
    }

    _infoCache.notifyOfWriteOp();
    _details->paddingFits();

    if ( debug )
        debug->keyUpdates = 0;

    for (int i = 0; i < _indexCatalog.numIndexesTotal(); ++i) {
        IndexDescriptor* descriptor = _indexCatalog.getDescriptor( i );
        IndexAccessMethod* iam = _indexCatalog.getIndex( descriptor );
        int64_t updatedKeys;
        Status ret = iam->update(*updateTickets.vector()[i], &updatedKeys);
        if ( !ret.isOK() )
            return StatusWith<DiskLoc>( ret );
        if ( debug )
//......... portions of this code omitted .........
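Unlike Example 4, which aborts with uasserted(), this version reports failures by returning a StatusWith from inside the validation loop. Those early returns cannot leak the tickets created so far, because the stack-local OwnedPointerVector frees them on every exit path. A minimal sketch of that guarantee (reusing the placeholder Ticket type from the note after Example 4):

#include <cstddef>

bool validateAll(std::size_t n) {
    OwnedPointerVectorSketch<Ticket> tickets;
    tickets.mutableVector().resize(n);
    for (std::size_t i = 0; i < n; ++i) {
        tickets.mutableVector()[i] = new Ticket();
        if (i == 1) {
            return false;  // early exit: tickets created so far are freed
        }
    }
    return true;  // success path: tickets are freed here too, after use
}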
Example 7: fillOutResults
PlanStage::StageState TextStage::fillOutResults() {
    Database* db = cc().database();
    Collection* collection = db->getCollection( _params.ns );
    if (NULL == collection) {
        warning() << "TextStage params namespace error";
        return PlanStage::FAILURE;
    }
    vector<IndexDescriptor*> idxMatches;
    collection->getIndexCatalog()->findIndexByType("text", idxMatches);
    if (1 != idxMatches.size()) {
        warning() << "Expected exactly one text index";
        return PlanStage::FAILURE;
    }

    // Get all the index scans for each term in our query.
    OwnedPointerVector<PlanStage> scanners;
    for (size_t i = 0; i < _params.query.getTerms().size(); i++) {
        const string& term = _params.query.getTerms()[i];
        IndexScanParams params;
        params.bounds.startKey = FTSIndexFormat::getIndexKey(MAX_WEIGHT, term,
                                                             _params.indexPrefix);
        params.bounds.endKey = FTSIndexFormat::getIndexKey(0, term, _params.indexPrefix);
        params.bounds.endKeyInclusive = true;
        params.bounds.isSimpleRange = true;
        params.descriptor = idxMatches[0];
        params.direction = -1;
        IndexScan* ixscan = new IndexScan(params, _ws, NULL);
        scanners.mutableVector().push_back(ixscan);
    }

    // Map: diskloc -> aggregate score for doc.
    typedef unordered_map<DiskLoc, double, DiskLoc::Hasher> ScoreMap;
    ScoreMap scores;

    // For each index scan, read all results and store scores.
    size_t currentIndexScanner = 0;
    while (currentIndexScanner < scanners.size()) {
        BSONObj keyObj;
        DiskLoc loc;
        WorkingSetID id;
        PlanStage::StageState state = scanners.vector()[currentIndexScanner]->work(&id);

        if (PlanStage::ADVANCED == state) {
            WorkingSetMember* wsm = _ws->get(id);
            IndexKeyDatum& keyDatum = wsm->keyData.back();
            filterAndScore(keyDatum.keyData, wsm->loc, &scores[wsm->loc]);
            _ws->free(id);
        }
        else if (PlanStage::IS_EOF == state) {
            // Done with this scan.
            ++currentIndexScanner;
        }
        else if (PlanStage::NEED_FETCH == state) {
            // We're calling work() on ixscans and they have no way to return a fetch.
            verify(false);
        }
        else if (PlanStage::NEED_TIME == state) {
            // We are a blocking stage, so ignore scanner's request for more time.
        }
        else {
            verify(PlanStage::FAILURE == state);
            warning() << "error from index scan during text stage: invalid FAILURE state";
            return PlanStage::FAILURE;
        }
    }

    // Filter for phrases and negative terms, score and truncate.
    for (ScoreMap::iterator i = scores.begin(); i != scores.end(); ++i) {
        DiskLoc loc = i->first;
        double score = i->second;

        // Ignore non-matched documents.
        if (score < 0) {
            continue;
        }

        // Filter for phrases and negated terms
        if (_params.query.hasNonTermPieces()) {
            if (!_ftsMatcher.matchesNonTerm(loc.obj())) {
                continue;
            }
        }

        // Add results to working set as LOC_AND_UNOWNED_OBJ initially.
        // On invalidation, we copy the object and change the state to
        // OWNED_OBJ.
        // Fill out a WSM.
        WorkingSetID id = _ws->allocate();
        WorkingSetMember* member = _ws->get(id);
        member->loc = loc;
        member->obj = member->loc.obj();
        member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
        member->addComputed(new TextScoreComputedData(score));
        _results.push_back(id);
        _wsidByDiskLoc[member->loc] = id;
    }
    _filledOutResults = true;
//......... portions of this code omitted .........
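A closing observation on Example 7: the stage builds one IndexScan per query term into a stage-local owned vector, drives each scanner to EOF through scanners.vector()[i]->work(), and relies on the container to delete every scan when fillOutResults() returns, including the early FAILURE returns. A hedged sketch of that drive-owned-workers loop, reusing the sketch class above (Worker is a placeholder for IndexScan/PlanStage):

#include <cstddef>

struct Worker {
    int left;
    explicit Worker(int n) : left(n) {}
    bool work() { return --left > 0; }  // true while more work remains
};

void driveAll() {
    OwnedPointerVectorSketch<Worker> scanners;
    for (int i = 0; i < 3; ++i)
        scanners.mutableVector().push_back(new Worker(i + 1));

    std::size_t current = 0;
    while (current < scanners.size()) {
        // Keep working the current scanner until it reports it is done,
        // then advance -- the same shape as the while loop in Example 7.
        if (!scanners.vector()[current]->work())
            ++current;
    }
}   // all workers are deleted here, on every return path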