This article collects and summarizes typical usage examples of the Collection class in C++. If you have been struggling with questions like: what does the C++ Collection class actually do? How is it used? What do real uses of C++ Collection look like? Then the hand-picked class code examples here may help.
Below you will find 15 code examples of the Collection class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
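Note that the snippets are drawn from several different code bases: most use MongoDB's mongo::Collection, while Example 4 uses a Boost.Geometry-style buffer collection and Example 7 uses Akonadi::Collection, so the Collection API differs between examples. The MongoDB snippets share one access pattern: acquire the collection under a read lock, null-check the pointer (the namespace may not exist), and only then call Collection methods. Here is a minimal sketch of that pattern, distilled from Examples 1, 6, and 10; it assumes the mongod-internal headers those examples build against, and countRecords is a hypothetical helper name rather than actual server code.

// Minimal sketch (assumes the mongod-internal APIs used in the examples below;
// countRecords is a hypothetical helper, not part of the server code base).
#include <string>
#include "mongo/db/catalog/collection.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/operation_context.h"

namespace mongo {
// Look up a collection for reading and return its record count,
// or -1 if the namespace does not exist.
long long countRecords(OperationContext* txn, const std::string& ns) {
    AutoGetCollectionForRead ctx(txn, ns); // RAII: takes and releases the read lock
    Collection* collection = ctx.getCollection();
    if (NULL == collection) {
        return -1; // every MongoDB example below null-checks before use
    }
    return collection->numRecords(txn);
}
} // namespace mongo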
Example 1: runCount
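This snippet, from MongoDB's count command path, acquires the collection with AutoGetCollectionForRead, parses the command into a CountRequest, short-circuits empty queries with Collection::numRecords(), and otherwise builds a count executor and pulls nCounted out of the root CountStage's stats.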
long long runCount(OperationContext* txn,
const string& ns,
const BSONObj &cmd,
string &err,
int &errCode) {
AutoGetCollectionForRead ctx(txn, ns);
Collection* collection = ctx.getCollection();
if (NULL == collection) {
err = "ns missing";
return -1;
}
const NamespaceString nss(ns);
CountRequest request;
CmdCount* countComm = static_cast<CmdCount*>(Command::findCommand("count"));
Status parseStatus = countComm->parseRequest(nss.db().toString(), cmd, &request);
if (!parseStatus.isOK()) {
err = parseStatus.reason();
errCode = parseStatus.code();
return -1;
}
if (request.query.isEmpty()) {
return applySkipLimit(collection->numRecords(txn), cmd);
}
PlanExecutor* rawExec;
Status getExecStatus = getExecutorCount(txn,
collection,
request,
PlanExecutor::YIELD_AUTO,
&rawExec);
if (!getExecStatus.isOK()) {
err = getExecStatus.reason();
errCode = getExecStatus.code();
return -1;
}
scoped_ptr<PlanExecutor> exec(rawExec);
// Store the plan summary string in CurOp.
if (NULL != txn->getCurOp()) {
txn->getCurOp()->debug().planSummary = Explain::getPlanSummary(exec.get());
}
Status execPlanStatus = exec->executePlan();
if (!execPlanStatus.isOK()) {
err = execPlanStatus.reason();
errCode = execPlanStatus.code();
return -2;
}
// Plan is done executing. We just need to pull the count out of the root stage.
invariant(STAGE_COUNT == exec->getRootStage()->stageType());
CountStage* countStage = static_cast<CountStage*>(exec->getRootStage());
const CountStats* countStats =
static_cast<const CountStats*>(countStage->getSpecificStats());
return countStats->nCounted;
}
Example 2: verify
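MultiPlanRunner::getNext first drains results buffered while ranking candidate plans, copying out the object and/or DiskLoc the caller asked for. If the best plan later errors and a backup (non-blocking-sort) solution exists, it evicts the failed solution from the collection's PlanCache and retries with the backup; once the best plan advances, the backup is deleted.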
Runner::RunnerState MultiPlanRunner::getNext(BSONObj* objOut, DiskLoc* dlOut) {
if (_killed) { return Runner::RUNNER_DEAD; }
if (_failure) { return Runner::RUNNER_ERROR; }
// If we haven't picked the best plan yet...
if (NULL == _bestPlan) {
if (!pickBestPlan(NULL, objOut)) {
verify(_failure || _killed);
if (_killed) { return Runner::RUNNER_DEAD; }
if (_failure) { return Runner::RUNNER_ERROR; }
}
}
// Look for an already produced result that provides the data the caller wants.
while (!_alreadyProduced.empty()) {
WorkingSetID id = _alreadyProduced.front();
_alreadyProduced.pop_front();
WorkingSetMember* member = _bestPlan->getWorkingSet()->get(id);
// Note that this copies code from PlanExecutor.
if (NULL != objOut) {
if (WorkingSetMember::LOC_AND_IDX == member->state) {
if (1 != member->keyData.size()) {
_bestPlan->getWorkingSet()->free(id);
// If the caller needs the key data and the WSM doesn't have it, drop the
// result and carry on.
continue;
}
*objOut = member->keyData[0].keyData;
}
else if (member->hasObj()) {
*objOut = member->obj;
}
else {
// If the caller needs an object and the WSM doesn't have it, drop and
// try the next result.
_bestPlan->getWorkingSet()->free(id);
continue;
}
}
if (NULL != dlOut) {
if (member->hasLoc()) {
*dlOut = member->loc;
}
else {
// If the caller needs a DiskLoc and the WSM doesn't have it, drop and carry on.
_bestPlan->getWorkingSet()->free(id);
continue;
}
}
// If we're here, the caller has all the data needed and we've set the out
// parameters. Remove the result from the WorkingSet.
_bestPlan->getWorkingSet()->free(id);
return Runner::RUNNER_ADVANCED;
}
RunnerState state = _bestPlan->getNext(objOut, dlOut);
if (Runner::RUNNER_ERROR == state && (NULL != _backupSolution)) {
QLOG() << "Best plan errored out switching to backup\n";
// Uncache the bad solution if we fall back
// on the backup solution.
//
// XXX: Instead of uncaching we should find a way for the
// cached plan runner to fall back on a different solution
// if the best solution fails. Alternatively we could try to
// defer cache insertion to be after the first produced result.
Database* db = cc().database();
verify(NULL != db);
Collection* collection = db->getCollection(_query->ns());
verify(NULL != collection);
PlanCache* cache = collection->infoCache()->getPlanCache();
cache->remove(*_query);
_bestPlan.reset(_backupPlan);
_backupPlan = NULL;
_bestSolution.reset(_backupSolution);
_backupSolution = NULL;
_alreadyProduced = _backupAlreadyProduced;
return getNext(objOut, dlOut);
}
if (NULL != _backupSolution && Runner::RUNNER_ADVANCED == state) {
QLOG() << "Best plan had a blocking sort, became unblocked, deleting backup plan\n";
delete _backupSolution;
delete _backupPlan;
_backupSolution = NULL;
_backupPlan = NULL;
// TODO: free from WS?
_backupAlreadyProduced.clear();
}
return state;
}
Example 3: run
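An older geoNear command handler: it resolves the collection and its IndexCatalog, validates the 'near' point, and rewrites the command into an ordinary $near/$nearSphere query object, folding in the maxDistance, minDistance, num/limit, includeLocs, and distanceMultiplier options.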
bool run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
const string ns = dbname + "." + cmdObj.firstElement().valuestr();
if (!cmdObj["start"].eoo()) {
errmsg = "using deprecated 'start' argument to geoNear";
return false;
}
Client::ReadContext ctx(txn, ns);
Database* db = ctx.ctx().db();
if ( !db ) {
errmsg = "can't find ns";
return false;
}
Collection* collection = db->getCollection( txn, ns );
if ( !collection ) {
errmsg = "can't find ns";
return false;
}
IndexCatalog* indexCatalog = collection->getIndexCatalog();
// cout << "raw cmd " << cmdObj.toString() << endl;
// We seek to populate this.
string nearFieldName;
bool using2DIndex = false;
if (!getFieldName(txn, collection, indexCatalog, &nearFieldName, &errmsg, &using2DIndex)) {
return false;
}
PointWithCRS point;
uassert(17304, "'near' field must be point",
GeoParser::parseQueryPoint(cmdObj["near"], &point).isOK());
bool isSpherical = cmdObj["spherical"].trueValue();
if (!using2DIndex) {
uassert(17301, "2dsphere index must have spherical: true", isSpherical);
}
// Build the $near expression for the query.
BSONObjBuilder nearBob;
if (isSpherical) {
nearBob.append("$nearSphere", cmdObj["near"].Obj());
}
else {
nearBob.append("$near", cmdObj["near"].Obj());
}
if (!cmdObj["maxDistance"].eoo()) {
uassert(17299, "maxDistance must be a number",cmdObj["maxDistance"].isNumber());
nearBob.append("$maxDistance", cmdObj["maxDistance"].number());
}
if (!cmdObj["minDistance"].eoo()) {
uassert(17298, "minDistance doesn't work on 2d index", !using2DIndex);
uassert(17300, "minDistance must be a number",cmdObj["minDistance"].isNumber());
nearBob.append("$minDistance", cmdObj["minDistance"].number());
}
if (!cmdObj["uniqueDocs"].eoo()) {
warning() << ns << ": ignoring deprecated uniqueDocs option in geoNear command";
}
// And, build the full query expression.
BSONObjBuilder queryBob;
queryBob.append(nearFieldName, nearBob.obj());
if (!cmdObj["query"].eoo() && cmdObj["query"].isABSONObj()) {
queryBob.appendElements(cmdObj["query"].Obj());
}
BSONObj rewritten = queryBob.obj();
// cout << "rewritten query: " << rewritten.toString() << endl;
int numWanted = 100;
const char* limitName = !cmdObj["num"].eoo() ? "num" : "limit";
BSONElement eNumWanted = cmdObj[limitName];
if (!eNumWanted.eoo()) {
uassert(17303, "limit must be number", eNumWanted.isNumber());
numWanted = eNumWanted.numberInt();
uassert(17302, "limit must be >=0", numWanted >= 0);
}
bool includeLocs = false;
if (!cmdObj["includeLocs"].eoo()) {
includeLocs = cmdObj["includeLocs"].trueValue();
}
double distanceMultiplier = 1.0;
BSONElement eDistanceMultiplier = cmdObj["distanceMultiplier"];
if (!eDistanceMultiplier.eoo()) {
uassert(17296, "distanceMultiplier must be a number", eDistanceMultiplier.isNumber());
distanceMultiplier = eDistanceMultiplier.number();
uassert(17297, "distanceMultiplier must be non-negative", distanceMultiplier >= 0);
}
BSONObj projObj = BSON("$pt" << BSON("$meta" << LiteParsedQuery::metaGeoNearPoint) <<
"$dis" << BSON("$meta" << LiteParsedQuery::metaGeoNearDistance));
//......... part of the code omitted here .........
Example 4: iterate
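A Boost.Geometry-style buffer builder; the Collection here accumulates buffer pieces. The loop offsets each non-degenerate segment to the requested side, joins consecutive offset segments at their intersection via the JoinStrategy, and finally either generates the closing corner (ring_tag) or a flat end (linestring_tag).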
static inline void iterate(Collection& collection,
Iterator begin, Iterator end,
buffer_side_selector side,
DistanceStrategy const& distance,
JoinStrategy const& join_strategy, bool close = false)
{
output_point_type previous_p1, previous_p2;
output_point_type first_p1, first_p2;
bool first = true;
Iterator it = begin;
// We want to memorize the last vector too.
typedef BOOST_TYPEOF(*it) point_type;
point_type last_ip1, last_ip2;
for (Iterator prev = it++; it != end; ++it)
{
if (! detail::equals::equals_point_point(*prev, *it))
{
output_point_type p1, p2;
last_ip1 = *prev;
last_ip2 = *it;
generate_side(*prev, *it, side, distance, p1, p2);
std::vector<output_point_type> range_out;
if (! first)
{
output_point_type p;
segment_type s1(p1, p2);
segment_type s2(previous_p1, previous_p2);
if (line_line_intersection<output_point_type, segment_type>::apply(s1, s2, p))
{
join_strategy.apply(p, *prev, previous_p2, p1,
distance.apply(*prev, *it, side),
range_out);
}
}
else
{
first = false;
first_p1 = p1;
first_p2 = p2;
}
if (! range_out.empty())
{
collection.add_piece(buffered_join, *prev, range_out);
range_out.clear();
}
collection.add_piece(buffered_segment, *prev, *it, p1, p2);
previous_p1 = p1;
previous_p2 = p2;
prev = it;
}
}
// Might be replaced by specialization
if (boost::is_same<Tag, ring_tag>::value)
{
// Generate closing corner
output_point_type p;
segment_type s1(previous_p1, previous_p2);
segment_type s2(first_p1, first_p2);
if (line_line_intersection<output_point_type, segment_type>::apply(s1, s2, p))
{
std::vector<output_point_type> range_out;
join_strategy.apply(p, *begin, previous_p2, first_p1,
distance.apply(*(end - 1), *begin, side),
range_out);
if (! range_out.empty())
{
collection.add_piece(buffered_join, *begin, range_out);
}
}
// Buffer is closed automatically by last closing corner (NOT FOR OPEN POLYGONS - TODO)
}
else if (boost::is_same<Tag, linestring_tag>::value)
{
// Assume flat-end-strategy for now
// TODO fix this (approach) for one-side buffer (1.5 - -1.0)
output_point_type rp1, rp2;
generate_side(last_ip2, last_ip1,
side == buffer_side_left
? buffer_side_right
: buffer_side_left,
distance, rp2, rp1);
// For flat end:
std::vector<output_point_type> range_out;
range_out.push_back(previous_p2);
if (close)
{
range_out.push_back(rp2);
}
collection.add_piece(buffered_flat_end, range_out);
//......... part of the code omitted here .........
Example 5: LOG
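Database::dropCollection: it refuses to drop system namespaces (except an inactive system.profile), drops all indexes first and aborts if that fails, invalidates cursors and Top statistics, drops the namespace, clears the collection cache, and in debug builds verifies no stale index-collection cache entries remain.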
Status Database::dropCollection( const StringData& fullns ) {
LOG(1) << "dropCollection: " << fullns << endl;
massertNamespaceNotIndex( fullns, "dropCollection" );
Collection* collection = getCollection( fullns );
if ( !collection ) {
// collection doesn't exist
return Status::OK();
}
_initForWrites();
{
NamespaceString s( fullns );
verify( s.db() == _name );
if( s.isSystem() ) {
if( s.coll() == "system.profile" ) {
if ( _profile != 0 )
return Status( ErrorCodes::IllegalOperation,
"turn off profiling before dropping system.profile collection" );
}
else {
return Status( ErrorCodes::IllegalOperation, "can't drop system ns" );
}
}
}
BackgroundOperation::assertNoBgOpInProgForNs( fullns );
audit::logDropCollection( currentClient.get(), fullns );
try {
Status s = collection->getIndexCatalog()->dropAllIndexes( true );
if ( !s.isOK() ) {
warning() << "could not drop collection, trying to drop indexes"
<< fullns << " because of " << s.toString();
return s;
}
}
catch( DBException& e ) {
stringstream ss;
ss << "drop: dropIndexes for collection failed. cause: " << e.what();
ss << ". See http://dochub.mongodb.org/core/data-recovery";
warning() << ss.str() << endl;
return Status( ErrorCodes::InternalError, ss.str() );
}
verify( collection->_details->getTotalIndexCount() == 0 );
LOG(1) << "\t dropIndexes done" << endl;
ClientCursor::invalidate( fullns );
Top::global.collectionDropped( fullns );
Status s = _dropNS( fullns );
_clearCollectionCache( fullns ); // we want to do this always
if ( !s.isOK() )
return s;
DEV {
// check all index collection entries are gone
string nstocheck = fullns.toString() + ".$";
scoped_lock lk( _collectionLock );
for ( CollectionMap::const_iterator i = _collections.begin();
i != _collections.end();
++i ) {
string temp = i->first;
if ( temp.find( nstocheck ) != 0 )
continue;
log() << "after drop, bad cache entries for: "
<< fullns << " have " << temp;
verify(0);
}
}
return Status::OK();
}
Example 6: tracker
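MigrationSourceManager::clone streams the documents recorded in _cloneLocs into a BSON array, sizing the builder from the collection's average object size, yielding periodically via the ElapsedTracker, skipping documents that have since been deleted, and ending a batch before it would exceed BSONObjMaxUserSize.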
bool MigrationSourceManager::clone(OperationContext* txn, string& errmsg, BSONObjBuilder& result) {
ElapsedTracker tracker(internalQueryExecYieldIterations, internalQueryExecYieldPeriodMS);
int allocSize = 0;
{
AutoGetCollectionForRead ctx(txn, _getNS());
stdx::lock_guard<stdx::mutex> sl(_mutex);
if (!_active) {
errmsg = "not active";
return false;
}
Collection* collection = ctx.getCollection();
if (!collection) {
errmsg = str::stream() << "collection " << _ns << " does not exist";
return false;
}
allocSize = std::min(
BSONObjMaxUserSize,
static_cast<int>((12 + collection->averageObjectSize(txn)) * cloneLocsRemaining()));
}
bool isBufferFilled = false;
BSONArrayBuilder clonedDocsArrayBuilder(allocSize);
while (!isBufferFilled) {
AutoGetCollectionForRead ctx(txn, _getNS());
stdx::lock_guard<stdx::mutex> sl(_mutex);
if (!_active) {
errmsg = "not active";
return false;
}
// TODO: fix SERVER-16540 race
Collection* collection = ctx.getCollection();
if (!collection) {
errmsg = str::stream() << "collection " << _ns << " does not exist";
return false;
}
stdx::lock_guard<stdx::mutex> lk(_cloneLocsMutex);
std::set<RecordId>::iterator cloneLocsIter = _cloneLocs.begin();
for (; cloneLocsIter != _cloneLocs.end(); ++cloneLocsIter) {
if (tracker.intervalHasElapsed()) // should I yield?
break;
RecordId recordId = *cloneLocsIter;
Snapshotted<BSONObj> doc;
if (!collection->findDoc(txn, recordId, &doc)) {
// doc was deleted
continue;
}
// Use the builder size instead of accumulating 'doc's size so that we take
// into consideration the overhead of BSONArray indices, and *always*
// append one doc.
if (clonedDocsArrayBuilder.arrSize() != 0 &&
(clonedDocsArrayBuilder.len() + doc.value().objsize() + 1024) >
BSONObjMaxUserSize) {
isBufferFilled = true; // break out of outer while loop
break;
}
clonedDocsArrayBuilder.append(doc.value());
}
_cloneLocs.erase(_cloneLocs.begin(), cloneLocsIter);
// Note: must be holding _cloneLocsMutex, don't move this inside while condition!
if (_cloneLocs.empty()) {
break;
}
}
result.appendArray("objects", clonedDocsArrayBuilder.arr());
return true;
}
Example 7: testLink
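An Akonadi integration test: it creates a search folder, links three items into it with LinkJob, uses a Monitor and QSignalSpy pair to verify the itemLinked signals and item payloads, fetches the items back from the collection, then unlinks them and checks the matching itemUnlinked signals.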
void testLink()
{
SearchCreateJob *create = new SearchCreateJob( "linkTestFolder", "dummy query", this );
AKVERIFYEXEC( create );
CollectionFetchJob *list = new CollectionFetchJob( Collection( 1 ), CollectionFetchJob::Recursive, this );
AKVERIFYEXEC( list );
Collection col;
foreach ( const Collection &c, list->collections() ) {
if ( c.name() == "linkTestFolder" ) {
col = c;
}
}
QVERIFY( col.isValid() );
Item::List items;
items << Item( 3 ) << Item( 4 ) << Item( 6 );
Monitor *monitor = new Monitor( this );
monitor->setCollectionMonitored( col );
monitor->itemFetchScope().fetchFullPayload();
qRegisterMetaType<Akonadi::Collection>();
qRegisterMetaType<Akonadi::Item>();
QSignalSpy lspy( monitor, SIGNAL(itemLinked(Akonadi::Item,Akonadi::Collection)) );
QSignalSpy uspy( monitor, SIGNAL(itemUnlinked(Akonadi::Item,Akonadi::Collection)) );
QVERIFY( lspy.isValid() );
QVERIFY( uspy.isValid() );
LinkJob *link = new LinkJob( col, items, this );
AKVERIFYEXEC( link );
QTest::qWait( 1000 );
QVERIFY( uspy.isEmpty() );
QCOMPARE( lspy.count(), 3 );
QList<QVariant> arg = lspy.takeFirst();
Item item = arg.at( 0 ).value<Item>();
QCOMPARE( item.mimeType(), QString::fromLatin1( "application/octet-stream" ) );
QVERIFY( item.hasPayload<QByteArray>() );
lspy.clear();
ItemFetchJob *fetch = new ItemFetchJob( col );
AKVERIFYEXEC( fetch );
QCOMPARE( fetch->items().count(), 3 );
foreach ( const Item &item, fetch->items() ) {
QVERIFY( items.contains( item ) );
}
UnlinkJob *unlink = new UnlinkJob( col, items, this );
AKVERIFYEXEC( unlink );
QTest::qWait( 1000 );
QVERIFY( lspy.isEmpty() );
QCOMPARE( uspy.count(), 3 );
fetch = new ItemFetchJob( col );
AKVERIFYEXEC( fetch );
QCOMPARE( fetch->items().count(), 0 );
}
Example 8: cachedHashedLock
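DBHashCmd::hashCollection returns a cached hash when the namespace is cachable; otherwise it MD5-hashes every document, iterating in _id-index order when an _id index exists and falling back to a collection scan for capped collections, then stores the result in the cache.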
std::string DBHashCmd::hashCollection(OperationContext* opCtx,
Database* db,
const std::string& fullCollectionName,
bool* fromCache) {
stdx::unique_lock<stdx::mutex> cachedHashedLock(_cachedHashedMutex, stdx::defer_lock);
if (isCachable(fullCollectionName)) {
cachedHashedLock.lock();
string hash = _cachedHashed[fullCollectionName];
if (hash.size() > 0) {
*fromCache = true;
return hash;
}
}
*fromCache = false;
Collection* collection = db->getCollection(fullCollectionName);
if (!collection)
return "";
IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex(opCtx);
unique_ptr<PlanExecutor> exec;
if (desc) {
exec.reset(InternalPlanner::indexScan(opCtx,
collection,
desc,
BSONObj(),
BSONObj(),
false,
InternalPlanner::FORWARD,
InternalPlanner::IXSCAN_FETCH));
} else if (collection->isCapped()) {
exec.reset(InternalPlanner::collectionScan(opCtx, fullCollectionName, collection));
} else {
log() << "can't find _id index for: " << fullCollectionName << endl;
return "no _id _index";
}
md5_state_t st;
md5_init(&st);
long long n = 0;
PlanExecutor::ExecState state;
BSONObj c;
verify(NULL != exec.get());
while (PlanExecutor::ADVANCED == (state = exec->getNext(&c, NULL))) {
md5_append(&st, (const md5_byte_t*)c.objdata(), c.objsize());
n++;
}
if (PlanExecutor::IS_EOF != state) {
warning() << "error while hashing, db dropped? ns=" << fullCollectionName << endl;
}
md5digest d;
md5_finish(&st, d);
string hash = digestToString(d);
if (cachedHashedLock.owns_lock()) {
_cachedHashed[fullCollectionName] = hash;
}
return hash;
}
Example 9: buildStages
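buildStages recursively lowers a QuerySolutionNode tree into the corresponding PlanStage tree (collection scan, index scan, fetch, sort, projection, limit, skip, and-hash, or, and-sorted). Note the XXX comment: the index-scan branch reads catalog state through the Collection, so it must run under a lock.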
PlanStage* buildStages(const string& ns, const QuerySolutionNode* root, WorkingSet* ws) {
if (STAGE_COLLSCAN == root->getType()) {
const CollectionScanNode* csn = static_cast<const CollectionScanNode*>(root);
CollectionScanParams params;
params.ns = csn->name;
params.tailable = csn->tailable;
params.direction = (csn->direction == 1) ? CollectionScanParams::FORWARD
: CollectionScanParams::BACKWARD;
return new CollectionScan(params, ws, csn->filter.get());
}
else if (STAGE_IXSCAN == root->getType()) {
const IndexScanNode* ixn = static_cast<const IndexScanNode*>(root);
//
// XXX XXX
// Given that this grabs data from the catalog, we must do this inside of a lock.
// We should change this to take a (ns, index key pattern) pair so that the params
// don't involve any on-disk data, just descriptions thereof.
// XXX XXX
//
Database* db = cc().database();
Collection* collection = db ? db->getCollection( ns ) : NULL;
if (NULL == collection) {
warning() << "Can't ixscan null ns " << ns << endl;
return NULL;
}
NamespaceDetails* nsd = collection->details();
int idxNo = nsd->findIndexByKeyPattern(ixn->indexKeyPattern);
if (-1 == idxNo) {
warning() << "Can't find idx " << ixn->indexKeyPattern.toString()
<< "in ns " << ns << endl;
return NULL;
}
IndexScanParams params;
params.descriptor = collection->getIndexCatalog()->getDescriptor( idxNo );
params.bounds = ixn->bounds;
params.direction = ixn->direction;
params.limit = ixn->limit;
return new IndexScan(params, ws, ixn->filter.get());
}
else if (STAGE_FETCH == root->getType()) {
const FetchNode* fn = static_cast<const FetchNode*>(root);
PlanStage* childStage = buildStages(ns, fn->children[0], ws);
if (NULL == childStage) { return NULL; }
return new FetchStage(ws, childStage, fn->filter.get());
}
else if (STAGE_SORT == root->getType()) {
const SortNode* sn = static_cast<const SortNode*>(root);
PlanStage* childStage = buildStages(ns, sn->children[0], ws);
if (NULL == childStage) { return NULL; }
SortStageParams params;
params.pattern = sn->pattern;
params.query = sn->query;
return new SortStage(params, ws, childStage);
}
else if (STAGE_PROJECTION == root->getType()) {
const ProjectionNode* pn = static_cast<const ProjectionNode*>(root);
PlanStage* childStage = buildStages(ns, pn->children[0], ws);
if (NULL == childStage) { return NULL; }
return new ProjectionStage(pn->projection, pn->fullExpression, ws, childStage);
}
else if (STAGE_LIMIT == root->getType()) {
const LimitNode* ln = static_cast<const LimitNode*>(root);
PlanStage* childStage = buildStages(ns, ln->children[0], ws);
if (NULL == childStage) { return NULL; }
return new LimitStage(ln->limit, ws, childStage);
}
else if (STAGE_SKIP == root->getType()) {
const SkipNode* sn = static_cast<const SkipNode*>(root);
PlanStage* childStage = buildStages(ns, sn->children[0], ws);
if (NULL == childStage) { return NULL; }
return new SkipStage(sn->skip, ws, childStage);
}
else if (STAGE_AND_HASH == root->getType()) {
const AndHashNode* ahn = static_cast<const AndHashNode*>(root);
auto_ptr<AndHashStage> ret(new AndHashStage(ws, ahn->filter.get()));
for (size_t i = 0; i < ahn->children.size(); ++i) {
PlanStage* childStage = buildStages(ns, ahn->children[i], ws);
if (NULL == childStage) { return NULL; }
ret->addChild(childStage);
}
return ret.release();
}
else if (STAGE_OR == root->getType()) {
const OrNode * orn = static_cast<const OrNode*>(root);
auto_ptr<OrStage> ret(new OrStage(ws, orn->dedup, orn->filter.get()));
for (size_t i = 0; i < orn->children.size(); ++i) {
PlanStage* childStage = buildStages(ns, orn->children[i], ws);
if (NULL == childStage) { return NULL; }
ret->addChild(childStage);
}
return ret.release();
}
else if (STAGE_AND_SORTED == root->getType()) {
const AndSortedNode* asn = static_cast<const AndSortedNode*>(root);
auto_ptr<AndSortedStage> ret(new AndSortedStage(ws, asn->filter.get()));
for (size_t i = 0; i < asn->children.size(); ++i) {
PlanStage* childStage = buildStages(ns, asn->children[i], ws);
if (NULL == childStage) { return NULL; }
ret->addChild(childStage);
}
//......... part of the code omitted here .........
Example 10: run
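A newer geoNear handler, structurally the same rewrite as Example 3, but it uses AutoGetCollectionForRead, additionally extracts an optional collation object (see SERVER-23473), and reads the limit with safeNumberLong().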
bool run(OperationContext* txn,
const string& dbname,
BSONObj& cmdObj,
int,
string& errmsg,
BSONObjBuilder& result) {
if (!cmdObj["start"].eoo()) {
errmsg = "using deprecated 'start' argument to geoNear";
return false;
}
const NamespaceString nss(parseNs(dbname, cmdObj));
AutoGetCollectionForRead ctx(txn, nss);
Collection* collection = ctx.getCollection();
if (!collection) {
errmsg = "can't find ns";
return false;
}
IndexCatalog* indexCatalog = collection->getIndexCatalog();
// cout << "raw cmd " << cmdObj.toString() << endl;
// We seek to populate this.
string nearFieldName;
bool using2DIndex = false;
if (!getFieldName(txn, collection, indexCatalog, &nearFieldName, &errmsg, &using2DIndex)) {
return false;
}
PointWithCRS point;
uassert(17304,
"'near' field must be point",
GeoParser::parseQueryPoint(cmdObj["near"], &point).isOK());
bool isSpherical = cmdObj["spherical"].trueValue();
if (!using2DIndex) {
uassert(17301, "2dsphere index must have spherical: true", isSpherical);
}
// Build the $near expression for the query.
BSONObjBuilder nearBob;
if (isSpherical) {
nearBob.append("$nearSphere", cmdObj["near"].Obj());
} else {
nearBob.append("$near", cmdObj["near"].Obj());
}
if (!cmdObj["maxDistance"].eoo()) {
uassert(17299, "maxDistance must be a number", cmdObj["maxDistance"].isNumber());
nearBob.append("$maxDistance", cmdObj["maxDistance"].number());
}
if (!cmdObj["minDistance"].eoo()) {
uassert(17298, "minDistance doesn't work on 2d index", !using2DIndex);
uassert(17300, "minDistance must be a number", cmdObj["minDistance"].isNumber());
nearBob.append("$minDistance", cmdObj["minDistance"].number());
}
if (!cmdObj["uniqueDocs"].eoo()) {
warning() << nss << ": ignoring deprecated uniqueDocs option in geoNear command";
}
// And, build the full query expression.
BSONObjBuilder queryBob;
queryBob.append(nearFieldName, nearBob.obj());
if (!cmdObj["query"].eoo() && cmdObj["query"].isABSONObj()) {
queryBob.appendElements(cmdObj["query"].Obj());
}
BSONObj rewritten = queryBob.obj();
// Extract the collation, if it exists.
// TODO SERVER-23473: Pass this collation spec object down so that it can be converted into
// a CollatorInterface.
BSONObj collation;
{
BSONElement collationElt;
Status collationEltStatus =
bsonExtractTypedField(cmdObj, "collation", BSONType::Object, &collationElt);
if (!collationEltStatus.isOK() && (collationEltStatus != ErrorCodes::NoSuchKey)) {
return appendCommandStatus(result, collationEltStatus);
}
if (collationEltStatus.isOK()) {
collation = collationElt.Obj();
}
}
long long numWanted = 100;
const char* limitName = !cmdObj["num"].eoo() ? "num" : "limit";
BSONElement eNumWanted = cmdObj[limitName];
if (!eNumWanted.eoo()) {
uassert(17303, "limit must be number", eNumWanted.isNumber());
numWanted = eNumWanted.safeNumberLong();
uassert(17302, "limit must be >=0", numWanted >= 0);
}
bool includeLocs = false;
if (!cmdObj["includeLocs"].eoo()) {
includeLocs = cmdObj["includeLocs"].trueValue();
//......... part of the code omitted here .........
Example 11: run
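The aggregate command: it parses the pipeline (round-tripping it through serialize() and reparsing in debug builds), builds the input PlanExecutor with PipelineD::prepareCursorSource, wraps the pipeline in a PipelineProxyStage-backed PlanExecutor, and registers a ClientCursor while the collection lock is still held so both sides see the same sharding version.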
virtual bool run(OperationContext* txn,
const string& db,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
const std::string ns = parseNs(db, cmdObj);
if (nsToCollectionSubstring(ns).empty()) {
errmsg = "missing collection name";
return false;
}
NamespaceString nss(ns);
intrusive_ptr<ExpressionContext> pCtx = new ExpressionContext(txn, nss);
pCtx->tempDir = storageGlobalParams.dbpath + "/_tmp";
/* try to parse the command; if this fails, then we didn't run */
intrusive_ptr<Pipeline> pPipeline = Pipeline::parseCommand(errmsg, cmdObj, pCtx);
if (!pPipeline.get())
return false;
// This is outside of the if block to keep the object alive until the pipeline is finished.
BSONObj parsed;
if (kDebugBuild && !pPipeline->isExplain() && !pCtx->inShard) {
// Make sure all operations round-trip through Pipeline::toBson() correctly by
// reparsing every command in debug builds. This is important because sharded
// aggregations rely on this ability. Skipping when inShard because this has
// already been through the transformation (and this unsets pCtx->inShard).
parsed = pPipeline->serialize().toBson();
pPipeline = Pipeline::parseCommand(errmsg, parsed, pCtx);
verify(pPipeline);
}
unique_ptr<ClientCursorPin> pin; // either this OR the exec will be non-null
unique_ptr<PlanExecutor> exec;
{
// This will throw if the sharding version for this connection is out of date. The
// lock must be held continuously from now until we have we created both the output
// ClientCursor and the input executor. This ensures that both are using the same
// sharding version that we synchronize on here. This is also why we always need to
// create a ClientCursor even when we aren't outputting to a cursor. See the comment
// on ShardFilterStage for more details.
AutoGetCollectionForRead ctx(txn, nss.ns());
Collection* collection = ctx.getCollection();
// This does mongod-specific stuff like creating the input PlanExecutor and adding
// it to the front of the pipeline if needed.
std::shared_ptr<PlanExecutor> input =
PipelineD::prepareCursorSource(txn, collection, pPipeline, pCtx);
pPipeline->stitch();
// Create the PlanExecutor which returns results from the pipeline. The WorkingSet
// ('ws') and the PipelineProxyStage ('proxy') will be owned by the created
// PlanExecutor.
unique_ptr<WorkingSet> ws(new WorkingSet());
unique_ptr<PipelineProxyStage> proxy(
new PipelineProxyStage(pPipeline, input, ws.get()));
auto statusWithPlanExecutor = (NULL == collection)
? PlanExecutor::make(
txn, std::move(ws), std::move(proxy), nss.ns(), PlanExecutor::YIELD_MANUAL)
: PlanExecutor::make(
txn, std::move(ws), std::move(proxy), collection, PlanExecutor::YIELD_MANUAL);
invariant(statusWithPlanExecutor.isOK());
exec = std::move(statusWithPlanExecutor.getValue());
if (!collection && input) {
// If we don't have a collection, we won't be able to register any executors, so
// make sure that the input PlanExecutor (likely wrapping an EOFStage) doesn't
// need to be registered.
invariant(!input->collection());
}
if (collection) {
const bool isAggCursor = true; // enable special locking behavior
ClientCursor* cursor =
new ClientCursor(collection->getCursorManager(),
exec.release(),
nss.ns(),
txn->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(),
0,
cmdObj.getOwned(),
isAggCursor);
pin.reset(new ClientCursorPin(collection->getCursorManager(), cursor->cursorid()));
// Don't add any code between here and the start of the try block.
}
// At this point, it is safe to release the collection lock.
// - In the case where we have a collection: we will need to reacquire the
// collection lock later when cleaning up our ClientCursorPin.
// - In the case where we don't have a collection: our PlanExecutor won't be
// registered, so it will be safe to clean it up outside the lock.
invariant(NULL == exec.get() || NULL == exec->collection());
}
try {
// Unless set to true, the ClientCursor created above will be deleted on block exit.
bool keepCursor = false;
//......... part of the code omitted here .........
Example 12: appendCollectionStorageStats
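appendCollectionStorageStats assembles collStats-style output: size, count, avgObjSize, storageSize, record-store custom stats, and per-index details and sizes, all divided by the optional scale parameter; a missing database or collection yields zeroed fields and a NamespaceNotFound status.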
Status appendCollectionStorageStats(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& param,
BSONObjBuilder* result) {
int scale = 1;
if (param["scale"].isNumber()) {
scale = param["scale"].numberInt();
if (scale < 1) {
return {ErrorCodes::BadValue, "scale has to be >= 1"};
}
} else if (param["scale"].trueValue()) {
return {ErrorCodes::BadValue, "scale has to be a number >= 1"};
}
bool verbose = param["verbose"].trueValue();
AutoGetCollectionForReadCommand ctx(opCtx, nss);
Collection* collection = ctx.getCollection(); // Will be set if present
if (!ctx.getDb() || !collection) {
result->appendNumber("size", 0);
result->appendNumber("count", 0);
result->appendNumber("storageSize", 0);
result->append("nindexes", 0);
result->appendNumber("totalIndexSize", 0);
result->append("indexDetails", BSONObj());
result->append("indexSizes", BSONObj());
std::string errmsg = !(ctx.getDb()) ? "Database [" + nss.db().toString() + "] not found."
: "Collection [" + nss.toString() + "] not found.";
return {ErrorCodes::NamespaceNotFound, errmsg};
}
long long size = collection->dataSize(opCtx) / scale;
result->appendNumber("size", size);
long long numRecords = collection->numRecords(opCtx);
result->appendNumber("count", numRecords);
if (numRecords)
result->append("avgObjSize", collection->averageObjectSize(opCtx));
RecordStore* recordStore = collection->getRecordStore();
result->appendNumber(
"storageSize",
static_cast<long long>(recordStore->storageSize(opCtx, result, verbose ? 1 : 0)) / scale);
recordStore->appendCustomStats(opCtx, result, scale);
IndexCatalog* indexCatalog = collection->getIndexCatalog();
result->append("nindexes", indexCatalog->numIndexesReady(opCtx));
BSONObjBuilder indexDetails;
std::unique_ptr<IndexCatalog::IndexIterator> it = indexCatalog->getIndexIterator(opCtx, false);
while (it->more()) {
const IndexCatalogEntry* entry = it->next();
const IndexDescriptor* descriptor = entry->descriptor();
const IndexAccessMethod* iam = entry->accessMethod();
invariant(iam);
BSONObjBuilder bob;
if (iam->appendCustomStats(opCtx, &bob, scale)) {
indexDetails.append(descriptor->indexName(), bob.obj());
}
}
result->append("indexDetails", indexDetails.obj());
BSONObjBuilder indexSizes;
long long indexSize = collection->getIndexSize(opCtx, &indexSizes, scale);
result->appendNumber("totalIndexSize", indexSize / scale);
result->append("indexSizes", indexSizes.obj());
return Status::OK();
}
Example 13: run
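A createIndexes command handler: it validates the 'indexes' array, normalizes each spec's ns field, first takes only a read lock to discard specs whose indexes already exist (the common ensureIndex case), and escalates to a write lock only when at least one index actually needs to be built.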
virtual bool run( const string& dbname, BSONObj& cmdObj, int options,
string& errmsg, BSONObjBuilder& result,
bool fromRepl = false ) {
// --- parse
NamespaceString ns( dbname, cmdObj[name].String() );
Status status = userAllowedWriteNS( ns );
if ( !status.isOK() )
return appendCommandStatus( result, status );
if ( cmdObj["indexes"].type() != Array ) {
errmsg = "indexes has to be an array";
result.append( "cmdObj", cmdObj );
return false;
}
std::vector<BSONObj> specs;
{
BSONObjIterator i( cmdObj["indexes"].Obj() );
while ( i.more() ) {
BSONElement e = i.next();
if ( e.type() != Object ) {
errmsg = "everything in indexes has to be an Object";
result.append( "cmdObj", cmdObj );
return false;
}
specs.push_back( e.Obj() );
}
}
if ( specs.size() == 0 ) {
errmsg = "no indexes to add";
return false;
}
// check specs
for ( size_t i = 0; i < specs.size(); i++ ) {
BSONObj spec = specs[i];
if ( spec["ns"].eoo() ) {
spec = _addNsToSpec( ns, spec );
specs[i] = spec;
}
if ( spec["ns"].type() != String ) {
errmsg = "spec has no ns";
result.append( "spec", spec );
return false;
}
if ( ns != spec["ns"].String() ) {
errmsg = "namespace mismatch";
result.append( "spec", spec );
return false;
}
}
{
// We first take a read lock to see if we need to do anything
// as many calls are ensureIndex (and hence no-ops), this is good so its a shared
// lock for common calls. We only take write lock if needed.
// Note: createIndexes command does not currently respect shard versioning.
Client::ReadContext readContext( ns,
storageGlobalParams.dbpath,
false /* doVersion */ );
const Collection* collection = readContext.ctx().db()->getCollection( ns.ns() );
if ( collection ) {
for ( size_t i = 0; i < specs.size(); i++ ) {
BSONObj spec = specs[i];
StatusWith<BSONObj> statusWithSpec =
collection->getIndexCatalog()->prepareSpecForCreate( spec );
status = statusWithSpec.getStatus();
if ( status.code() == ErrorCodes::IndexAlreadyExists ) {
specs.erase( specs.begin() + i );
i--;
continue;
}
if ( !status.isOK() )
return appendCommandStatus( result, status );
}
if ( specs.size() == 0 ) {
result.append( "numIndexesBefore",
collection->getIndexCatalog()->numIndexesTotal() );
result.append( "note", "all indexes already exist" );
return true;
}
// need to create index
}
}
// now we know we have to create index(es)
// Note: createIndexes command does not currently respect shard versioning.
Client::WriteContext writeContext( ns.ns(),
storageGlobalParams.dbpath,
false /* doVersion */ );
Database* db = writeContext.ctx().db();
Collection* collection = db->getCollection( ns.ns() );
//......... part of the code omitted here .........
Example 14: LOG
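IndexRebuilder::checkNS walks the given namespaces looking for interrupted index builds: it removes illegal indexes on the oplog, collects the unfinished index specs, and rebuilds them one at a time unless the server was started with --noIndexBuildRetry.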
void IndexRebuilder::checkNS(const std::list<std::string>& nsToCheck) {
bool firstTime = true;
for (std::list<std::string>::const_iterator it = nsToCheck.begin();
it != nsToCheck.end();
++it) {
string ns = *it;
LOG(3) << "IndexRebuilder::checkNS: " << ns;
// This write lock is held throughout the index building process
// for this namespace.
Client::WriteContext ctx(ns);
DurTransaction txn; // XXX???
Collection* collection = ctx.ctx().db()->getCollection( ns );
if ( collection == NULL )
continue;
IndexCatalog* indexCatalog = collection->getIndexCatalog();
if ( collection->ns().isOplog() && indexCatalog->numIndexesTotal() > 0 ) {
warning() << ns << " had illegal indexes, removing";
indexCatalog->dropAllIndexes(&txn, true);
continue;
}
vector<BSONObj> indexesToBuild = indexCatalog->getAndClearUnfinishedIndexes(&txn);
// The indexes have now been removed from system.indexes, so the only record is
// in-memory. If there is a journal commit between now and when insert() rewrites
// the entry and the db crashes before the new system.indexes entry is journalled,
// the index will be lost forever. Thus, we're assuming no journaling will happen
// between now and the entry being re-written.
if ( indexesToBuild.size() == 0 ) {
continue;
}
log() << "found " << indexesToBuild.size()
<< " interrupted index build(s) on " << ns;
if (firstTime) {
log() << "note: restart the server with --noIndexBuildRetry to skip index rebuilds";
firstTime = false;
}
if (!serverGlobalParams.indexBuildRetry) {
log() << " not rebuilding interrupted indexes";
continue;
}
// TODO: these can/should/must be done in parallel
for ( size_t i = 0; i < indexesToBuild.size(); i++ ) {
BSONObj indexObj = indexesToBuild[i];
log() << "going to rebuild: " << indexObj;
Status status = indexCatalog->createIndex(&txn, indexObj, false);
if ( !status.isOK() ) {
log() << "building index failed: " << status.toString() << " index: " << indexObj;
}
}
}
}
示例15: ctx
bool MigrationSourceManager::storeCurrentLocs(OperationContext* txn,
long long maxChunkSize,
string& errmsg,
BSONObjBuilder& result) {
AutoGetCollectionForRead ctx(txn, _getNS());
Collection* collection = ctx.getCollection();
if (!collection) {
errmsg = "ns not found, should be impossible";
return false;
}
// Allow multiKey based on the invariant that shard keys must be single-valued. Therefore, any
// multi-key index prefixed by shard key cannot be multikey over the shard key fields.
IndexDescriptor* idx =
collection->getIndexCatalog()->findShardKeyPrefixedIndex(txn,
_shardKeyPattern,
false); // requireSingleKey
if (idx == NULL) {
errmsg = str::stream() << "can't find index with prefix " << _shardKeyPattern
<< " in storeCurrentLocs for " << _ns;
return false;
}
// Assume both min and max non-empty, append MinKey's to make them fit chosen index
BSONObj min;
BSONObj max;
KeyPattern kp(idx->keyPattern());
{
// It's alright not to lock _mutex all the way through based on the assumption that this is
// only called by the main thread that drives the migration and only it can start and stop
// the current migration.
stdx::lock_guard<stdx::mutex> sl(_mutex);
invariant(_deleteNotifyExec.get() == NULL);
unique_ptr<WorkingSet> ws = stdx::make_unique<WorkingSet>();
unique_ptr<DeleteNotificationStage> dns = stdx::make_unique<DeleteNotificationStage>(this);
// Takes ownership of 'ws' and 'dns'.
auto statusWithPlanExecutor = PlanExecutor::make(
txn, std::move(ws), std::move(dns), collection, PlanExecutor::YIELD_MANUAL);
invariant(statusWithPlanExecutor.isOK());
_deleteNotifyExec = std::move(statusWithPlanExecutor.getValue());
_deleteNotifyExec->registerExec();
min = Helpers::toKeyFormat(kp.extendRangeBound(_min, false));
max = Helpers::toKeyFormat(kp.extendRangeBound(_max, false));
}
unique_ptr<PlanExecutor> exec(
InternalPlanner::indexScan(txn, collection, idx, min, max, false));
// We can afford to yield here because any change to the base data that we might miss is already
// being queued and will migrate in the 'transferMods' stage.
exec->setYieldPolicy(PlanExecutor::YIELD_AUTO);
// Use the average object size to estimate how many objects a full chunk would carry do that
// while traversing the chunk's range using the sharding index, below there's a fair amount of
// slack before we determine a chunk is too large because object sizes will vary.
unsigned long long maxRecsWhenFull;
long long avgRecSize;
const long long totalRecs = collection->numRecords(txn);
if (totalRecs > 0) {
avgRecSize = collection->dataSize(txn) / totalRecs;
maxRecsWhenFull = maxChunkSize / avgRecSize;
maxRecsWhenFull = std::min((unsigned long long)(Chunk::MaxObjectPerChunk + 1),
130 * maxRecsWhenFull / 100 /* slack */);
} else {
avgRecSize = 0;
maxRecsWhenFull = Chunk::MaxObjectPerChunk + 1;
}
// Do a full traversal of the chunk and don't stop even if we think it is a large chunk we want
// the number of records to better report, in that case
bool isLargeChunk = false;
unsigned long long recCount = 0;
RecordId recordId;
while (PlanExecutor::ADVANCED == exec->getNext(NULL, &recordId)) {
if (!isLargeChunk) {
stdx::lock_guard<stdx::mutex> lk(_cloneLocsMutex);
_cloneLocs.insert(recordId);
}
if (++recCount > maxRecsWhenFull) {
isLargeChunk = true;
// Continue on despite knowing that it will fail, just to get the correct value for
// recCount
}
}
exec.reset();
if (isLargeChunk) {
stdx::lock_guard<stdx::mutex> sl(_mutex);
warning() << "cannot move chunk: the maximum number of documents for a chunk is "
//......... part of the code omitted here .........