本文整理汇总了C++中indexcatalog::IndexIterator::more方法的典型用法代码示例。如果您正苦于以下问题:C++ IndexIterator::more方法的具体用法?C++ IndexIterator::more怎么用?C++ IndexIterator::more使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类indexcatalog::IndexIterator的用法示例。
在下文中一共展示了IndexIterator::more方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: validate
// Validate this collection's contents: first the underlying record store
// (optionally scanning document data when 'scanData' is set), then every ready
// index. Per-index key counts are appended to 'output' under "keysPerIndex";
// when 'full' is requested, per-index detail objects are appended under
// "indexDetails". Problems found are recorded in 'results'.
Status Collection::validate(OperationContext* txn,
bool full,
bool scanData,
ValidateResults* results,
BSONObjBuilder* output) {
// Caller must hold at least an intent-shared (MODE_IS) lock on this collection.
dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
MyValidateAdaptor adaptor;
// Validate the record store first; bail out immediately on failure.
Status status = _recordStore->validate(txn, full, scanData, &adaptor, results, output);
if (!status.isOK())
return status;
{ // indexes
output->append("nIndexes", _indexCatalog.numIndexesReady(txn));
// Ordinal of the index currently being validated; reported if an exception
// is thrown mid-loop so the failing index can be identified.
int idxn = 0;
try {
// Only applicable when 'full' validation is requested.
std::unique_ptr<BSONObjBuilder> indexDetails(full ? new BSONObjBuilder() : NULL);
BSONObjBuilder indexes; // not using subObjStart to be exception safe
// Second argument false: iterate only ready (finished) indexes.
IndexCatalog::IndexIterator i = _indexCatalog.getIndexIterator(txn, false);
while (i.more()) {
const IndexDescriptor* descriptor = i.next();
log(LogComponent::kIndex) << "validating index " << descriptor->indexNamespace()
<< endl;
IndexAccessMethod* iam = _indexCatalog.getIndex(descriptor);
invariant(iam);
// In 'full' mode, hand the access method a sub-object of
// 'indexDetails' to fill in; otherwise pass NULL.
std::unique_ptr<BSONObjBuilder> bob(
indexDetails.get() ? new BSONObjBuilder(indexDetails->subobjStart(
descriptor->indexNamespace()))
: NULL);
int64_t keys;
iam->validate(txn, full, &keys, bob.get());
indexes.appendNumber(descriptor->indexNamespace(), static_cast<long long>(keys));
// If the access method reported {valid: false} in its detail
// object, propagate that into the overall result.
if (bob) {
BSONObj obj = bob->done();
BSONElement valid = obj["valid"];
if (valid.ok() && !valid.trueValue()) {
results->valid = false;
}
}
idxn++;
}
output->append("keysPerIndex", indexes.done());
if (indexDetails.get()) {
output->append("indexDetails", indexDetails->done());
}
} catch (DBException& exc) {
// Record which index blew up; the call still returns OK with
// results->valid = false instead of rethrowing.
string err = str::stream() << "exception during index validate idxn "
<< BSONObjBuilder::numStr(idxn) << ": " << exc.toString();
results->errors.push_back(err);
results->valid = false;
}
}
return Status::OK();
}
示例2: run
virtual bool run(OperationContext* txn,
const string& dbname,
BSONObj& cmdObj,
int,
string& errmsg,
BSONObjBuilder& result,
bool fromRepl) {
Lock::GlobalWrite globalWriteLock(txn->lockState());
string source = cmdObj.getStringField( name.c_str() );
string target = cmdObj.getStringField( "to" );
// We stay in source context the whole time. This is mostly to set the CurOp namespace.
Client::Context ctx(txn, source);
if ( !NamespaceString::validCollectionComponent(target.c_str()) ) {
errmsg = "invalid collection name: " + target;
return false;
}
if ( source.empty() || target.empty() ) {
errmsg = "invalid command syntax";
return false;
}
if (!fromRepl) { // If it got through on the master, need to allow it here too
Status sourceStatus = userAllowedWriteNS(source);
if (!sourceStatus.isOK()) {
errmsg = "error with source namespace: " + sourceStatus.reason();
return false;
}
Status targetStatus = userAllowedWriteNS(target);
if (!targetStatus.isOK()) {
errmsg = "error with target namespace: " + targetStatus.reason();
return false;
}
}
if (NamespaceString(source).coll() == "system.indexes"
|| NamespaceString(target).coll() == "system.indexes") {
errmsg = "renaming system.indexes is not allowed";
return false;
}
Database* const sourceDB = dbHolder().get(txn, nsToDatabase(source));
Collection* const sourceColl = sourceDB ? sourceDB->getCollection(txn, source)
: NULL;
if (!sourceColl) {
errmsg = "source namespace does not exist";
return false;
}
{
// Ensure that collection name does not exceed maximum length.
// Ensure that index names do not push the length over the max.
// Iterator includes unfinished indexes.
IndexCatalog::IndexIterator sourceIndIt =
sourceColl->getIndexCatalog()->getIndexIterator( txn, true );
int longestIndexNameLength = 0;
while ( sourceIndIt.more() ) {
int thisLength = sourceIndIt.next()->indexName().length();
if ( thisLength > longestIndexNameLength )
longestIndexNameLength = thisLength;
}
unsigned int longestAllowed =
min(int(NamespaceString::MaxNsCollectionLen),
int(NamespaceString::MaxNsLen) - 2/*strlen(".$")*/ - longestIndexNameLength);
if (target.size() > longestAllowed) {
StringBuilder sb;
sb << "collection name length of " << target.size()
<< " exceeds maximum length of " << longestAllowed
<< ", allowing for index names";
errmsg = sb.str();
return false;
}
}
const std::vector<BSONObj> indexesInProg = stopIndexBuilds(txn, sourceDB, cmdObj);
// Dismissed on success
ScopeGuard indexBuildRestorer = MakeGuard(IndexBuilder::restoreIndexes, indexesInProg);
Database* const targetDB = dbHolder().openDb(txn, nsToDatabase(target));
{
WriteUnitOfWork wunit(txn);
// Check if the target namespace exists and if dropTarget is true.
// If target exists and dropTarget is not true, return false.
if (targetDB->getCollection(txn, target)) {
if (!cmdObj["dropTarget"].trueValue()) {
errmsg = "target namespace exists";
return false;
}
Status s = targetDB->dropCollection(txn, target);
if ( !s.isOK() ) {
errmsg = s.toString();
return false;
}
}
//.........这里部分代码省略.........
示例3: while
// Replace the document at 'oldLocation' with 'objNew', keeping all indexes in
// sync. Every index update is pre-validated (duplicate-key check) *before* the
// record store is touched, so index errors surface before any write happens.
// Two paths follow the record-store update:
//  - record moved:   the whole new document is re-indexed via indexRecord()
//                    (the store's callback handles removing old entries);
//  - updated in place: the per-index UpdateTickets prepared earlier are applied.
// Returns the document's (possibly new) location, or an error Status.
StatusWith<DiskLoc> Collection::updateDocument( OperationContext* txn,
const DiskLoc& oldLocation,
const BSONObj& objNew,
bool enforceQuota,
OpDebug* debug ) {
BSONObj objOld = _recordStore->dataFor( txn, oldLocation ).toBson();
// A document's _id is immutable: reject any update that would change it.
if ( objOld.hasElement( "_id" ) ) {
BSONElement oldId = objOld["_id"];
BSONElement newId = objNew["_id"];
if ( oldId != newId )
return StatusWith<DiskLoc>( ErrorCodes::InternalError,
"in Collection::updateDocument _id mismatch",
13596 );
}
// Extra schema validation for privilege documents in system.users.
if ( ns().coll() == "system.users" ) {
// XXX - andy and spencer think this should go away now
V2UserDocumentParser parser;
Status s = parser.checkValidUserDocument(objNew);
if ( !s.isOK() )
return StatusWith<DiskLoc>( s );
}
/* duplicate key check. we descend the btree twice - once for this check, and once for the actual inserts, further
below. that is suboptimal, but it's pretty complicated to do it the other way without rollbacks...
*/
// Build one UpdateTicket per index describing the index changes implied by
// objOld -> objNew. Second argument true: includes unfinished indexes.
OwnedPointerMap<IndexDescriptor*,UpdateTicket> updateTickets;
IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator( txn, true );
while ( ii.more() ) {
IndexDescriptor* descriptor = ii.next();
IndexAccessMethod* iam = _indexCatalog.getIndex( descriptor );
InsertDeleteOptions options;
options.logIfError = false;
// Duplicates are allowed unless this is the _id index or a unique index
// (and even then, the repl coordinator may tell us to ignore uniqueness).
options.dupsAllowed =
!(KeyPattern::isIdKeyPattern(descriptor->keyPattern()) || descriptor->unique())
|| repl::getGlobalReplicationCoordinator()->shouldIgnoreUniqueIndex(descriptor);
UpdateTicket* updateTicket = new UpdateTicket();
updateTickets.mutableMap()[descriptor] = updateTicket;
Status ret = iam->validateUpdate(txn, objOld, objNew, oldLocation, options, updateTicket );
if ( !ret.isOK() ) {
return StatusWith<DiskLoc>( ret );
}
}
// this can callback into Collection::recordStoreGoingToMove
StatusWith<DiskLoc> newLocation = _recordStore->updateRecord( txn,
oldLocation,
objNew.objdata(),
objNew.objsize(),
_enforceQuota( enforceQuota ),
this );
if ( !newLocation.isOK() ) {
return newLocation;
}
_infoCache.notifyOfWriteOp();
// Moved path: re-index the new document at its new location and return.
if ( newLocation.getValue() != oldLocation ) {
if ( debug ) {
if (debug->nmoved == -1) // default of -1 rather than 0
debug->nmoved = 1;
else
debug->nmoved += 1;
}
_indexCatalog.indexRecord(txn, objNew, newLocation.getValue());
return newLocation;
}
// In-place path: apply each index's prepared UpdateTicket.
if ( debug )
debug->keyUpdates = 0;
ii = _indexCatalog.getIndexIterator( txn, true );
while ( ii.more() ) {
IndexDescriptor* descriptor = ii.next();
IndexAccessMethod* iam = _indexCatalog.getIndex( descriptor );
int64_t updatedKeys;
Status ret = iam->update(txn, *updateTickets.mutableMap()[descriptor], &updatedKeys);
if ( !ret.isOK() )
return StatusWith<DiskLoc>( ret );
if ( debug )
debug->keyUpdates += updatedKeys;
}
// Broadcast the mutation so that query results stay correct.
_cursorCache.invalidateDocument(oldLocation, INVALIDATION_MUTATION);
return newLocation;
}
示例4: checkValidation
StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
const RecordId& oldLocation,
const Snapshotted<BSONObj>& oldDoc,
const BSONObj& newDoc,
bool enforceQuota,
bool indexesAffected,
OpDebug* debug,
oplogUpdateEntryArgs& args) {
{
auto status = checkValidation(txn, newDoc);
if (!status.isOK()) {
if (_validationLevel == STRICT_V) {
return status;
}
// moderate means we have to check the old doc
auto oldDocStatus = checkValidation(txn, oldDoc.value());
if (oldDocStatus.isOK()) {
// transitioning from good -> bad is not ok
return status;
}
// bad -> bad is ok in moderate mode
}
}
dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
invariant(oldDoc.snapshotId() == txn->recoveryUnit()->getSnapshotId());
SnapshotId sid = txn->recoveryUnit()->getSnapshotId();
BSONElement oldId = oldDoc.value()["_id"];
if (!oldId.eoo() && (oldId != newDoc["_id"]))
return StatusWith<RecordId>(
ErrorCodes::InternalError, "in Collection::updateDocument _id mismatch", 13596);
// At the end of this step, we will have a map of UpdateTickets, one per index, which
// represent the index updates needed to be done, based on the changes between oldDoc and
// newDoc.
OwnedPointerMap<IndexDescriptor*, UpdateTicket> updateTickets;
if (indexesAffected) {
IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(txn, true);
while (ii.more()) {
IndexDescriptor* descriptor = ii.next();
IndexCatalogEntry* entry = ii.catalogEntry(descriptor);
IndexAccessMethod* iam = ii.accessMethod(descriptor);
InsertDeleteOptions options;
options.logIfError = false;
options.dupsAllowed =
!(KeyPattern::isIdKeyPattern(descriptor->keyPattern()) || descriptor->unique()) ||
repl::getGlobalReplicationCoordinator()->shouldIgnoreUniqueIndex(descriptor);
UpdateTicket* updateTicket = new UpdateTicket();
updateTickets.mutableMap()[descriptor] = updateTicket;
Status ret = iam->validateUpdate(txn,
oldDoc.value(),
newDoc,
oldLocation,
options,
updateTicket,
entry->getFilterExpression());
if (!ret.isOK()) {
return StatusWith<RecordId>(ret);
}
}
}
// This can call back into Collection::recordStoreGoingToMove. If that happens, the old
// object is removed from all indexes.
StatusWith<RecordId> newLocation = _recordStore->updateRecord(
txn, oldLocation, newDoc.objdata(), newDoc.objsize(), _enforceQuota(enforceQuota), this);
if (!newLocation.isOK()) {
return newLocation;
}
// At this point, the old object may or may not still be indexed, depending on if it was
// moved. If the object did move, we need to add the new location to all indexes.
if (newLocation.getValue() != oldLocation) {
if (debug) {
if (debug->nmoved == -1) // default of -1 rather than 0
debug->nmoved = 1;
else
debug->nmoved += 1;
}
Status s = _indexCatalog.indexRecord(txn, newDoc, newLocation.getValue());
if (!s.isOK())
return StatusWith<RecordId>(s);
invariant(sid == txn->recoveryUnit()->getSnapshotId());
args.ns = ns().ns();
getGlobalServiceContext()->getOpObserver()->onUpdate(txn, args);
return newLocation;
}
// Object did not move. We update each index with each respective UpdateTicket.
if (debug)
debug->keyUpdates = 0;
if (indexesAffected) {
//.........这里部分代码省略.........
示例5: fillOutPlannerParams
// Populates 'plannerParams' for planning 'canonicalQuery' against 'collection':
// describes every catalog index as an IndexEntry, applies any index filters
// configured in query settings, and sets planner option flags (table-scan
// restriction, shard filtering, index intersection, etc.).
void fillOutPlannerParams(Collection* collection,
                          CanonicalQuery* canonicalQuery,
                          QueryPlannerParams* plannerParams) {
    // If it's not NULL, we may have indices. Access the catalog and fill out IndexEntry(s).
    IndexCatalog::IndexIterator indexIter =
        collection->getIndexCatalog()->getIndexIterator(false);
    while (indexIter.more()) {
        const IndexDescriptor* descriptor = indexIter.next();
        plannerParams->indices.push_back(IndexEntry(descriptor->keyPattern(),
                                                    descriptor->getAccessMethodName(),
                                                    descriptor->isMultikey(),
                                                    descriptor->isSparse(),
                                                    descriptor->indexName(),
                                                    descriptor->infoObj()));
    }

    // If index filters are configured for this query shape, restrict the
    // candidate indices accordingly and signal the planner to ignore any
    // application-supplied hint.
    QuerySettings* settings = collection->infoCache()->getQuerySettings();
    AllowedIndices* rawAllowed;
    if (settings->getAllowedIndices(*canonicalQuery, &rawAllowed)) {
        boost::scoped_ptr<AllowedIndices> allowed(rawAllowed);
        filterAllowedIndexEntries(*allowed, &plannerParams->indices);
        plannerParams->indexFiltersApplied = true;
    }

    // NO_TABLE_SCAN forbids collscans even when no indexed solution exists.
    // A few namespaces/queries are exempt from the restriction.
    if (storageGlobalParams.noTableScan) {
        const string& nsStr = canonicalQuery->ns();
        const bool exempt = canonicalQuery->getQueryObj().isEmpty() ||
            (string::npos != nsStr.find(".system.")) ||
            (0 == nsStr.find("local."));
        if (!exempt) {
            plannerParams->options |= QueryPlannerParams::NO_TABLE_SCAN;
        }
    }

    // Shard filtering only applies when we actually have sharding metadata;
    // without it the key pattern is unknown, so drop the flag.
    if (plannerParams->options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
        CollectionMetadataPtr metadata =
            shardingState.getCollectionMetadata(canonicalQuery->ns());
        if (metadata) {
            plannerParams->shardKey = metadata->getKeyPattern();
        } else {
            plannerParams->options &= ~QueryPlannerParams::INCLUDE_SHARD_FILTER;
        }
    }

    if (internalQueryPlannerEnableIndexIntersection) {
        plannerParams->options |= QueryPlannerParams::INDEX_INTERSECTION;
    }
    plannerParams->options |= QueryPlannerParams::KEEP_MUTATIONS;
    plannerParams->options |= QueryPlannerParams::SPLIT_LIMITED_SORT;
}
示例6: getRunnerDistinct
Status getRunnerDistinct(Collection* collection,
const BSONObj& query,
const string& field,
Runner** out) {
// This should'a been checked by the distinct command.
verify(collection);
// TODO: check for idhack here?
// When can we do a fast distinct hack?
// 1. There is a plan with just one leaf and that leaf is an ixscan.
// 2. The ixscan indexes the field we're interested in.
// 2a: We are correct if the index contains the field but for now we look for prefix.
// 3. The query is covered/no fetch.
//
// We go through normal planning (with limited parameters) to see if we can produce
// a soln with the above properties.
QueryPlannerParams plannerParams;
plannerParams.options = QueryPlannerParams::NO_TABLE_SCAN;
IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator(false);
while (ii.more()) {
const IndexDescriptor* desc = ii.next();
// The distinct hack can work if any field is in the index but it's not always clear
// if it's a win unless it's the first field.
if (desc->keyPattern().firstElement().fieldName() == field) {
plannerParams.indices.push_back(IndexEntry(desc->keyPattern(),
desc->getAccessMethodName(),
desc->isMultikey(),
desc->isSparse(),
desc->indexName(),
desc->infoObj()));
}
}
// If there are no suitable indices for the distinct hack bail out now into regular planning
// with no projection.
if (plannerParams.indices.empty()) {
CanonicalQuery* cq;
Status status = CanonicalQuery::canonicalize(collection->ns().ns(),
query,
BSONObj(),
BSONObj(),
&cq);
if (!status.isOK()) {
return status;
}
// Takes ownership of cq.
return getRunner(collection, cq, out);
}
//
// If we're here, we have an index prefixed by the field we're distinct-ing over.
//
// Applying a projection allows the planner to try to give us covered plans that we can turn
// into the projection hack. getDistinctProjection deals with .find() projection semantics
// (ie _id:1 being implied by default).
BSONObj projection = getDistinctProjection(field);
// Apply a projection of the key. Empty BSONObj() is for the sort.
CanonicalQuery* cq;
Status status = CanonicalQuery::canonicalize(collection->ns().ns(),
query,
BSONObj(),
projection,
&cq);
if (!status.isOK()) {
return status;
}
// If there's no query, we can just distinct-scan one of the indices.
// Not every index in plannerParams.indices may be suitable. Refer to
// getDistinctNodeIndex().
size_t distinctNodeIndex = 0;
if (query.isEmpty() &&
getDistinctNodeIndex(plannerParams.indices, field, &distinctNodeIndex)) {
DistinctNode* dn = new DistinctNode();
dn->indexKeyPattern = plannerParams.indices[distinctNodeIndex].keyPattern;
dn->direction = 1;
IndexBoundsBuilder::allValuesBounds(dn->indexKeyPattern, &dn->bounds);
dn->fieldNo = 0;
QueryPlannerParams params;
// Takes ownership of 'dn'.
QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(*cq, params, dn);
verify(soln);
LOG(2) << "Using fast distinct: " << cq->toStringShort()
<< ", planSummary: " << getPlanSummary(*soln);
WorkingSet* ws;
PlanStage* root;
verify(StageBuilder::build(collection, *soln, &root, &ws));
*out = new SingleSolutionRunner(collection, cq, soln, root, ws);
return Status::OK();
}
//.........这里部分代码省略.........
示例7: Status
//.........这里部分代码省略.........
return status;
}
namespacesToCopy[ns] = options;
}
}
}
for ( map<string,CollectionOptions>::const_iterator i = namespacesToCopy.begin();
i != namespacesToCopy.end();
++i ) {
string ns = i->first;
CollectionOptions options = i->second;
Collection* tempCollection = NULL;
{
Client::Context tempContext(txn, ns, tempDatabase );
WriteUnitOfWork wunit(txn);
tempCollection = tempDatabase->createCollection(txn, ns, options, true, false);
wunit.commit();
}
Client::Context readContext(txn, ns, originalDatabase);
Collection* originalCollection = originalDatabase->getCollection( txn, ns );
invariant( originalCollection );
// data
// TODO SERVER-14812 add a mode that drops duplicates rather than failing
MultiIndexBlock indexer(txn, tempCollection );
{
vector<BSONObj> indexes;
IndexCatalog::IndexIterator ii =
originalCollection->getIndexCatalog()->getIndexIterator( false );
while ( ii.more() ) {
IndexDescriptor* desc = ii.next();
indexes.push_back( desc->infoObj() );
}
Client::Context tempContext(txn, ns, tempDatabase);
Status status = indexer.init( indexes );
if ( !status.isOK() )
return status;
}
scoped_ptr<RecordIterator> iterator(
originalCollection->getIterator( txn, DiskLoc(), false,
CollectionScanParams::FORWARD ));
while ( !iterator->isEOF() ) {
DiskLoc loc = iterator->getNext();
invariant( !loc.isNull() );
BSONObj doc = originalCollection->docFor( loc );
Client::Context tempContext(txn, ns, tempDatabase);
WriteUnitOfWork wunit(txn);
StatusWith<DiskLoc> result = tempCollection->insertDocument(txn,
doc,
&indexer,
false);
if ( !result.isOK() )
return result.getStatus();
wunit.commit();
txn->checkForInterrupt(false);
}
示例8: renameCollection
Status renameCollection(OperationContext* txn,
const NamespaceString& source,
const NamespaceString& target,
bool dropTarget,
bool stayTemp) {
DisableDocumentValidation validationDisabler(txn);
ScopedTransaction transaction(txn, MODE_X);
Lock::GlobalWrite globalWriteLock(txn->lockState());
// We stay in source context the whole time. This is mostly to set the CurOp namespace.
OldClientContext ctx(txn, source.ns());
bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(source);
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::NotMaster,
str::stream() << "Not primary while renaming collection " << source.ns()
<< " to "
<< target.ns());
}
Database* const sourceDB = dbHolder().get(txn, source.db());
Collection* const sourceColl = sourceDB ? sourceDB->getCollection(source.ns()) : nullptr;
if (!sourceColl) {
return Status(ErrorCodes::NamespaceNotFound, "source namespace does not exist");
}
{
// Ensure that collection name does not exceed maximum length.
// Ensure that index names do not push the length over the max.
// Iterator includes unfinished indexes.
IndexCatalog::IndexIterator sourceIndIt =
sourceColl->getIndexCatalog()->getIndexIterator(txn, true);
int longestIndexNameLength = 0;
while (sourceIndIt.more()) {
int thisLength = sourceIndIt.next()->indexName().length();
if (thisLength > longestIndexNameLength)
longestIndexNameLength = thisLength;
}
unsigned int longestAllowed =
std::min(int(NamespaceString::MaxNsCollectionLen),
int(NamespaceString::MaxNsLen) - 2 /*strlen(".$")*/ - longestIndexNameLength);
if (target.size() > longestAllowed) {
StringBuilder sb;
sb << "collection name length of " << target.size() << " exceeds maximum length of "
<< longestAllowed << ", allowing for index names";
return Status(ErrorCodes::InvalidLength, sb.str());
}
}
BackgroundOperation::assertNoBgOpInProgForNs(source.ns());
Database* const targetDB = dbHolder().openDb(txn, target.db());
{
WriteUnitOfWork wunit(txn);
// Check if the target namespace exists and if dropTarget is true.
// If target exists and dropTarget is not true, return false.
if (targetDB->getCollection(target)) {
if (!dropTarget) {
return Status(ErrorCodes::NamespaceExists, "target namespace exists");
}
Status s = targetDB->dropCollection(txn, target.ns());
if (!s.isOK()) {
return s;
}
}
// If we are renaming in the same database, just
// rename the namespace and we're done.
if (sourceDB == targetDB) {
Status s = targetDB->renameCollection(txn, source.ns(), target.ns(), stayTemp);
if (!s.isOK()) {
return s;
}
getGlobalServiceContext()->getOpObserver()->onRenameCollection(
txn, NamespaceString(source), NamespaceString(target), dropTarget, stayTemp);
wunit.commit();
return Status::OK();
}
wunit.commit();
}
// If we get here, we are renaming across databases, so we must copy all the data and
// indexes, then remove the source collection.
// Create the target collection. It will be removed if we fail to copy the collection.
// TODO use a temp collection and unset the temp flag on success.
Collection* targetColl = nullptr;
{
CollectionOptions options = sourceColl->getCatalogEntry()->getCollectionOptions(txn);
WriteUnitOfWork wunit(txn);
//.........这里部分代码省略.........
示例9: getRunnerDistinct
Status getRunnerDistinct(Collection* collection,
const BSONObj& query,
const string& field,
Runner** out) {
Database* db = cc().database();
verify(db);
// This should'a been checked by the distinct command.
verify(collection);
// TODO: check for idhack here?
// When can we do a fast distinct hack?
// 1. There is a plan with just one leaf and that leaf is an ixscan.
// 2. The ixscan indexes the field we're interested in.
// 2a: We are correct if the index contains the field but for now we look for prefix.
// 3. The query is covered/no fetch.
//
// We go through normal planning (with limited parameters) to see if we can produce
// a soln with the above properties.
QueryPlannerParams plannerParams;
plannerParams.options = QueryPlannerParams::NO_TABLE_SCAN;
IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator(false);
while (ii.more()) {
const IndexDescriptor* desc = ii.next();
// The distinct hack can work if any field is in the index but it's not always clear
// if it's a win unless it's the first field.
if (desc->keyPattern().firstElement().fieldName() == field) {
plannerParams.indices.push_back(IndexEntry(desc->keyPattern(),
desc->isMultikey(),
desc->isSparse(),
desc->indexName(),
desc->infoObj()));
}
}
// We only care about the field that we're projecting over. Have to drop the _id field
// explicitly because those are .find() semantics.
//
// Applying a projection allows the planner to try to give us covered plans.
BSONObj projection;
if ("_id" == field) {
projection = BSON("_id" << 1);
}
else {
projection = BSON("_id" << 0 << field << 1);
}
// Apply a projection of the key. Empty BSONObj() is for the sort.
CanonicalQuery* cq;
Status status = CanonicalQuery::canonicalize(collection->ns().ns(), query, BSONObj(), projection, &cq);
if (!status.isOK()) {
return status;
}
// No index has the field we're looking for. Punt to normal planning.
if (plannerParams.indices.empty()) {
// Takes ownership of cq.
return getRunner(cq, out);
}
// If we're here, we have an index prefixed by the field we're distinct-ing over.
// If there's no query, we can just distinct-scan one of the indices.
if (query.isEmpty()) {
DistinctNode* dn = new DistinctNode();
dn->indexKeyPattern = plannerParams.indices[0].keyPattern;
dn->direction = 1;
IndexBoundsBuilder::allValuesBounds(dn->indexKeyPattern, &dn->bounds);
dn->fieldNo = 0;
QueryPlannerParams params;
// Takes ownership of 'dn'.
QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(*cq, params, dn);
verify(soln);
WorkingSet* ws;
PlanStage* root;
verify(StageBuilder::build(*soln, &root, &ws));
*out = new SingleSolutionRunner(cq, soln, root, ws);
return Status::OK();
}
// See if we can answer the query in a fast-distinct compatible fashion.
vector<QuerySolution*> solutions;
status = QueryPlanner::plan(*cq, plannerParams, &solutions);
if (!status.isOK()) {
return getRunner(cq, out);
}
// XXX: why do we need to do this? planner should prob do this internally
cq->root()->resetTag();
// We look for a solution that has an ixscan we can turn into a distinctixscan
for (size_t i = 0; i < solutions.size(); ++i) {
if (turnIxscanIntoDistinctIxscan(solutions[i], field)) {
//.........这里部分代码省略.........
示例10: repairDatabase
//.........这里部分代码省略.........
if ( obj["options"].isABSONObj() ) {
Status status = options.parse( obj["options"].Obj() );
if ( !status.isOK() )
return status;
}
namespacesToCopy[ns] = options;
}
}
}
for ( map<string,CollectionOptions>::const_iterator i = namespacesToCopy.begin();
i != namespacesToCopy.end();
++i ) {
string ns = i->first;
CollectionOptions options = i->second;
Collection* tempCollection = NULL;
{
Client::Context tempContext( ns, tempDatabase );
tempCollection = tempDatabase->createCollection( ns, options, true, false );
}
Client::Context readContext( ns, originalDatabase );
Collection* originalCollection = originalDatabase->getCollection( ns );
invariant( originalCollection );
// data
MultiIndexBlock indexBlock( tempCollection );
{
vector<BSONObj> indexes;
IndexCatalog::IndexIterator ii =
originalCollection->getIndexCatalog()->getIndexIterator( false );
while ( ii.more() ) {
IndexDescriptor* desc = ii.next();
indexes.push_back( desc->infoObj() );
}
Client::Context tempContext( ns, tempDatabase );
Status status = indexBlock.init( indexes );
if ( !status.isOK() )
return status;
}
scoped_ptr<CollectionIterator> iterator( originalCollection->getIterator( DiskLoc(),
false,
CollectionScanParams::FORWARD ) );
while ( !iterator->isEOF() ) {
DiskLoc loc = iterator->getNext();
invariant( !loc.isNull() );
BSONObj doc = originalCollection->docFor( loc );
Client::Context tempContext( ns, tempDatabase );
StatusWith<DiskLoc> result = tempCollection->insertDocument( doc, indexBlock );
if ( !result.isOK() )
return result.getStatus();
getDur().commitIfNeeded();
killCurrentOp.checkForInterrupt(false);
}
{
Client::Context tempContext( ns, tempDatabase );
Status status = indexBlock.commit();
示例11: getRunner
/**
* For a given query, get a runner. The runner could be a SingleSolutionRunner, a
* CachedQueryRunner, or a MultiPlanRunner, depending on the cache/query solver/etc.
*/
Status getRunner(Collection* collection, CanonicalQuery* rawCanonicalQuery,
Runner** out, size_t plannerOptions) {
verify(rawCanonicalQuery);
auto_ptr<CanonicalQuery> canonicalQuery(rawCanonicalQuery);
// This can happen as we're called by internal clients as well.
if (NULL == collection) {
const string& ns = canonicalQuery->ns();
*out = new EOFRunner(canonicalQuery.release(), ns);
return Status::OK();
}
// If we have an _id index we can use the idhack runner.
if (canUseIDHack(*canonicalQuery) && collection->getIndexCatalog()->findIdIndex()) {
*out = new IDHackRunner(collection, canonicalQuery.release());
return Status::OK();
}
// If it's not NULL, we may have indices. Access the catalog and fill out IndexEntry(s)
QueryPlannerParams plannerParams;
IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator(false);
while (ii.more()) {
const IndexDescriptor* desc = ii.next();
plannerParams.indices.push_back(IndexEntry(desc->keyPattern(),
desc->isMultikey(),
desc->isSparse(),
desc->indexName(),
desc->infoObj()));
}
// If query supports admin hint, filter params.indices by indexes in query settings.
QuerySettings* querySettings = collection->infoCache()->getQuerySettings();
AllowedIndices* allowedIndicesRaw;
// Filter index catalog if admin hint is specified for query.
// Also, signal to planner that application hint should be ignored.
if (querySettings->getAllowedIndices(*canonicalQuery, &allowedIndicesRaw)) {
boost::scoped_ptr<AllowedIndices> allowedIndices(allowedIndicesRaw);
filterAllowedIndexEntries(*allowedIndices, &plannerParams.indices);
plannerParams.adminHintApplied = true;
}
// Tailable: If the query requests tailable the collection must be capped.
if (canonicalQuery->getParsed().hasOption(QueryOption_CursorTailable)) {
if (!collection->isCapped()) {
return Status(ErrorCodes::BadValue,
"error processing query: " + canonicalQuery->toString() +
" tailable cursor requested on non capped collection");
}
// If a sort is specified it must be equal to expectedSort.
const BSONObj expectedSort = BSON("$natural" << 1);
const BSONObj& actualSort = canonicalQuery->getParsed().getSort();
if (!actualSort.isEmpty() && !(actualSort == expectedSort)) {
return Status(ErrorCodes::BadValue,
"error processing query: " + canonicalQuery->toString() +
" invalid sort specified for tailable cursor: "
+ actualSort.toString());
}
}
// Process the planning options.
plannerParams.options = plannerOptions;
if (storageGlobalParams.noTableScan) {
const string& ns = canonicalQuery->ns();
// There are certain cases where we ignore this restriction:
bool ignore = canonicalQuery->getQueryObj().isEmpty()
|| (string::npos != ns.find(".system."))
|| (0 == ns.find("local."));
if (!ignore) {
plannerParams.options |= QueryPlannerParams::NO_TABLE_SCAN;
}
}
if (!(plannerParams.options & QueryPlannerParams::NO_TABLE_SCAN)) {
plannerParams.options |= QueryPlannerParams::INCLUDE_COLLSCAN;
}
// If the caller wants a shard filter, make sure we're actually sharded.
if (plannerParams.options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
CollectionMetadataPtr collMetadata =
shardingState.getCollectionMetadata(canonicalQuery->ns());
if (collMetadata) {
plannerParams.shardKey = collMetadata->getKeyPattern();
}
else {
// If there's no metadata don't bother w/the shard filter since we won't know what
// the key pattern is anyway...
plannerParams.options &= ~QueryPlannerParams::INCLUDE_SHARD_FILTER;
}
}
// Try to look up a cached solution for the query.
//.........这里部分代码省略.........
示例12: computeIndexKeys
// Recomputes the set of indexed paths for this collection (including paths
// referenced by partial-index filters) and keeps the TTL collection cache in
// sync with whether a TTL index now exists. Marks the key set as computed.
void CollectionInfoCache::computeIndexKeys(OperationContext* opCtx) {
    _indexedPaths.clear();

    const bool previouslyHadTTL = _hasTTLIndex;
    _hasTTLIndex = false;

    // Second argument true: the iteration includes unfinished indexes.
    IndexCatalog::IndexIterator catIt =
        _collection->getIndexCatalog()->getIndexIterator(opCtx, true);
    while (catIt.more()) {
        IndexDescriptor* descriptor = catIt.next();

        if (descriptor->getAccessMethodName() != IndexNames::TEXT) {
            // Ordinary index: every field of the key pattern is an indexed path.
            BSONObj keyPattern = descriptor->keyPattern();
            const BSONObj& infoObj = descriptor->infoObj();
            if (infoObj.hasField("expireAfterSeconds")) {
                _hasTTLIndex = true;
            }
            BSONObjIterator keyIt(keyPattern);
            while (keyIt.more()) {
                BSONElement keyElt = keyIt.next();
                _indexedPaths.addPath(keyElt.fieldName());
            }
        } else {
            // Text index: the indexed paths come from the FTS spec instead.
            fts::FTSSpec ftsSpec(descriptor->infoObj());
            if (ftsSpec.wildcard()) {
                _indexedPaths.allPathsIndexed();
            } else {
                for (size_t idx = 0; idx < ftsSpec.numExtraBefore(); ++idx) {
                    _indexedPaths.addPath(ftsSpec.extraBefore(idx));
                }
                for (fts::Weights::const_iterator weightIt = ftsSpec.weights().begin();
                     weightIt != ftsSpec.weights().end();
                     ++weightIt) {
                    _indexedPaths.addPath(weightIt->first);
                }
                for (size_t idx = 0; idx < ftsSpec.numExtraAfter(); ++idx) {
                    _indexedPaths.addPath(ftsSpec.extraAfter(idx));
                }
                // Any update to a path containing "language" as a component could change the
                // language of a subdocument. Add the override field as a path component.
                _indexedPaths.addPathComponent(ftsSpec.languageOverrideField());
            }
        }

        // Partial index: fields referenced by its filter expression are also
        // relevant to index maintenance, so track them too.
        const IndexCatalogEntry* entry = catIt.catalogEntry(descriptor);
        const MatchExpression* filter = entry->getFilterExpression();
        if (filter) {
            unordered_set<std::string> filterPaths;
            QueryPlannerIXSelect::getFields(filter, "", &filterPaths);
            for (auto pathIt = filterPaths.begin(); pathIt != filterPaths.end(); ++pathIt) {
                _indexedPaths.addPath(*pathIt);
            }
        }
    }

    // Register/unregister with the TTL monitor only on a state transition.
    TTLCollectionCache& ttlCache = TTLCollectionCache::get(getGlobalServiceContext());
    if (_hasTTLIndex != previouslyHadTTL) {
        if (_hasTTLIndex) {
            ttlCache.registerCollection(_collection->ns());
        } else {
            ttlCache.unregisterCollection(_collection->ns());
        }
    }

    _keysComputed = true;
}
示例13: objOld
StatusWith<DiskLoc> Collection::updateDocument( const DiskLoc& oldLocation,
const BSONObj& objNew,
bool enforceQuota,
OpDebug* debug ) {
Record* oldRecord = _recordStore->recordFor( oldLocation );
BSONObj objOld( oldRecord->accessed()->data() );
if ( objOld.hasElement( "_id" ) ) {
BSONElement oldId = objOld["_id"];
BSONElement newId = objNew["_id"];
if ( oldId != newId )
return StatusWith<DiskLoc>( ErrorCodes::InternalError,
"in Collection::updateDocument _id mismatch",
13596 );
}
if ( ns().coll() == "system.users" ) {
// XXX - andy and spencer think this should go away now
V2UserDocumentParser parser;
Status s = parser.checkValidUserDocument(objNew);
if ( !s.isOK() )
return StatusWith<DiskLoc>( s );
}
/* duplicate key check. we descend the btree twice - once for this check, and once for the actual inserts, further
below. that is suboptimal, but it's pretty complicated to do it the other way without rollbacks...
*/
OwnedPointerMap<IndexDescriptor*,UpdateTicket> updateTickets;
IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator( true );
while ( ii.more() ) {
IndexDescriptor* descriptor = ii.next();
IndexAccessMethod* iam = _indexCatalog.getIndex( descriptor );
InsertDeleteOptions options;
options.logIfError = false;
options.dupsAllowed =
!(KeyPattern::isIdKeyPattern(descriptor->keyPattern()) || descriptor->unique())
|| ignoreUniqueIndex(descriptor);
UpdateTicket* updateTicket = new UpdateTicket();
updateTickets.mutableMap()[descriptor] = updateTicket;
Status ret = iam->validateUpdate(objOld, objNew, oldLocation, options, updateTicket );
if ( !ret.isOK() ) {
return StatusWith<DiskLoc>( ret );
}
}
if ( oldRecord->netLength() < objNew.objsize() ) {
// doesn't fit, have to move to new location
if ( _details->isCapped() )
return StatusWith<DiskLoc>( ErrorCodes::InternalError,
"failing update: objects in a capped ns cannot grow",
10003 );
moveCounter.increment();
_details->paddingTooSmall();
// unindex old record, don't delete
// this way, if inserting new doc fails, we can re-index this one
_cursorCache.invalidateDocument(oldLocation, INVALIDATION_DELETION);
_indexCatalog.unindexRecord( objOld, oldLocation, true );
if ( debug ) {
if (debug->nmoved == -1) // default of -1 rather than 0
debug->nmoved = 1;
else
debug->nmoved += 1;
}
StatusWith<DiskLoc> loc = _insertDocument( objNew, enforceQuota );
if ( loc.isOK() ) {
// insert successful, now lets deallocate the old location
// remember its already unindexed
_recordStore->deleteRecord( oldLocation );
}
else {
// new doc insert failed, so lets re-index the old document and location
_indexCatalog.indexRecord( objOld, oldLocation );
}
return loc;
}
_infoCache.notifyOfWriteOp();
_details->paddingFits();
if ( debug )
debug->keyUpdates = 0;
ii = _indexCatalog.getIndexIterator( true );
while ( ii.more() ) {
IndexDescriptor* descriptor = ii.next();
IndexAccessMethod* iam = _indexCatalog.getIndex( descriptor );
int64_t updatedKeys;
Status ret = iam->update(*updateTickets.mutableMap()[descriptor], &updatedKeys);
if ( !ret.isOK() )
return StatusWith<DiskLoc>( ret );
//.........这里部分代码省略.........
示例14: run
virtual bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
string source = cmdObj.getStringField( name.c_str() );
string target = cmdObj.getStringField( "to" );
if ( !NamespaceString::validCollectionComponent(target.c_str()) ) {
errmsg = "invalid collection name: " + target;
return false;
}
if ( source.empty() || target.empty() ) {
errmsg = "invalid command syntax";
return false;
}
if (!fromRepl) { // If it got through on the master, need to allow it here too
Status sourceStatus = userAllowedWriteNS(source);
if (!sourceStatus.isOK()) {
errmsg = "error with source namespace: " + sourceStatus.reason();
return false;
}
Status targetStatus = userAllowedWriteNS(target);
if (!targetStatus.isOK()) {
errmsg = "error with target namespace: " + targetStatus.reason();
return false;
}
}
string sourceDB = nsToDatabase(source);
string targetDB = nsToDatabase(target);
bool capped = false;
long long size = 0;
std::vector<BSONObj> indexesInProg;
Lock::GlobalWrite globalWriteLock;
DurTransaction txn;
{
Client::Context srcCtx( source );
Collection* sourceColl = srcCtx.db()->getCollection( source );
if ( !sourceColl ) {
errmsg = "source namespace does not exist";
return false;
}
// Ensure that collection name does not exceed maximum length.
// Ensure that index names do not push the length over the max.
// Iterator includes unfinished indexes.
IndexCatalog::IndexIterator sourceIndIt =
sourceColl->getIndexCatalog()->getIndexIterator( true );
int longestIndexNameLength = 0;
while ( sourceIndIt.more() ) {
int thisLength = sourceIndIt.next()->indexName().length();
if ( thisLength > longestIndexNameLength )
longestIndexNameLength = thisLength;
}
unsigned int longestAllowed =
min(int(Namespace::MaxNsColletionLen),
int(Namespace::MaxNsLen) - 2/*strlen(".$")*/ - longestIndexNameLength);
if (target.size() > longestAllowed) {
StringBuilder sb;
sb << "collection name length of " << target.size()
<< " exceeds maximum length of " << longestAllowed
<< ", allowing for index names";
errmsg = sb.str();
return false;
}
{
indexesInProg = stopIndexBuilds( srcCtx.db(), cmdObj );
capped = sourceColl->isCapped();
if ( capped ) {
size = sourceColl->storageSize();
}
}
}
{
Client::Context ctx( target );
// Check if the target namespace exists and if dropTarget is true.
// If target exists and dropTarget is not true, return false.
if ( ctx.db()->getCollection( target ) ) {
if ( !cmdObj["dropTarget"].trueValue() ) {
errmsg = "target namespace exists";
return false;
}
Status s = ctx.db()->dropCollection( &txn, target );
if ( !s.isOK() ) {
errmsg = s.toString();
restoreIndexBuildsOnSource( indexesInProg, source );
return false;
}
}
// If we are renaming in the same database, just
//.........这里部分代码省略.........
示例15: Status
//.........这里部分代码省略.........
if ( !status.isOK() )
return status;
}
namespacesToCopy[ns] = options;
}
}
}
for ( map<string,CollectionOptions>::const_iterator i = namespacesToCopy.begin();
i != namespacesToCopy.end();
++i ) {
string ns = i->first;
CollectionOptions options = i->second;
Collection* tempCollection = NULL;
{
WriteUnitOfWork wunit(txn);
tempCollection = tempDatabase->createCollection(txn, ns, options, false);
wunit.commit();
}
OldClientContext readContext(txn, ns, originalDatabase);
Collection* originalCollection = originalDatabase->getCollection( ns );
invariant( originalCollection );
// data
// TODO SERVER-14812 add a mode that drops duplicates rather than failing
MultiIndexBlock indexer(txn, tempCollection );
{
vector<BSONObj> indexes;
IndexCatalog::IndexIterator ii =
originalCollection->getIndexCatalog()->getIndexIterator( txn, false );
while ( ii.more() ) {
IndexDescriptor* desc = ii.next();
indexes.push_back( desc->infoObj() );
}
Status status = indexer.init( indexes );
if (!status.isOK()) {
return status;
}
}
auto cursor = originalCollection->getCursor(txn);
while (auto record = cursor->next()) {
BSONObj doc = record->data.releaseToBson();
WriteUnitOfWork wunit(txn);
StatusWith<RecordId> result = tempCollection->insertDocument(txn,
doc,
&indexer,
false);
if ( !result.isOK() )
return result.getStatus();
wunit.commit();
txn->checkForInterrupt();
}
Status status = indexer.doneInserting();
if (!status.isOK())
return status;
{
WriteUnitOfWork wunit(txn);