本文整理汇总了C++中IndexDescriptor::infoObj方法的典型用法代码示例。如果您正苦于以下问题:C++ IndexDescriptor::infoObj方法的具体用法?C++ IndexDescriptor::infoObj怎么用?C++ IndexDescriptor::infoObj使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类IndexDescriptor
的用法示例。
在下文中一共展示了IndexDescriptor::infoObj方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: run
void run() {
OperationContextImpl txn;
Client::WriteContext ctx(&txn, _ns);
int numFinishedIndexesStart = _catalog->numIndexesReady(&txn);
Helpers::ensureIndex(&txn, _coll, BSON("x" << 1), false, "_x_0");
Helpers::ensureIndex(&txn, _coll, BSON("y" << 1), false, "_y_0");
ASSERT_TRUE(_catalog->numIndexesReady(&txn) == numFinishedIndexesStart+2);
IndexCatalog::IndexIterator ii = _catalog->getIndexIterator(&txn,false);
int indexesIterated = 0;
bool foundIndex = false;
while (ii.more()) {
IndexDescriptor* indexDesc = ii.next();
indexesIterated++;
BSONObjIterator boit(indexDesc->infoObj());
while (boit.more() && !foundIndex) {
BSONElement e = boit.next();
if (str::equals(e.fieldName(), "name") &&
str::equals(e.valuestrsafe(), "_y_0")) {
foundIndex = true;
break;
}
}
}
ctx.commit();
ASSERT_TRUE(indexesIterated == _catalog->numIndexesReady(&txn));
ASSERT_TRUE(foundIndex);
}
示例2: computeIndexKeys
void CollectionInfoCache::computeIndexKeys() {
DEV Lock::assertWriteLocked( _collection->ns().ns() );
_indexedPaths.clear();
IndexCatalog::IndexIterator i = _collection->getIndexCatalog()->getIndexIterator(true);
while (i.more()) {
IndexDescriptor* descriptor = i.next();
if (descriptor->getAccessMethodName() != IndexNames::TEXT) {
BSONObj key = descriptor->keyPattern();
BSONObjIterator j(key);
while (j.more()) {
BSONElement e = j.next();
_indexedPaths.addPath(e.fieldName());
}
}
else {
fts::FTSSpec ftsSpec(descriptor->infoObj());
if (ftsSpec.wildcard()) {
_indexedPaths.allPathsIndexed();
}
else {
for (size_t i = 0; i < ftsSpec.numExtraBefore(); ++i) {
_indexedPaths.addPath(ftsSpec.extraBefore(i));
}
for (fts::Weights::const_iterator it = ftsSpec.weights().begin();
it != ftsSpec.weights().end();
++it) {
_indexedPaths.addPath(it->first);
}
for (size_t i = 0; i < ftsSpec.numExtraAfter(); ++i) {
_indexedPaths.addPath(ftsSpec.extraAfter(i));
}
// Any update to a path containing "language" as a component could change the
// language of a subdocument. Add the override field as a path component.
_indexedPaths.addPathComponent(ftsSpec.languageOverrideField());
}
}
}
_keysComputed = true;
}
示例3: run
void run() {
Client::WriteContext ctx(_ns);
int numFinishedIndexesStart = _catalog->numIndexesReady();
BSONObjBuilder b1;
b1.append("key", BSON("x" << 1));
b1.append("ns", _ns);
b1.append("name", "_x_0");
_catalog->createIndex(b1.obj(), true);
BSONObjBuilder b2;
b2.append("key", BSON("y" << 1));
b2.append("ns", _ns);
b2.append("name", "_y_0");
_catalog->createIndex(b2.obj(), true);
ASSERT_TRUE(_catalog->numIndexesReady() == numFinishedIndexesStart+2);
IndexCatalog::IndexIterator ii = _catalog->getIndexIterator(false);
int indexesIterated = 0;
bool foundIndex = false;
while (ii.more()) {
IndexDescriptor* indexDesc = ii.next();
indexesIterated++;
BSONObjIterator boit(indexDesc->infoObj());
while (boit.more() && !foundIndex) {
BSONElement e = boit.next();
if (str::equals(e.fieldName(), "name") &&
str::equals(e.valuestrsafe(), "_y_0")) {
foundIndex = true;
break;
}
}
}
ASSERT_TRUE(indexesIterated == _catalog->numIndexesReady());
ASSERT_TRUE(foundIndex);
}
示例4: ii
// Compacts this (MMAPv1-era) collection in place: saves every index spec,
// drops all indexes, rewrites each extent's records, then rebuilds the
// indexes via MultiIndexBlock. Returns the compaction stats, or an error
// status if preconditions fail or an index cannot be rebuilt.
// NOTE(review): not interrupt-safe after dropAllIndexes — a failure between
// drop and commit leaves the collection without its secondary indexes.
StatusWith<CompactStats> Collection::compact( const CompactOptions* compactOptions ) {
// Precondition checks: capped collections cannot be compacted, and a
// concurrent index build would conflict with the drop/rebuild below.
if ( isCapped() )
return StatusWith<CompactStats>( ErrorCodes::BadValue,
"cannot compact capped collection" );
if ( _indexCatalog.numIndexesInProgress() )
return StatusWith<CompactStats>( ErrorCodes::BadValue,
"cannot compact when indexes in progress" );
NamespaceDetails* d = details();
// this is a big job, so might as well make things tidy before we start just to be nice.
getDur().commitIfNeeded();
// Snapshot the extent chain up front; _compactExtent below rewrites them
// one at a time.
list<DiskLoc> extents;
for( DiskLoc L = d->firstExtent(); !L.isNull(); L = L.ext()->xnext )
extents.push_back(L);
log() << "compact " << extents.size() << " extents" << endl;
// same data, but might perform a little different after compact?
_infoCache.reset();
// Save and validate every index spec BEFORE dropping, so we can refuse to
// proceed if any existing index could not be recreated afterwards.
vector<BSONObj> indexSpecs;
{
IndexCatalog::IndexIterator ii( _indexCatalog.getIndexIterator( false ) );
while ( ii.more() ) {
IndexDescriptor* descriptor = ii.next();
const BSONObj spec = _compactAdjustIndexSpec(descriptor->infoObj());
const BSONObj key = spec.getObjectField("key");
const Status keyStatus = validateKeyPattern(key);
if (!keyStatus.isOK()) {
return StatusWith<CompactStats>(
ErrorCodes::CannotCreateIndex,
str::stream() << "Cannot rebuild index " << spec << ": "
<< keyStatus.reason()
<< " For more info see"
<< " http://dochub.mongodb.org/core/index-validation");
}
indexSpecs.push_back(spec);
}
}
log() << "compact orphan deleted lists" << endl;
d->orphanDeletedList();
// Start over from scratch with our extent sizing and growth
d->setLastExtentSize( 0 );
// before dropping indexes, at least make sure we can allocate one extent!
// this will allocate an extent and add to free list
// if it cannot, it will throw an exception
increaseStorageSize( _details->lastExtentSize(), true );
// note that the drop indexes call also invalidates all clientcursors for the namespace,
// which is important and wanted here
log() << "compact dropping indexes" << endl;
Status status = _indexCatalog.dropAllIndexes( true );
if ( !status.isOK() ) {
return StatusWith<CompactStats>( status );
}
getDur().commitIfNeeded();
killCurrentOp.checkForInterrupt();
CompactStats stats;
// Rebuild all saved indexes while the data is rewritten; _compactExtent
// feeds each surviving document into this block.
MultiIndexBlock multiIndexBlock( this );
status = multiIndexBlock.init( indexSpecs );
if ( !status.isOK() )
return StatusWith<CompactStats>( status );
// reset data size and record counts to 0 for this namespace
// as we're about to tally them up again for each new extent
d->setStats( 0, 0 );
ProgressMeterHolder pm(cc().curop()->setMessage("compact extent",
"Extent Compacting Progress",
extents.size()));
int extentNumber = 0;
for( list<DiskLoc>::iterator i = extents.begin(); i != extents.end(); i++ ) {
_compactExtent(*i, extentNumber++, multiIndexBlock, compactOptions, &stats );
pm.hit();
}
// After compaction the first extent must be the head of the chain.
verify( d->firstExtent().ext()->xprev.isNull() );
// indexes will do their own progress meter?
pm.finished();
log() << "starting index commits";
status = multiIndexBlock.commit();
if ( !status.isOK() )
return StatusWith<CompactStats>( status );
return StatusWith<CompactStats>( stats );
}
示例5: ii
// Compacts this collection through its RecordStore: saves and validates all
// index specs, drops the indexes, asks the record store to compact (feeding
// surviving documents to MyCompactAdaptor, which re-inserts them and their
// index keys), then commits the rebuilt indexes. Returns compaction stats or
// an error status.
StatusWith<CompactStats> Collection::compact( OperationContext* txn,
const CompactOptions* compactOptions ) {
// Preconditions: the underlying record store must support compaction, and
// no index build may be in progress (we are about to drop/rebuild them all).
if ( !_recordStore->compactSupported() )
return StatusWith<CompactStats>( ErrorCodes::BadValue,
str::stream() <<
"cannot compact collection with record store: " <<
_recordStore->name() );
if ( _indexCatalog.numIndexesInProgress() )
return StatusWith<CompactStats>( ErrorCodes::BadValue,
"cannot compact when indexes in progress" );
// same data, but might perform a little different after compact?
_infoCache.reset();
// Save and validate every index spec BEFORE dropping, so we can refuse to
// proceed if any existing index could not be recreated afterwards.
vector<BSONObj> indexSpecs;
{
IndexCatalog::IndexIterator ii( _indexCatalog.getIndexIterator( false ) );
while ( ii.more() ) {
IndexDescriptor* descriptor = ii.next();
const BSONObj spec = _compactAdjustIndexSpec(descriptor->infoObj());
const BSONObj key = spec.getObjectField("key");
const Status keyStatus = validateKeyPattern(key);
if (!keyStatus.isOK()) {
return StatusWith<CompactStats>(
ErrorCodes::CannotCreateIndex,
str::stream() << "Cannot compact collection due to invalid index "
<< spec << ": " << keyStatus.reason() << " For more info see"
<< " http://dochub.mongodb.org/core/index-validation");
}
indexSpecs.push_back(spec);
}
}
// note that the drop indexes call also invalidates all clientcursors for the namespace,
// which is important and wanted here
log() << "compact dropping indexes" << endl;
Status status = _indexCatalog.dropAllIndexes(txn, true);
if ( !status.isOK() ) {
return StatusWith<CompactStats>( status );
}
// Last chance to bail out cleanly before the expensive rewrite starts.
txn->checkForInterrupt();
CompactStats stats;
// Index builder that the compact adaptor feeds as records are re-inserted.
MultiIndexBlock multiIndexBlock(txn, this);
status = multiIndexBlock.init( indexSpecs );
if ( !status.isOK() )
return StatusWith<CompactStats>( status );
MyCompactAdaptor adaptor(this, &multiIndexBlock);
_recordStore->compact( txn, &adaptor, compactOptions, &stats );
log() << "starting index commits";
status = multiIndexBlock.commit();
if ( !status.isOK() )
return StatusWith<CompactStats>( status );
return StatusWith<CompactStats>( stats );
}
示例6: ii
StatusWith<CompactStats> Collection::compact( const CompactOptions* compactOptions ) {
if ( isCapped() )
return StatusWith<CompactStats>( ErrorCodes::BadValue,
"cannot compact capped collection" );
if ( _indexCatalog.numIndexesInProgress() )
return StatusWith<CompactStats>( ErrorCodes::BadValue,
"cannot compact when indexes in progress" );
NamespaceDetails* d = details();
// this is a big job, so might as well make things tidy before we start just to be nice.
getDur().commitIfNeeded();
list<DiskLoc> extents;
for( DiskLoc L = d->firstExtent(); !L.isNull(); L = L.ext()->xnext )
extents.push_back(L);
log() << "compact " << extents.size() << " extents" << endl;
// same data, but might perform a little different after compact?
_infoCache.reset();
vector<BSONObj> indexSpecs;
{
IndexCatalog::IndexIterator ii( _indexCatalog.getIndexIterator( false ) );
while ( ii.more() ) {
IndexDescriptor* descriptor = ii.next();
indexSpecs.push_back( _compactAdjustIndexSpec( descriptor->infoObj() ) );
}
}
log() << "compact orphan deleted lists" << endl;
d->orphanDeletedList();
// Start over from scratch with our extent sizing and growth
d->setLastExtentSize( 0 );
// before dropping indexes, at least make sure we can allocate one extent!
if ( allocateSpaceForANewRecord( _ns.ns().c_str(),
d,
Record::HeaderSize+1,
false).isNull() ) {
return StatusWith<CompactStats>( ErrorCodes::InternalError,
"compact error no space available to allocate" );
}
// note that the drop indexes call also invalidates all clientcursors for the namespace,
// which is important and wanted here
log() << "compact dropping indexes" << endl;
Status status = _indexCatalog.dropAllIndexes( true );
if ( !status.isOK() ) {
return StatusWith<CompactStats>( status );
}
getDur().commitIfNeeded();
CompactStats stats;
OwnedPointerVector<IndexCatalog::IndexBuildBlock> indexBuildBlocks;
vector<IndexAccessMethod*> indexesToInsertTo;
vector< std::pair<IndexAccessMethod*,IndexAccessMethod*> > bulkToCommit;
for ( size_t i = 0; i < indexSpecs.size(); i++ ) {
killCurrentOp.checkForInterrupt(false);
BSONObj info = indexSpecs[i];
info = _compactAdjustIndexSpec( info );
info = _indexCatalog.fixIndexSpec( info );
auto_ptr<IndexCatalog::IndexBuildBlock> block( new IndexCatalog::IndexBuildBlock( this,info ) );
Status status = block->init();
if ( !status.isOK() )
return StatusWith<CompactStats>(status);
IndexAccessMethod* accessMethod = block->getEntry()->accessMethod();
status = accessMethod->initializeAsEmpty();
if ( !status.isOK() )
return StatusWith<CompactStats>(status);
IndexAccessMethod* bulk = accessMethod->initiateBulk();
if ( bulk ) {
indexesToInsertTo.push_back( bulk );
bulkToCommit.push_back( std::pair<IndexAccessMethod*,IndexAccessMethod*>( accessMethod, bulk ) );
}
else {
indexesToInsertTo.push_back( accessMethod );
}
indexBuildBlocks.mutableVector().push_back( block.release() );
}
// reset data size and record counts to 0 for this namespace
// as we're about to tally them up again for each new extent
d->setStats( 0, 0 );
ProgressMeterHolder pm(cc().curop()->setMessage("compact extent",
"Extent Compacting Progress",
extents.size()));
int extentNumber = 0;
for( list<DiskLoc>::iterator i = extents.begin(); i != extents.end(); i++ ) {
_compactExtent(*i, extentNumber++, indexesToInsertTo, compactOptions, &stats );
//.........这里部分代码省略.........
示例7: Status
//.........这里部分代码省略.........
}
namespacesToCopy[ns] = options;
}
}
}
for ( map<string,CollectionOptions>::const_iterator i = namespacesToCopy.begin();
i != namespacesToCopy.end();
++i ) {
string ns = i->first;
CollectionOptions options = i->second;
Collection* tempCollection = NULL;
{
WriteUnitOfWork wunit(txn);
tempCollection = tempDatabase->createCollection(txn, ns, options, false);
wunit.commit();
}
OldClientContext readContext(txn, ns, originalDatabase);
Collection* originalCollection = originalDatabase->getCollection( ns );
invariant( originalCollection );
// data
// TODO SERVER-14812 add a mode that drops duplicates rather than failing
MultiIndexBlock indexer(txn, tempCollection );
{
vector<BSONObj> indexes;
IndexCatalog::IndexIterator ii =
originalCollection->getIndexCatalog()->getIndexIterator( txn, false );
while ( ii.more() ) {
IndexDescriptor* desc = ii.next();
indexes.push_back( desc->infoObj() );
}
Status status = indexer.init( indexes );
if (!status.isOK()) {
return status;
}
}
auto cursor = originalCollection->getCursor(txn);
while (auto record = cursor->next()) {
BSONObj doc = record->data.releaseToBson();
WriteUnitOfWork wunit(txn);
StatusWith<RecordId> result = tempCollection->insertDocument(txn,
doc,
&indexer,
false);
if ( !result.isOK() )
return result.getStatus();
wunit.commit();
txn->checkForInterrupt();
}
Status status = indexer.doneInserting();
if (!status.isOK())
return status;
{
WriteUnitOfWork wunit(txn);
indexer.commit();
wunit.commit();
示例8: Status
//.........这里部分代码省略.........
namespacesToCopy[ns] = options;
}
}
}
for ( map<string,CollectionOptions>::const_iterator i = namespacesToCopy.begin();
i != namespacesToCopy.end();
++i ) {
string ns = i->first;
CollectionOptions options = i->second;
Collection* tempCollection = NULL;
{
Client::Context tempContext(txn, ns, tempDatabase );
WriteUnitOfWork wunit(txn);
tempCollection = tempDatabase->createCollection(txn, ns, options, true, false);
wunit.commit();
}
Client::Context readContext(txn, ns, originalDatabase);
Collection* originalCollection = originalDatabase->getCollection( txn, ns );
invariant( originalCollection );
// data
// TODO SERVER-14812 add a mode that drops duplicates rather than failing
MultiIndexBlock indexer(txn, tempCollection );
{
vector<BSONObj> indexes;
IndexCatalog::IndexIterator ii =
originalCollection->getIndexCatalog()->getIndexIterator( false );
while ( ii.more() ) {
IndexDescriptor* desc = ii.next();
indexes.push_back( desc->infoObj() );
}
Client::Context tempContext(txn, ns, tempDatabase);
Status status = indexer.init( indexes );
if ( !status.isOK() )
return status;
}
scoped_ptr<RecordIterator> iterator(
originalCollection->getIterator( txn, DiskLoc(), false,
CollectionScanParams::FORWARD ));
while ( !iterator->isEOF() ) {
DiskLoc loc = iterator->getNext();
invariant( !loc.isNull() );
BSONObj doc = originalCollection->docFor( loc );
Client::Context tempContext(txn, ns, tempDatabase);
WriteUnitOfWork wunit(txn);
StatusWith<DiskLoc> result = tempCollection->insertDocument(txn,
doc,
&indexer,
false);
if ( !result.isOK() )
return result.getStatus();
wunit.commit();
txn->checkForInterrupt(false);
}
Status status = indexer.doneInserting();
示例9: validationDisabler
StatusWith<CompactStats> compactCollection(OperationContext* opCtx,
Collection* collection,
const CompactOptions* compactOptions) {
dassert(opCtx->lockState()->isCollectionLockedForMode(collection->ns().toString(), MODE_X));
DisableDocumentValidation validationDisabler(opCtx);
auto recordStore = collection->getRecordStore();
auto indexCatalog = collection->getIndexCatalog();
if (!recordStore->compactSupported())
return StatusWith<CompactStats>(ErrorCodes::CommandNotSupported,
str::stream()
<< "cannot compact collection with record store: "
<< recordStore->name());
if (recordStore->compactsInPlace()) {
CompactStats stats;
Status status = recordStore->compact(opCtx);
if (!status.isOK())
return StatusWith<CompactStats>(status);
// Compact all indexes (not including unfinished indexes)
std::unique_ptr<IndexCatalog::IndexIterator> ii(
indexCatalog->getIndexIterator(opCtx, false));
while (ii->more()) {
IndexCatalogEntry* entry = ii->next();
IndexDescriptor* descriptor = entry->descriptor();
IndexAccessMethod* iam = entry->accessMethod();
LOG(1) << "compacting index: " << descriptor->toString();
Status status = iam->compact(opCtx);
if (!status.isOK()) {
error() << "failed to compact index: " << descriptor->toString();
return status;
}
}
return StatusWith<CompactStats>(stats);
}
if (indexCatalog->numIndexesInProgress(opCtx))
return StatusWith<CompactStats>(ErrorCodes::BadValue,
"cannot compact when indexes in progress");
std::vector<BSONObj> indexSpecs;
{
std::unique_ptr<IndexCatalog::IndexIterator> ii(
indexCatalog->getIndexIterator(opCtx, false));
while (ii->more()) {
IndexDescriptor* descriptor = ii->next()->descriptor();
// Compact always creates the new index in the foreground.
const BSONObj spec =
descriptor->infoObj().removeField(IndexDescriptor::kBackgroundFieldName);
const BSONObj key = spec.getObjectField("key");
const Status keyStatus =
index_key_validate::validateKeyPattern(key, descriptor->version());
if (!keyStatus.isOK()) {
return StatusWith<CompactStats>(
ErrorCodes::CannotCreateIndex,
str::stream() << "Cannot compact collection due to invalid index " << spec
<< ": "
<< keyStatus.reason()
<< " For more info see"
<< " http://dochub.mongodb.org/core/index-validation");
}
indexSpecs.push_back(spec);
}
}
// Give a chance to be interrupted *before* we drop all indexes.
opCtx->checkForInterrupt();
{
// note that the drop indexes call also invalidates all clientcursors for the namespace,
// which is important and wanted here
WriteUnitOfWork wunit(opCtx);
log() << "compact dropping indexes";
indexCatalog->dropAllIndexes(opCtx, true);
wunit.commit();
}
CompactStats stats;
MultiIndexBlockImpl indexer(opCtx, collection);
indexer.allowInterruption();
indexer.ignoreUniqueConstraint(); // in compact we should be doing no checking
Status status = indexer.init(indexSpecs).getStatus();
if (!status.isOK())
return StatusWith<CompactStats>(status);
status = recordStore->compact(opCtx);
if (!status.isOK())
return StatusWith<CompactStats>(status);
log() << "starting index commits";
status = indexer.dumpInsertsFromBulk();
if (!status.isOK())
//.........这里部分代码省略.........
示例10: validationDisabler
StatusWith<CompactStats> Collection::compact(OperationContext* txn,
const CompactOptions* compactOptions) {
dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
DisableDocumentValidation validationDisabler(txn);
if (!_recordStore->compactSupported())
return StatusWith<CompactStats>(ErrorCodes::CommandNotSupported,
str::stream()
<< "cannot compact collection with record store: "
<< _recordStore->name());
if (_recordStore->compactsInPlace()) {
CompactStats stats;
Status status = _recordStore->compact(txn, NULL, compactOptions, &stats);
if (!status.isOK())
return StatusWith<CompactStats>(status);
// Compact all indexes (not including unfinished indexes)
IndexCatalog::IndexIterator ii(_indexCatalog.getIndexIterator(txn, false));
while (ii.more()) {
IndexDescriptor* descriptor = ii.next();
IndexAccessMethod* index = _indexCatalog.getIndex(descriptor);
LOG(1) << "compacting index: " << descriptor->toString();
Status status = index->compact(txn);
if (!status.isOK()) {
error() << "failed to compact index: " << descriptor->toString();
return status;
}
}
return StatusWith<CompactStats>(stats);
}
if (_indexCatalog.numIndexesInProgress(txn))
return StatusWith<CompactStats>(ErrorCodes::BadValue,
"cannot compact when indexes in progress");
vector<BSONObj> indexSpecs;
{
IndexCatalog::IndexIterator ii(_indexCatalog.getIndexIterator(txn, false));
while (ii.more()) {
IndexDescriptor* descriptor = ii.next();
const BSONObj spec = _compactAdjustIndexSpec(descriptor->infoObj());
const BSONObj key = spec.getObjectField("key");
const Status keyStatus = validateKeyPattern(key);
if (!keyStatus.isOK()) {
return StatusWith<CompactStats>(
ErrorCodes::CannotCreateIndex,
str::stream() << "Cannot compact collection due to invalid index " << spec
<< ": "
<< keyStatus.reason()
<< " For more info see"
<< " http://dochub.mongodb.org/core/index-validation");
}
indexSpecs.push_back(spec);
}
}
// Give a chance to be interrupted *before* we drop all indexes.
txn->checkForInterrupt();
{
// note that the drop indexes call also invalidates all clientcursors for the namespace,
// which is important and wanted here
WriteUnitOfWork wunit(txn);
log() << "compact dropping indexes";
Status status = _indexCatalog.dropAllIndexes(txn, true);
if (!status.isOK()) {
return StatusWith<CompactStats>(status);
}
wunit.commit();
}
CompactStats stats;
MultiIndexBlock indexer(txn, this);
indexer.allowInterruption();
indexer.ignoreUniqueConstraint(); // in compact we should be doing no checking
Status status = indexer.init(indexSpecs);
if (!status.isOK())
return StatusWith<CompactStats>(status);
MyCompactAdaptor adaptor(this, &indexer);
status = _recordStore->compact(txn, &adaptor, compactOptions, &stats);
if (!status.isOK())
return StatusWith<CompactStats>(status);
log() << "starting index commits";
status = indexer.doneInserting();
if (!status.isOK())
return StatusWith<CompactStats>(status);
{
WriteUnitOfWork wunit(txn);
indexer.commit();
//.........这里部分代码省略.........
示例11: repairDatabase
//.........这里部分代码省略.........
if ( !status.isOK() )
return status;
}
namespacesToCopy[ns] = options;
}
}
}
for ( map<string,CollectionOptions>::const_iterator i = namespacesToCopy.begin();
i != namespacesToCopy.end();
++i ) {
string ns = i->first;
CollectionOptions options = i->second;
Collection* tempCollection = NULL;
{
Client::Context tempContext( ns, tempDatabase );
tempCollection = tempDatabase->createCollection( ns, options, true, false );
}
Client::Context readContext( ns, originalDatabase );
Collection* originalCollection = originalDatabase->getCollection( ns );
invariant( originalCollection );
// data
MultiIndexBlock indexBlock( tempCollection );
{
vector<BSONObj> indexes;
IndexCatalog::IndexIterator ii =
originalCollection->getIndexCatalog()->getIndexIterator( false );
while ( ii.more() ) {
IndexDescriptor* desc = ii.next();
indexes.push_back( desc->infoObj() );
}
Client::Context tempContext( ns, tempDatabase );
Status status = indexBlock.init( indexes );
if ( !status.isOK() )
return status;
}
scoped_ptr<CollectionIterator> iterator( originalCollection->getIterator( DiskLoc(),
false,
CollectionScanParams::FORWARD ) );
while ( !iterator->isEOF() ) {
DiskLoc loc = iterator->getNext();
invariant( !loc.isNull() );
BSONObj doc = originalCollection->docFor( loc );
Client::Context tempContext( ns, tempDatabase );
StatusWith<DiskLoc> result = tempCollection->insertDocument( doc, indexBlock );
if ( !result.isOK() )
return result.getStatus();
getDur().commitIfNeeded();
killCurrentOp.checkForInterrupt(false);
}
{
Client::Context tempContext( ns, tempDatabase );
Status status = indexBlock.commit();
if ( !status.isOK() )
return status;
示例12: computeIndexKeys
void CollectionInfoCache::computeIndexKeys(OperationContext* opCtx) {
    // Rebuilds the set of dotted paths covered by any index, and keeps the
    // global TTL collection cache in sync with whether this collection has a
    // TTL index.
    _indexedPaths.clear();

    const bool previouslyHadTTLIndex = _hasTTLIndex;
    _hasTTLIndex = false;

    // Include in-progress indexes (true) so concurrent writers treat their
    // paths as indexed while the build runs.
    IndexCatalog::IndexIterator indexIter =
        _collection->getIndexCatalog()->getIndexIterator(opCtx, true);
    while (indexIter.more()) {
        IndexDescriptor* desc = indexIter.next();

        if (desc->getAccessMethodName() == IndexNames::TEXT) {
            // Text indexes: the indexed paths come from the FTS spec, not the
            // raw key pattern.
            fts::FTSSpec ftsSpec(desc->infoObj());
            if (ftsSpec.wildcard()) {
                _indexedPaths.allPathsIndexed();
            } else {
                for (size_t idx = 0; idx < ftsSpec.numExtraBefore(); ++idx) {
                    _indexedPaths.addPath(ftsSpec.extraBefore(idx));
                }
                for (fts::Weights::const_iterator w = ftsSpec.weights().begin();
                     w != ftsSpec.weights().end();
                     ++w) {
                    _indexedPaths.addPath(w->first);
                }
                for (size_t idx = 0; idx < ftsSpec.numExtraAfter(); ++idx) {
                    _indexedPaths.addPath(ftsSpec.extraAfter(idx));
                }
                // Any update to a path containing "language" as a component could
                // change the language of a subdocument. Add the override field as
                // a path component.
                _indexedPaths.addPathComponent(ftsSpec.languageOverrideField());
            }
        } else {
            // Ordinary indexes: each key-pattern field name is an indexed path,
            // and an "expireAfterSeconds" option marks the index as TTL.
            BSONObj keyPattern = desc->keyPattern();
            const BSONObj& infoObj = desc->infoObj();
            if (infoObj.hasField("expireAfterSeconds")) {
                _hasTTLIndex = true;
            }
            BSONObjIterator keyIter(keyPattern);
            while (keyIter.more()) {
                BSONElement keyField = keyIter.next();
                _indexedPaths.addPath(keyField.fieldName());
            }
        }

        // handle partial indexes: fields referenced by the filter expression
        // also count as indexed paths.
        const IndexCatalogEntry* entry = indexIter.catalogEntry(desc);
        const MatchExpression* filter = entry->getFilterExpression();
        if (filter) {
            unordered_set<std::string> filterPaths;
            QueryPlannerIXSelect::getFields(filter, "", &filterPaths);
            for (const auto& path : filterPaths) {
                _indexedPaths.addPath(path);
            }
        }
    }

    // Register/unregister with the TTL monitor only when the TTL-ness of this
    // collection actually changed.
    TTLCollectionCache& ttlCollectionCache = TTLCollectionCache::get(getGlobalServiceContext());
    if (_hasTTLIndex != previouslyHadTTLIndex) {
        if (_hasTTLIndex) {
            ttlCollectionCache.registerCollection(_collection->ns());
        } else {
            ttlCollectionCache.unregisterCollection(_collection->ns());
        }
    }

    _keysComputed = true;
}