This article collects typical usage examples of the C++ IndexAccessMethod class. If you are wondering what IndexAccessMethod does, how to use it, or what real code using it looks like, the curated class examples here may help.
The following shows 15 code examples of the IndexAccessMethod class, sorted by popularity by default.
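A pattern recurs throughout the examples: resolve an IndexDescriptor through the IndexCatalog, obtain the corresponding IndexAccessMethod, then invoke an operation such as touch(), validate(), insert(), or update(). A minimal sketch of that pattern, assuming `collection`, `txn`, and `obj` are in scope:

// Sketch of the recurring lookup pattern (`collection`, `txn`, and `obj` are assumptions).
// The `false` argument iterates only over finished indexes.
IndexCatalog::IndexIterator it = collection->getIndexCatalog()->getIndexIterator(txn, false);
while (it.more()) {
    const IndexDescriptor* descriptor = it.next();
    IndexAccessMethod* iam = collection->getIndexCatalog()->getIndex(descriptor);
    invariant(iam);   // the catalog owns the access method; never NULL for a ready index
    iam->touch(obj);  // any IndexAccessMethod operation could go here
}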
Example 1: prefetchIndexPages
// page in pages needed for all index lookups on a given object
void prefetchIndexPages(Collection* collection,
const repl::ReplSetImpl::IndexPrefetchConfig& prefetchConfig,
const BSONObj& obj) {
DiskLoc unusedDl; // unused
BSONObjSet unusedKeys;
// Do we want prefetchConfig to be (1) as-is, (2) for update ops only, or (3) configured per op type?
// One might want PREFETCH_NONE for updates, but prefetching is rarely a bad idea for inserts.
// With #3 (per-op configuration), the big issue would be "too many knobs".
switch (prefetchConfig) {
case repl::ReplSetImpl::PREFETCH_NONE:
return;
case repl::ReplSetImpl::PREFETCH_ID_ONLY:
{
TimerHolder timer( &prefetchIndexStats);
// In the update op case, the call to prefetchRecordPages() will already touch the _id index,
// so perhaps this option isn't very useful?
try {
IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex();
if ( !desc )
return;
IndexAccessMethod* iam = collection->getIndexCatalog()->getIndex( desc );
verify( iam );
iam->touch(obj);
}
catch (const DBException& e) {
LOG(2) << "ignoring exception in prefetchIndexPages(): " << e.what() << endl;
}
break;
}
case repl::ReplSetImpl::PREFETCH_ALL:
{
// iterate over all indexes, including ones
// in the process of being built (the `true` argument)
IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator( true );
while ( ii.more() ) {
TimerHolder timer( &prefetchIndexStats);
// This will page in all index pages for the given object.
try {
IndexDescriptor* desc = ii.next();
IndexAccessMethod* iam = collection->getIndexCatalog()->getIndex( desc );
verify( iam );
iam->touch(obj);
}
catch (const DBException& e) {
LOG(2) << "ignoring exception in prefetchIndexPages(): " << e.what() << endl;
}
unusedKeys.clear();
}
break;
}
default:
fassertFailed(16427);
}
}
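A hedged usage sketch for the helper above: how an oplog applier might prefetch before applying an insert entry. The `collection`, `config`, and `op` names are assumptions for illustration.

// Hypothetical caller (names are assumptions): prefetch index pages for the
// document carried by an oplog insert entry before applying it.
BSONObj doc = op["o"].Obj();
prefetchIndexPages(collection, config, doc);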
Example 2: indexDetails
Status Collection::validate( OperationContext* txn,
bool full, bool scanData,
ValidateResults* results, BSONObjBuilder* output ){
MyValidateAdaptor adaptor;
Status status = _recordStore->validate( txn, full, scanData, &adaptor, results, output );
if ( !status.isOK() )
return status;
{ // indexes
output->append("nIndexes", _indexCatalog.numIndexesReady( txn ) );
int idxn = 0;
try {
// Only applicable when 'full' validation is requested.
boost::scoped_ptr<BSONObjBuilder> indexDetails(full ? new BSONObjBuilder() : NULL);
BSONObjBuilder indexes; // not using subObjStart to be exception safe
IndexCatalog::IndexIterator i = _indexCatalog.getIndexIterator(txn, false);
while( i.more() ) {
const IndexDescriptor* descriptor = i.next();
log(LogComponent::kIndex) << "validating index " << descriptor->indexNamespace() << endl;
IndexAccessMethod* iam = _indexCatalog.getIndex( descriptor );
invariant( iam );
boost::scoped_ptr<BSONObjBuilder> bob(
indexDetails.get() ? new BSONObjBuilder(
indexDetails->subobjStart(descriptor->indexNamespace())) :
NULL);
int64_t keys;
iam->validate(txn, full, &keys, bob.get());
indexes.appendNumber(descriptor->indexNamespace(),
static_cast<long long>(keys));
idxn++;
}
output->append("keysPerIndex", indexes.done());
if (indexDetails.get()) {
output->append("indexDetails", indexDetails->done());
}
}
catch ( DBException& exc ) {
string err = str::stream() <<
"exception during index validate idxn "<<
BSONObjBuilder::numStr(idxn) <<
": " << exc.toString();
results->errors.push_back( err );
results->valid = false;
}
}
return Status::OK();
}
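A sketch of driving the validate() above from caller code and reading back the per-index key counts; `txn` and `coll` are assumptions for illustration.

// Sketch: run a full validation and inspect the per-index key counts.
ValidateResults results;
BSONObjBuilder output;
Status s = coll->validate(txn, /*full=*/true, /*scanData=*/true, &results, &output);
if (s.isOK() && results.valid) {
    log() << "keysPerIndex: " << output.done()["keysPerIndex"] << endl;
}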
Example 3: addKeysToIndex
/**
* Add the provided (obj, dl) pair to the provided index.
*/
static void addKeysToIndex( Collection* collection, int idxNo,
const BSONObj& obj, const DiskLoc &recordLoc ) {
IndexDetails& id = collection->details()->idx(idxNo);
IndexDescriptor* desc = collection->getIndexCatalog()->getDescriptor( idxNo );
verify( desc );
IndexAccessMethod* iam = collection->getIndexCatalog()->getIndex( desc );
verify( iam );
InsertDeleteOptions options;
options.logIfError = false;
options.dupsAllowed = (!KeyPattern::isIdKeyPattern(id.keyPattern()) && !id.unique())
|| ignoreUniqueIndex(id);
int64_t inserted;
Status ret = iam->insert(obj, recordLoc, options, &inserted);
uassertStatusOK( ret );
}
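The helper indexes one (obj, DiskLoc) pair into a single index; a caller would typically loop it over every index on the collection. A sketch, where `collection`, `obj`, and `recordLoc` are assumptions:

// Sketch: add a newly inserted record to every completed index.
int n = collection->details()->getCompletedIndexCount();
for (int idxNo = 0; idxNo < n; ++idxNo) {
    addKeysToIndex(collection, idxNo, obj, recordLoc);  // uasserts on failure
}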
Example 4: getIndexCatalog
uint64_t Collection::getIndexSize(OperationContext* opCtx, BSONObjBuilder* details, int scale) {
IndexCatalog* idxCatalog = getIndexCatalog();
IndexCatalog::IndexIterator ii = idxCatalog->getIndexIterator(opCtx, true);
uint64_t totalSize = 0;
while (ii.more()) {
IndexDescriptor* d = ii.next();
IndexAccessMethod* iam = idxCatalog->getIndex(d);
long long ds = iam->getSpaceUsedBytes(opCtx);
totalSize += ds;
if (details) {
details->appendNumber(d->indexName(), ds / scale);
}
}
return totalSize;
}
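A usage sketch for getIndexSize(), reporting sizes in kilobytes; `opCtx` and `coll` are assumptions. Note that the per-index entries appended to the builder are scaled, while the return value is raw bytes.

// Sketch: total and per-index sizes scaled to KB.
BSONObjBuilder indexSizes;
uint64_t total = coll->getIndexSize(opCtx, &indexSizes, 1024);
log() << "totalIndexSize(KB): " << (total / 1024)
      << " per-index: " << indexSizes.obj() << endl;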
Example 5: while
Status Collection::validate( bool full, bool scanData,
ValidateResults* results, BSONObjBuilder* output ){
MyValidateAdaptor adaptor;
Status status = _recordStore->validate( full, scanData, &adaptor, results, output );
if ( !status.isOK() )
return status;
{ // indexes
output->append("nIndexes", _indexCatalog.numIndexesReady() );
int idxn = 0;
try {
BSONObjBuilder indexes; // not using subObjStart to be exception safe
IndexCatalog::IndexIterator i = _indexCatalog.getIndexIterator(false);
while( i.more() ) {
const IndexDescriptor* descriptor = i.next();
log() << "validating index " << descriptor->indexNamespace() << endl;
IndexAccessMethod* iam = _indexCatalog.getIndex( descriptor );
invariant( iam );
int64_t keys;
iam->validate(&keys);
indexes.appendNumber(descriptor->indexNamespace(),
static_cast<long long>(keys));
idxn++;
}
output->append("keysPerIndex", indexes.done());
}
catch ( DBException& exc ) {
string err = str::stream() <<
"exception during index validate idxn "<<
BSONObjBuilder::numStr(idxn) <<
": " << exc.toString();
results->errors.push_back( err );
results->valid = false;
}
}
return Status::OK();
}
Example 6: checkValidation
StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
const RecordId& oldLocation,
const Snapshotted<BSONObj>& oldDoc,
const BSONObj& newDoc,
bool enforceQuota,
bool indexesAffected,
OpDebug* debug,
oplogUpdateEntryArgs& args) {
{
auto status = checkValidation(txn, newDoc);
if (!status.isOK()) {
if (_validationLevel == STRICT_V) {
return status;
}
// moderate means we have to check the old doc
auto oldDocStatus = checkValidation(txn, oldDoc.value());
if (oldDocStatus.isOK()) {
// transitioning from good -> bad is not ok
return status;
}
// bad -> bad is ok in moderate mode
}
}
dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
invariant(oldDoc.snapshotId() == txn->recoveryUnit()->getSnapshotId());
if (_needCappedLock) {
// X-lock the metadata resource for this capped collection until the end of the WUOW. This
// prevents the primary from executing with more concurrency than secondaries.
// See SERVER-21646.
Lock::ResourceLock heldUntilEndOfWUOW{txn->lockState(), ResourceId(RESOURCE_METADATA, _ns.ns()), MODE_X};
}
SnapshotId sid = txn->recoveryUnit()->getSnapshotId();
BSONElement oldId = oldDoc.value()["_id"];
if (!oldId.eoo() && (oldId != newDoc["_id"]))
return StatusWith<RecordId>(
ErrorCodes::InternalError, "in Collection::updateDocument _id mismatch", 13596);
// The MMAPv1 storage engine implements capped collections in a way that does not allow records
// to grow beyond their original size. If MMAPv1 is part of a replica set with storage engines
// that do not have this limitation, replication could result in errors, so it is necessary to
// set a uniform rule here. Similarly, it is not sufficient to disallow growing records, because
// this happens when secondaries roll back an update that shrunk a record. Exactly replicating
// legacy MMAPv1 behavior would require padding shrunk documents on all storage engines. Instead,
// forbid all size changes.
const auto oldSize = oldDoc.value().objsize();
if (_recordStore->isCapped() && oldSize != newDoc.objsize())
return {ErrorCodes::CannotGrowDocumentInCappedNamespace,
str::stream() << "Cannot change the size of a document in a capped collection: "
<< oldSize << " != " << newDoc.objsize()};
// At the end of this step, we will have a map of UpdateTickets, one per index, which
// represent the index updates needed to be done, based on the changes between oldDoc and
// newDoc.
OwnedPointerMap<IndexDescriptor*, UpdateTicket> updateTickets;
if (indexesAffected) {
IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(txn, true);
while (ii.more()) {
IndexDescriptor* descriptor = ii.next();
IndexCatalogEntry* entry = ii.catalogEntry(descriptor);
IndexAccessMethod* iam = ii.accessMethod(descriptor);
InsertDeleteOptions options;
options.logIfError = false;
options.dupsAllowed =
!(KeyPattern::isIdKeyPattern(descriptor->keyPattern()) || descriptor->unique()) ||
repl::getGlobalReplicationCoordinator()->shouldIgnoreUniqueIndex(descriptor);
UpdateTicket* updateTicket = new UpdateTicket();
updateTickets.mutableMap()[descriptor] = updateTicket;
Status ret = iam->validateUpdate(txn,
oldDoc.value(),
newDoc,
oldLocation,
options,
updateTicket,
entry->getFilterExpression());
if (!ret.isOK()) {
return StatusWith<RecordId>(ret);
}
}
}
// This can call back into Collection::recordStoreGoingToMove. If that happens, the old
// object is removed from all indexes.
StatusWith<RecordId> newLocation = _recordStore->updateRecord(
txn, oldLocation, newDoc.objdata(), newDoc.objsize(), _enforceQuota(enforceQuota), this);
if (!newLocation.isOK()) {
return newLocation;
}
// At this point, the old object may or may not still be indexed, depending on if it was
// moved. If the object did move, we need to add the new location to all indexes.
if (newLocation.getValue() != oldLocation) {
if (debug) {
if (debug->nmoved == -1) // default of -1 rather than 0
debug->nmoved = 1;
//......... remaining code omitted .........
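The omitted tail presumably applies each index's UpdateTicket once the record is known not to have moved; Example 15 below shows that phase in full. A condensed sketch following the names used above:

// Sketch of the apply phase (see Example 15): consume each index's UpdateTicket.
IndexCatalog::IndexIterator ii2 = _indexCatalog.getIndexIterator(txn, true);
while (ii2.more()) {
    IndexDescriptor* descriptor = ii2.next();
    IndexAccessMethod* iam = ii2.accessMethod(descriptor);
    int64_t updatedKeys;
    Status ret = iam->update(txn, *updateTickets.mutableMap()[descriptor], &updatedKeys);
    if (!ret.isOK())
        return StatusWith<RecordId>(ret);
}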
Example 7: appendCollectionStorageStats
Status appendCollectionStorageStats(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& param,
BSONObjBuilder* result) {
int scale = 1;
if (param["scale"].isNumber()) {
scale = param["scale"].numberInt();
if (scale < 1) {
return {ErrorCodes::BadValue, "scale has to be >= 1"};
}
} else if (param["scale"].trueValue()) {
return {ErrorCodes::BadValue, "scale has to be a number >= 1"};
}
bool verbose = param["verbose"].trueValue();
AutoGetCollectionForReadCommand ctx(opCtx, nss);
Collection* collection = ctx.getCollection(); // Will be set if present
if (!ctx.getDb() || !collection) {
result->appendNumber("size", 0);
result->appendNumber("count", 0);
result->appendNumber("storageSize", 0);
result->append("nindexes", 0);
result->appendNumber("totalIndexSize", 0);
result->append("indexDetails", BSONObj());
result->append("indexSizes", BSONObj());
std::string errmsg = !(ctx.getDb()) ? "Database [" + nss.db().toString() + "] not found."
: "Collection [" + nss.toString() + "] not found.";
return {ErrorCodes::NamespaceNotFound, errmsg};
}
long long size = collection->dataSize(opCtx) / scale;
result->appendNumber("size", size);
long long numRecords = collection->numRecords(opCtx);
result->appendNumber("count", numRecords);
if (numRecords)
result->append("avgObjSize", collection->averageObjectSize(opCtx));
RecordStore* recordStore = collection->getRecordStore();
result->appendNumber(
"storageSize",
static_cast<long long>(recordStore->storageSize(opCtx, result, verbose ? 1 : 0)) / scale);
recordStore->appendCustomStats(opCtx, result, scale);
IndexCatalog* indexCatalog = collection->getIndexCatalog();
result->append("nindexes", indexCatalog->numIndexesReady(opCtx));
BSONObjBuilder indexDetails;
IndexCatalog::IndexIterator i = indexCatalog->getIndexIterator(opCtx, false);
while (i.more()) {
const IndexDescriptor* descriptor = i.next();
IndexAccessMethod* iam = indexCatalog->getIndex(descriptor);
invariant(iam);
BSONObjBuilder bob;
if (iam->appendCustomStats(opCtx, &bob, scale)) {
indexDetails.append(descriptor->indexName(), bob.obj());
}
}
result->append("indexDetails", indexDetails.obj());
BSONObjBuilder indexSizes;
long long indexSize = collection->getIndexSize(opCtx, &indexSizes, scale);
result->appendNumber("totalIndexSize", indexSize / scale);
result->append("indexSizes", indexSizes.obj());
return Status::OK();
}
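A sketch of invoking the stats helper with a scale and verbose flag; `opCtx` is an assumption and the namespace is illustrative.

// Sketch: collStats-style output with sizes scaled to KB.
BSONObjBuilder result;
Status s = appendCollectionStorageStats(
    opCtx, NamespaceString("test.coll"), BSON("scale" << 1024 << "verbose" << true), &result);
if (!s.isOK())
    log() << "collStats failed: " << s.reason() << endl;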
Example 8: validateNS
//......... preceding code omitted .........
int ndel = 0;
long long delSize = 0;
BSONArrayBuilder delBucketSizes;
int incorrect = 0;
for ( int i = 0; i < Buckets; i++ ) {
DiskLoc loc = nsd->deletedListEntry(i);
try {
int k = 0;
while ( !loc.isNull() ) {
if ( recs.count(loc) )
incorrect++;
ndel++;
if ( loc.questionable() ) {
if( nsd->isCapped() && !loc.isValid() && i == 1 ) {
/* the constructor for NamespaceDetails intentionally sets deletedList[1] to invalid
see comments in namespace.h
*/
break;
}
string err( str::stream() << "bad pointer in deleted record list: "
<< loc.toString()
<< " bucket: " << i
<< " k: " << k );
errors << err;
valid = false;
break;
}
DeletedRecord *d = loc.drec();
delSize += d->lengthWithHeaders();
loc = d->nextDeleted();
k++;
killCurrentOp.checkForInterrupt();
}
delBucketSizes << k;
}
catch (...) {
errors << ("exception in deleted chain for bucket " + BSONObjBuilder::numStr(i));
valid = false;
}
}
result.appendNumber("deletedCount", ndel);
result.appendNumber("deletedSize", delSize);
if ( full ) {
result << "delBucketSizes" << delBucketSizes.arr();
}
if ( incorrect ) {
errors << (BSONObjBuilder::numStr(incorrect) + " records from datafile are in deleted list");
valid = false;
}
int idxn = 0;
try {
IndexCatalog* indexCatalog = collection->getIndexCatalog();
result.append("nIndexes", nsd->getCompletedIndexCount());
BSONObjBuilder indexes; // not using subObjStart to be exception safe
NamespaceDetails::IndexIterator i = nsd->ii();
while( i.more() ) {
IndexDetails& id = i.next();
log() << "validating index " << idxn << ": " << id.indexNamespace() << endl;
IndexDescriptor* descriptor = indexCatalog->getDescriptor( idxn );
verify( descriptor );
IndexAccessMethod* iam = indexCatalog->getIndex( descriptor );
verify( iam );
int64_t keys;
iam->validate(&keys);
indexes.appendNumber(id.indexNamespace(), static_cast<long long>(keys));
idxn++;
}
result.append("keysPerIndex", indexes.done());
}
catch (...) {
errors << ("exception during index validate idxn " + BSONObjBuilder::numStr(idxn));
valid = false;
}
}
catch (const AssertionException&) {
errors << "exception during validate";
valid = false;
}
result.appendBool("valid", valid);
result.append("errors", errors.arr());
if ( !full ){
result.append("warning", "Some checks omitted for speed. use {full:true} option to do more thorough scan.");
}
if ( !valid ) {
result.append("advice", "ns corrupt, requires repair");
}
}
Example 9: checkValidation
StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
const RecordId& oldLocation,
const Snapshotted<BSONObj>& oldDoc,
const BSONObj& newDoc,
bool enforceQuota,
bool indexesAffected,
OpDebug* debug,
oplogUpdateEntryArgs& args) {
{
auto status = checkValidation(txn, newDoc);
if (!status.isOK()) {
if (_validationLevel == STRICT_V) {
return status;
}
// moderate means we have to check the old doc
auto oldDocStatus = checkValidation(txn, oldDoc.value());
if (oldDocStatus.isOK()) {
// transitioning from good -> bad is not ok
return status;
}
// bad -> bad is ok in moderate mode
}
}
dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
invariant(oldDoc.snapshotId() == txn->recoveryUnit()->getSnapshotId());
SnapshotId sid = txn->recoveryUnit()->getSnapshotId();
BSONElement oldId = oldDoc.value()["_id"];
if (!oldId.eoo() && (oldId != newDoc["_id"]))
return StatusWith<RecordId>(
ErrorCodes::InternalError, "in Collection::updateDocument _id mismatch", 13596);
// At the end of this step, we will have a map of UpdateTickets, one per index, which
// represent the index updates needed to be done, based on the changes between oldDoc and
// newDoc.
OwnedPointerMap<IndexDescriptor*, UpdateTicket> updateTickets;
if (indexesAffected) {
IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(txn, true);
while (ii.more()) {
IndexDescriptor* descriptor = ii.next();
IndexCatalogEntry* entry = ii.catalogEntry(descriptor);
IndexAccessMethod* iam = ii.accessMethod(descriptor);
InsertDeleteOptions options;
options.logIfError = false;
options.dupsAllowed =
!(KeyPattern::isIdKeyPattern(descriptor->keyPattern()) || descriptor->unique()) ||
repl::getGlobalReplicationCoordinator()->shouldIgnoreUniqueIndex(descriptor);
UpdateTicket* updateTicket = new UpdateTicket();
updateTickets.mutableMap()[descriptor] = updateTicket;
Status ret = iam->validateUpdate(txn,
oldDoc.value(),
newDoc,
oldLocation,
options,
updateTicket,
entry->getFilterExpression());
if (!ret.isOK()) {
return StatusWith<RecordId>(ret);
}
}
}
// This can call back into Collection::recordStoreGoingToMove. If that happens, the old
// object is removed from all indexes.
StatusWith<RecordId> newLocation = _recordStore->updateRecord(
txn, oldLocation, newDoc.objdata(), newDoc.objsize(), _enforceQuota(enforceQuota), this);
if (!newLocation.isOK()) {
return newLocation;
}
// At this point, the old object may or may not still be indexed, depending on if it was
// moved. If the object did move, we need to add the new location to all indexes.
if (newLocation.getValue() != oldLocation) {
if (debug) {
if (debug->nmoved == -1) // default of -1 rather than 0
debug->nmoved = 1;
else
debug->nmoved += 1;
}
Status s = _indexCatalog.indexRecord(txn, newDoc, newLocation.getValue());
if (!s.isOK())
return StatusWith<RecordId>(s);
invariant(sid == txn->recoveryUnit()->getSnapshotId());
args.ns = ns().ns();
getGlobalServiceContext()->getOpObserver()->onUpdate(txn, args);
return newLocation;
}
// Object did not move. We update each index with each respective UpdateTicket.
if (debug)
debug->keyUpdates = 0;
if (indexesAffected) {
//......... remaining code omitted .........
Example 10: while
StatusWith<DiskLoc> Collection::updateDocument( OperationContext* txn,
const DiskLoc& oldLocation,
const BSONObj& objNew,
bool enforceQuota,
OpDebug* debug ) {
BSONObj objOld = _recordStore->dataFor( txn, oldLocation ).toBson();
if ( objOld.hasElement( "_id" ) ) {
BSONElement oldId = objOld["_id"];
BSONElement newId = objNew["_id"];
if ( oldId != newId )
return StatusWith<DiskLoc>( ErrorCodes::InternalError,
"in Collection::updateDocument _id mismatch",
13596 );
}
if ( ns().coll() == "system.users" ) {
// XXX - andy and spencer think this should go away now
V2UserDocumentParser parser;
Status s = parser.checkValidUserDocument(objNew);
if ( !s.isOK() )
return StatusWith<DiskLoc>( s );
}
/* duplicate key check. we descend the btree twice - once for this check, and once for the actual inserts, further
below. that is suboptimal, but it's pretty complicated to do it the other way without rollbacks...
*/
OwnedPointerMap<IndexDescriptor*,UpdateTicket> updateTickets;
IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator( txn, true );
while ( ii.more() ) {
IndexDescriptor* descriptor = ii.next();
IndexAccessMethod* iam = _indexCatalog.getIndex( descriptor );
InsertDeleteOptions options;
options.logIfError = false;
options.dupsAllowed =
!(KeyPattern::isIdKeyPattern(descriptor->keyPattern()) || descriptor->unique())
|| repl::getGlobalReplicationCoordinator()->shouldIgnoreUniqueIndex(descriptor);
UpdateTicket* updateTicket = new UpdateTicket();
updateTickets.mutableMap()[descriptor] = updateTicket;
Status ret = iam->validateUpdate(txn, objOld, objNew, oldLocation, options, updateTicket );
if ( !ret.isOK() ) {
return StatusWith<DiskLoc>( ret );
}
}
// this can call back into Collection::recordStoreGoingToMove
StatusWith<DiskLoc> newLocation = _recordStore->updateRecord( txn,
oldLocation,
objNew.objdata(),
objNew.objsize(),
_enforceQuota( enforceQuota ),
this );
if ( !newLocation.isOK() ) {
return newLocation;
}
_infoCache.notifyOfWriteOp();
if ( newLocation.getValue() != oldLocation ) {
if ( debug ) {
if (debug->nmoved == -1) // default of -1 rather than 0
debug->nmoved = 1;
else
debug->nmoved += 1;
}
_indexCatalog.indexRecord(txn, objNew, newLocation.getValue());
return newLocation;
}
if ( debug )
debug->keyUpdates = 0;
ii = _indexCatalog.getIndexIterator( txn, true );
while ( ii.more() ) {
IndexDescriptor* descriptor = ii.next();
IndexAccessMethod* iam = _indexCatalog.getIndex( descriptor );
int64_t updatedKeys;
Status ret = iam->update(txn, *updateTickets.mutableMap()[descriptor], &updatedKeys);
if ( !ret.isOK() )
return StatusWith<DiskLoc>( ret );
if ( debug )
debug->keyUpdates += updatedKeys;
}
// Broadcast the mutation so that query results stay correct.
_cursorCache.invalidateDocument(oldLocation, INVALIDATION_MUTATION);
return newLocation;
}
Example 11: details
StatusWith<CompactStats> Collection::compact( const CompactOptions* compactOptions ) {
if ( isCapped() )
return StatusWith<CompactStats>( ErrorCodes::BadValue,
"cannot compact capped collection" );
if ( _indexCatalog.numIndexesInProgress() )
return StatusWith<CompactStats>( ErrorCodes::BadValue,
"cannot compact when indexes in progress" );
NamespaceDetails* d = details();
// this is a big job, so might as well make things tidy before we start just to be nice.
getDur().commitIfNeeded();
list<DiskLoc> extents;
for( DiskLoc L = d->firstExtent(); !L.isNull(); L = L.ext()->xnext )
extents.push_back(L);
log() << "compact " << extents.size() << " extents" << endl;
// same data, but might perform a little differently after compact?
_infoCache.reset();
vector<BSONObj> indexSpecs;
{
IndexCatalog::IndexIterator ii( _indexCatalog.getIndexIterator( false ) );
while ( ii.more() ) {
IndexDescriptor* descriptor = ii.next();
indexSpecs.push_back( _compactAdjustIndexSpec( descriptor->infoObj() ) );
}
}
log() << "compact orphan deleted lists" << endl;
d->orphanDeletedList();
// Start over from scratch with our extent sizing and growth
d->setLastExtentSize( 0 );
// before dropping indexes, at least make sure we can allocate one extent!
if ( allocateSpaceForANewRecord( _ns.ns().c_str(),
d,
Record::HeaderSize+1,
false).isNull() ) {
return StatusWith<CompactStats>( ErrorCodes::InternalError,
"compact error no space available to allocate" );
}
// note that the drop indexes call also invalidates all clientcursors for the namespace,
// which is important and wanted here
log() << "compact dropping indexes" << endl;
Status status = _indexCatalog.dropAllIndexes( true );
if ( !status.isOK() ) {
return StatusWith<CompactStats>( status );
}
getDur().commitIfNeeded();
CompactStats stats;
OwnedPointerVector<IndexCatalog::IndexBuildBlock> indexBuildBlocks;
vector<IndexAccessMethod*> indexesToInsertTo;
vector< std::pair<IndexAccessMethod*,IndexAccessMethod*> > bulkToCommit;
for ( size_t i = 0; i < indexSpecs.size(); i++ ) {
killCurrentOp.checkForInterrupt(false);
BSONObj info = indexSpecs[i];
info = _compactAdjustIndexSpec( info );
info = _indexCatalog.fixIndexSpec( info );
auto_ptr<IndexCatalog::IndexBuildBlock> block( new IndexCatalog::IndexBuildBlock( this,info ) );
Status status = block->init();
if ( !status.isOK() )
return StatusWith<CompactStats>(status);
IndexAccessMethod* accessMethod = block->getEntry()->accessMethod();
status = accessMethod->initializeAsEmpty();
if ( !status.isOK() )
return StatusWith<CompactStats>(status);
IndexAccessMethod* bulk = accessMethod->initiateBulk();
if ( bulk ) {
indexesToInsertTo.push_back( bulk );
bulkToCommit.push_back( std::pair<IndexAccessMethod*,IndexAccessMethod*>( accessMethod, bulk ) );
}
else {
indexesToInsertTo.push_back( accessMethod );
}
indexBuildBlocks.mutableVector().push_back( block.release() );
}
// reset data size and record counts to 0 for this namespace
// as we're about to tally them up again for each new extent
d->setStats( 0, 0 );
ProgressMeterHolder pm(cc().curop()->setMessage("compact extent",
"Extent Compacting Progress",
extents.size()));
int extentNumber = 0;
for( list<DiskLoc>::iterator i = extents.begin(); i != extents.end(); i++ ) {
_compactExtent(*i, extentNumber++, indexesToInsertTo, compactOptions, &stats );
//......... remaining code omitted .........
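The omitted _compactExtent body presumably feeds each surviving record into every builder collected above. A condensed sketch of that per-record step, reusing the insert() signature from Example 3; `obj` and `loc` are assumptions:

// Sketch (assumption): inside _compactExtent, every kept record is inserted
// into each index builder; compact deliberately skips uniqueness checks.
for (size_t j = 0; j < indexesToInsertTo.size(); j++) {
    InsertDeleteOptions options;
    options.logIfError = false;
    options.dupsAllowed = true;
    int64_t inserted;
    uassertStatusOK(indexesToInsertTo[j]->insert(obj, loc, options, &inserted));
}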
Example 12: compactCollection
StatusWith<CompactStats> compactCollection(OperationContext* opCtx,
Collection* collection,
const CompactOptions* compactOptions) {
dassert(opCtx->lockState()->isCollectionLockedForMode(collection->ns().toString(), MODE_X));
DisableDocumentValidation validationDisabler(opCtx);
auto recordStore = collection->getRecordStore();
auto indexCatalog = collection->getIndexCatalog();
if (!recordStore->compactSupported())
return StatusWith<CompactStats>(ErrorCodes::CommandNotSupported,
str::stream()
<< "cannot compact collection with record store: "
<< recordStore->name());
if (recordStore->compactsInPlace()) {
CompactStats stats;
Status status = recordStore->compact(opCtx);
if (!status.isOK())
return StatusWith<CompactStats>(status);
// Compact all indexes (not including unfinished indexes)
std::unique_ptr<IndexCatalog::IndexIterator> ii(
indexCatalog->getIndexIterator(opCtx, false));
while (ii->more()) {
IndexCatalogEntry* entry = ii->next();
IndexDescriptor* descriptor = entry->descriptor();
IndexAccessMethod* iam = entry->accessMethod();
LOG(1) << "compacting index: " << descriptor->toString();
Status status = iam->compact(opCtx);
if (!status.isOK()) {
error() << "failed to compact index: " << descriptor->toString();
return status;
}
}
return StatusWith<CompactStats>(stats);
}
if (indexCatalog->numIndexesInProgress(opCtx))
return StatusWith<CompactStats>(ErrorCodes::BadValue,
"cannot compact when indexes in progress");
std::vector<BSONObj> indexSpecs;
{
std::unique_ptr<IndexCatalog::IndexIterator> ii(
indexCatalog->getIndexIterator(opCtx, false));
while (ii->more()) {
IndexDescriptor* descriptor = ii->next()->descriptor();
// Compact always creates the new index in the foreground.
const BSONObj spec =
descriptor->infoObj().removeField(IndexDescriptor::kBackgroundFieldName);
const BSONObj key = spec.getObjectField("key");
const Status keyStatus =
index_key_validate::validateKeyPattern(key, descriptor->version());
if (!keyStatus.isOK()) {
return StatusWith<CompactStats>(
ErrorCodes::CannotCreateIndex,
str::stream() << "Cannot compact collection due to invalid index " << spec
<< ": "
<< keyStatus.reason()
<< " For more info see"
<< " http://dochub.mongodb.org/core/index-validation");
}
indexSpecs.push_back(spec);
}
}
// Give a chance to be interrupted *before* we drop all indexes.
opCtx->checkForInterrupt();
{
// note that the drop indexes call also invalidates all clientcursors for the namespace,
// which is important and wanted here
WriteUnitOfWork wunit(opCtx);
log() << "compact dropping indexes";
indexCatalog->dropAllIndexes(opCtx, true);
wunit.commit();
}
CompactStats stats;
MultiIndexBlockImpl indexer(opCtx, collection);
indexer.allowInterruption();
indexer.ignoreUniqueConstraint(); // in compact we should be doing no checking
Status status = indexer.init(indexSpecs).getStatus();
if (!status.isOK())
return StatusWith<CompactStats>(status);
status = recordStore->compact(opCtx);
if (!status.isOK())
return StatusWith<CompactStats>(status);
log() << "starting index commits";
status = indexer.dumpInsertsFromBulk();
if (!status.isOK())
//......... remaining code omitted .........
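A usage sketch for the helper; `opCtx` and `coll` are assumptions, and the CompactOptions fields are left at their defaults. Per the dassert at the top, the caller must already hold the collection lock in MODE_X.

// Sketch: compacting a collection under an exclusive (MODE_X) collection lock.
CompactOptions options;
StatusWith<CompactStats> res = compactCollection(opCtx, coll, &options);
if (!res.isOK())
    error() << "compact failed: " << res.getStatus().reason();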
Example 13: dassert
StatusWith<CompactStats> Collection::compact(OperationContext* txn,
const CompactOptions* compactOptions) {
dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
DisableDocumentValidation validationDisabler(txn);
if (!_recordStore->compactSupported())
return StatusWith<CompactStats>(ErrorCodes::CommandNotSupported,
str::stream()
<< "cannot compact collection with record store: "
<< _recordStore->name());
if (_recordStore->compactsInPlace()) {
CompactStats stats;
Status status = _recordStore->compact(txn, NULL, compactOptions, &stats);
if (!status.isOK())
return StatusWith<CompactStats>(status);
// Compact all indexes (not including unfinished indexes)
IndexCatalog::IndexIterator ii(_indexCatalog.getIndexIterator(txn, false));
while (ii.more()) {
IndexDescriptor* descriptor = ii.next();
IndexAccessMethod* index = _indexCatalog.getIndex(descriptor);
LOG(1) << "compacting index: " << descriptor->toString();
Status status = index->compact(txn);
if (!status.isOK()) {
error() << "failed to compact index: " << descriptor->toString();
return status;
}
}
return StatusWith<CompactStats>(stats);
}
if (_indexCatalog.numIndexesInProgress(txn))
return StatusWith<CompactStats>(ErrorCodes::BadValue,
"cannot compact when indexes in progress");
vector<BSONObj> indexSpecs;
{
IndexCatalog::IndexIterator ii(_indexCatalog.getIndexIterator(txn, false));
while (ii.more()) {
IndexDescriptor* descriptor = ii.next();
const BSONObj spec = _compactAdjustIndexSpec(descriptor->infoObj());
const BSONObj key = spec.getObjectField("key");
const Status keyStatus = validateKeyPattern(key);
if (!keyStatus.isOK()) {
return StatusWith<CompactStats>(
ErrorCodes::CannotCreateIndex,
str::stream() << "Cannot compact collection due to invalid index " << spec
<< ": "
<< keyStatus.reason()
<< " For more info see"
<< " http://dochub.mongodb.org/core/index-validation");
}
indexSpecs.push_back(spec);
}
}
// Give a chance to be interrupted *before* we drop all indexes.
txn->checkForInterrupt();
{
// note that the drop indexes call also invalidates all clientcursors for the namespace,
// which is important and wanted here
WriteUnitOfWork wunit(txn);
log() << "compact dropping indexes";
Status status = _indexCatalog.dropAllIndexes(txn, true);
if (!status.isOK()) {
return StatusWith<CompactStats>(status);
}
wunit.commit();
}
CompactStats stats;
MultiIndexBlock indexer(txn, this);
indexer.allowInterruption();
indexer.ignoreUniqueConstraint(); // in compact we should be doing no checking
Status status = indexer.init(indexSpecs);
if (!status.isOK())
return StatusWith<CompactStats>(status);
MyCompactAdaptor adaptor(this, &indexer);
status = _recordStore->compact(txn, &adaptor, compactOptions, &stats);
if (!status.isOK())
return StatusWith<CompactStats>(status);
log() << "starting index commits";
status = indexer.doneInserting();
if (!status.isOK())
return StatusWith<CompactStats>(status);
{
WriteUnitOfWork wunit(txn);
indexer.commit();
//......... remaining code omitted .........
Example 14: getExtentManager
StatusWith<DiskLoc> Collection::updateDocument( const DiskLoc& oldLocation,
const BSONObj& objNew,
bool enforceQuota,
OpDebug* debug ) {
Record* oldRecord = getExtentManager()->recordFor( oldLocation );
BSONObj objOld = BSONObj::make( oldRecord );
if ( objOld.hasElement( "_id" ) ) {
BSONElement oldId = objOld["_id"];
BSONElement newId = objNew["_id"];
if ( oldId != newId )
return StatusWith<DiskLoc>( ErrorCodes::InternalError,
"in Collection::updateDocument _id mismatch",
13596 );
}
if ( ns().coll() == "system.users" ) {
// XXX - andy and spencer think this should go away now
V2UserDocumentParser parser;
Status s = parser.checkValidUserDocument(objNew);
if ( !s.isOK() )
return StatusWith<DiskLoc>( s );
}
/* duplicate key check. we descend the btree twice - once for this check, and once for the actual inserts, further
below. that is suboptimal, but it's pretty complicated to do it the other way without rollbacks...
*/
OwnedPointerVector<UpdateTicket> updateTickets;
updateTickets.mutableVector().resize(_indexCatalog.numIndexesTotal());
for (int i = 0; i < _indexCatalog.numIndexesTotal(); ++i) {
IndexDescriptor* descriptor = _indexCatalog.getDescriptor( i );
IndexAccessMethod* iam = _indexCatalog.getIndex( descriptor );
InsertDeleteOptions options;
options.logIfError = false;
options.dupsAllowed =
!(KeyPattern::isIdKeyPattern(descriptor->keyPattern()) || descriptor->unique())
|| ignoreUniqueIndex(descriptor);
updateTickets.mutableVector()[i] = new UpdateTicket();
Status ret = iam->validateUpdate(objOld, objNew, oldLocation, options,
updateTickets.mutableVector()[i]);
if ( !ret.isOK() ) {
return StatusWith<DiskLoc>( ret );
}
}
if ( oldRecord->netLength() < objNew.objsize() ) {
// doesn't fit, have to move to new location
if ( _details->isCapped() )
return StatusWith<DiskLoc>( ErrorCodes::InternalError,
"failing update: objects in a capped ns cannot grow",
10003 );
moveCounter.increment();
_details->paddingTooSmall();
// unindex old record, don't delete
// this way, if inserting new doc fails, we can re-index this one
ClientCursor::aboutToDelete(_ns.ns(), _details, oldLocation);
_indexCatalog.unindexRecord( objOld, oldLocation, true );
if ( debug ) {
if (debug->nmoved == -1) // default of -1 rather than 0
debug->nmoved = 1;
else
debug->nmoved += 1;
}
StatusWith<DiskLoc> loc = insertDocument( objNew, enforceQuota );
if ( loc.isOK() ) {
// insert successful; now let's deallocate the old location
// (remember it's already unindexed)
_recordStore.deallocRecord( oldLocation, oldRecord );
}
else {
// new doc insert failed, so let's re-index the old document and location
_indexCatalog.indexRecord( objOld, oldLocation );
}
return loc;
}
_infoCache.notifyOfWriteOp();
_details->paddingFits();
if ( debug )
debug->keyUpdates = 0;
for (int i = 0; i < _indexCatalog.numIndexesTotal(); ++i) {
IndexDescriptor* descriptor = _indexCatalog.getDescriptor( i );
IndexAccessMethod* iam = _indexCatalog.getIndex( descriptor );
int64_t updatedKeys;
Status ret = iam->update(*updateTickets.vector()[i], &updatedKeys);
if ( !ret.isOK() )
return StatusWith<DiskLoc>( ret );
if ( debug )
//......... remaining code omitted .........
Example 15: while
StatusWith<RecordId> Collection::updateDocument( OperationContext* txn,
const RecordId& oldLocation,
const BSONObj& objNew,
bool enforceQuota,
OpDebug* debug ) {
BSONObj objOld = _recordStore->dataFor( txn, oldLocation ).releaseToBson();
if ( objOld.hasElement( "_id" ) ) {
BSONElement oldId = objOld["_id"];
BSONElement newId = objNew["_id"];
if ( oldId != newId )
return StatusWith<RecordId>( ErrorCodes::InternalError,
"in Collection::updateDocument _id mismatch",
13596 );
}
/* duplicate key check. we descend the btree twice - once for this check, and once for the actual inserts, further
below. that is suboptimal, but it's pretty complicated to do it the other way without rollbacks...
*/
// At the end of this step, we will have a map of UpdateTickets, one per index, which
// represent the index updates needed to be done, based on the changes between objOld and
// objNew.
OwnedPointerMap<IndexDescriptor*,UpdateTicket> updateTickets;
IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator( txn, true );
while ( ii.more() ) {
IndexDescriptor* descriptor = ii.next();
IndexAccessMethod* iam = _indexCatalog.getIndex( descriptor );
InsertDeleteOptions options;
options.logIfError = false;
options.dupsAllowed =
!(KeyPattern::isIdKeyPattern(descriptor->keyPattern()) || descriptor->unique())
|| repl::getGlobalReplicationCoordinator()->shouldIgnoreUniqueIndex(descriptor);
UpdateTicket* updateTicket = new UpdateTicket();
updateTickets.mutableMap()[descriptor] = updateTicket;
Status ret = iam->validateUpdate(txn, objOld, objNew, oldLocation, options, updateTicket );
if ( !ret.isOK() ) {
return StatusWith<RecordId>( ret );
}
}
// This can call back into Collection::recordStoreGoingToMove. If that happens, the old
// object is removed from all indexes.
StatusWith<RecordId> newLocation = _recordStore->updateRecord( txn,
oldLocation,
objNew.objdata(),
objNew.objsize(),
_enforceQuota( enforceQuota ),
this );
if ( !newLocation.isOK() ) {
return newLocation;
}
// At this point, the old object may or may not still be indexed, depending on if it was
// moved.
_infoCache.notifyOfWriteOp();
// If the object did move, we need to add the new location to all indexes.
if ( newLocation.getValue() != oldLocation ) {
if ( debug ) {
if (debug->nmoved == -1) // default of -1 rather than 0
debug->nmoved = 1;
else
debug->nmoved += 1;
}
Status s = _indexCatalog.indexRecord(txn, objNew, newLocation.getValue());
if (!s.isOK())
return StatusWith<RecordId>(s);
return newLocation;
}
// Object did not move. We update each index with each respective UpdateTicket.
if ( debug )
debug->keyUpdates = 0;
ii = _indexCatalog.getIndexIterator( txn, true );
while ( ii.more() ) {
IndexDescriptor* descriptor = ii.next();
IndexAccessMethod* iam = _indexCatalog.getIndex( descriptor );
int64_t updatedKeys;
Status ret = iam->update(txn, *updateTickets.mutableMap()[descriptor], &updatedKeys);
if ( !ret.isOK() )
return StatusWith<RecordId>( ret );
if ( debug )
debug->keyUpdates += updatedKeys;
}
// Broadcast the mutation so that query results stay correct.
_cursorCache.invalidateDocument(txn, oldLocation, INVALIDATION_MUTATION);
return newLocation;
//......... remaining code omitted .........