This article collects typical usage examples of the C++ method ProgressMeterHolder::hit. If you have been wondering how exactly ProgressMeterHolder::hit is used in practice, the curated examples below may help. You can also browse further usage examples of the enclosing class, ProgressMeterHolder.
Three code examples of ProgressMeterHolder::hit are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better C++ code examples.
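Before diving in, here is a minimal, self-contained sketch of the hit lifecycle distilled from the examples below. The item loop, the Item type, and processItem() are hypothetical placeholders; the pm.set()/pm.hit()/pm.finished() calls mirror the pattern used in Example 3:

// Minimal sketch of the ProgressMeterHolder::hit lifecycle, assuming MongoDB's
// internal OperationContext/CurOp headers. Item and processItem() are hypothetical.
void processAllItems(OperationContext* opCtx, const std::vector<Item>& items) {
    ProgressMeterHolder pm;
    {
        // Bind the holder to this operation's progress meter (as in Example 3).
        stdx::unique_lock<Client> lk(*opCtx->getClient());
        pm.set(CurOp::get(opCtx)->setProgress_inlock(
            "processing items", items.size(), 3 /* secondsBetween */));
    }
    for (const auto& item : items) {
        processItem(item);  // hypothetical per-item work
        pm.hit();           // advance the meter by one unit of work
    }
    pm.finished();          // mark the meter as complete
}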
Example 1: buildBottomUpPhases2And3
template <class V>
void buildBottomUpPhases2And3(bool dupsAllowed,
                              IndexDetails& idx,
                              BSONObjExternalSorter& sorter,
                              bool dropDups,
                              set<DiskLoc>& dupsToDrop,
                              CurOp* op,
                              SortPhaseOne* phase1,
                              ProgressMeterHolder& pm,
                              Timer& t) {
    BtreeBuilder<V> btBuilder(dupsAllowed, idx);
    BSONObj keyLast;
    auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
    verify( pm == op->setMessage( "index: (2/3) btree bottom up" , phase1->nkeys , 10 ) );
    while( i->more() ) {
        RARELY killCurrentOp.checkForInterrupt();
        BSONObjExternalSorter::Data d = i->next();

        try {
            if ( !dupsAllowed && dropDups ) {
                // Suppress duplicate-key errors for this scope so they are not
                // reported back to the client.
                LastError::Disabled led( lastError.get() );
                btBuilder.addKey(d.first, d.second);
            }
            else {
                btBuilder.addKey(d.first, d.second);
            }
        }
        catch( AssertionException& e ) {
            if ( dupsAllowed ) {
                // unknown exception??
                throw;
            }

            if( e.interrupted() ) {
                killCurrentOp.checkForInterrupt();
            }

            if ( ! dropDups )
                throw;

            /* we could queue these on disk, but normally there are very few dups, so instead we
               keep them in RAM and have a limit.
            */
            dupsToDrop.insert(d.second);
            uassert( 10092 , "too many dups on index build with dropDups=true", dupsToDrop.size() < 1000000 );
        }
        pm.hit();
    }
    pm.finished();
    op->setMessage( "index: (3/3) btree-middle" );
    log(t.seconds() > 10 ? 0 : 1 ) << "\t done building bottom layer, going to commit" << endl;
    btBuilder.commit();
    if ( btBuilder.getn() != phase1->nkeys && ! dropDups ) {
        warning() << "not all entries were added to the index, probably some keys were too large" << endl;
    }
}
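Two details worth noting in this legacy (MongoDB 2.x era) build path: verify(pm == op->setMessage(...)) only asserts that the meter returned by setMessage() is the same one the holder already wraps, and pm.hit() sits after the try/catch, so the meter advances exactly once per sorted key, whether the key was inserted or queued in dupsToDrop.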
Example 2: drainWritesIntoIndex
//......... part of the code omitted here .........
    auto cursor = _sideWritesTable->rs()->getCursor(opCtx);

    bool atEof = false;
    while (!atEof) {
        opCtx->checkForInterrupt();

        // Stashed records should be inserted into a batch first.
        if (stashed) {
            invariant(batch.empty());
            batch.push_back(std::move(stashed.get()));
            stashed.reset();
        }

        auto record = cursor->next();

        if (record) {
            RecordId currentRecordId = record->id;
            BSONObj docOut = record->data.toBson().getOwned();

            // If the total batch size in bytes would be too large, stash this document and let
            // the current batch insert.
            int objSize = docOut.objsize();
            if (batchSizeBytes + objSize > kBatchMaxBytes) {
                invariant(!stashed);

                // Stash this document to be inserted in the next batch.
                stashed.emplace(currentRecordId, std::move(docOut));
            } else {
                batchSizeBytes += objSize;
                batch.emplace_back(currentRecordId, std::move(docOut));

                // Continue if there is more room in the batch.
                if (batch.size() < kBatchMaxSize) {
                    continue;
                }
            }
        } else {
            atEof = true;
            if (batch.empty())
                break;
        }

        invariant(!batch.empty());

        cursor->save();

        // If we are here, either we have reached the end of the table or the batch is full, so
        // insert everything in one WriteUnitOfWork, and delete each inserted document from the
        // side writes table.
        auto status = writeConflictRetry(opCtx, "index build drain", _indexCatalogEntry->ns(), [&] {
            WriteUnitOfWork wuow(opCtx);
            for (auto& operation : batch) {
                auto status =
                    _applyWrite(opCtx, operation.second, options, &totalInserted, &totalDeleted);
                if (!status.isOK()) {
                    return status;
                }

                // Delete the document from the table as soon as it has been inserted into the
                // index. This ensures that no key is ever inserted twice and no keys are skipped.
                _sideWritesTable->rs()->deleteRecord(opCtx, operation.first);
            }

            // For rollback to work correctly, these writes need to be timestamped. The actual
            // time is not important, as long as it is not older than the most recent visible
            // side write.
            IndexTimestampHelper::setGhostCommitTimestampForWrite(
                opCtx, NamespaceString(_indexCatalogEntry->ns()));

            wuow.commit();
            return Status::OK();
        });
        if (!status.isOK()) {
            return status;
        }

        progress->hit(batch.size());

        // Lock yielding will only happen if we are holding intent locks.
        _tryYield(opCtx);
        cursor->restore();

        // Account for more writes coming in during a batch.
        progress->setTotalWhileRunning(_sideWritesCounter.loadRelaxed() - appliedAtStart);

        _numApplied += batch.size();
        batch.clear();
        batchSizeBytes = 0;
    }

    progress->finished();

    int logLevel = (_numApplied - appliedAtStart > 0) ? 0 : 1;
    LOG(logLevel) << "index build: drain applied " << (_numApplied - appliedAtStart)
                  << " side writes (inserted: " << totalInserted << ", deleted: " << totalDeleted
                  << ") for '" << _indexCatalogEntry->descriptor()->indexName() << "' in "
                  << timer.millis() << " ms";

    return Status::OK();
}
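Unlike Example 1, this drain loop advances the meter in bulk: progress->hit(batch.size()) credits an entire batch with a single call, and progress->setTotalWhileRunning(...) grows the expected total as new side writes keep arriving, so the reported progress stays meaningful against a moving target.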
Example 3: commitBulk
Status AbstractIndexAccessMethod::commitBulk(OperationContext* opCtx,
                                             BulkBuilder* bulk,
                                             bool mayInterrupt,
                                             bool dupsAllowed,
                                             set<RecordId>* dupRecords,
                                             std::vector<BSONObj>* dupKeysInserted) {
    // Cannot simultaneously report uninserted duplicates 'dupRecords' and inserted duplicates
    // 'dupKeysInserted'.
    invariant(!(dupRecords && dupKeysInserted));

    Timer timer;

    std::unique_ptr<BulkBuilder::Sorter::Iterator> it(bulk->done());

    static const char* message = "Index Build: inserting keys from external sorter into index";
    ProgressMeterHolder pm;
    {
        stdx::unique_lock<Client> lk(*opCtx->getClient());
        pm.set(CurOp::get(opCtx)->setProgress_inlock(
            message, bulk->getKeysInserted(), 3 /* secondsBetween */));
    }

    auto builder = std::unique_ptr<SortedDataBuilderInterface>(
        _newInterface->getBulkBuilder(opCtx, dupsAllowed));

    bool checkIndexKeySize = shouldCheckIndexKeySize(opCtx);

    BSONObj previousKey;
    const Ordering ordering = Ordering::make(_descriptor->keyPattern());

    while (it->more()) {
        if (mayInterrupt) {
            opCtx->checkForInterrupt();
        }

        WriteUnitOfWork wunit(opCtx);

        // Get the next datum and add it to the builder.
        BulkBuilder::Sorter::Data data = it->next();

        // Before attempting to insert, perform a duplicate key check.
        bool isDup = false;
        if (_descriptor->unique()) {
            isDup = data.first.woCompare(previousKey, ordering) == 0;
            if (isDup && !dupsAllowed) {
                if (dupRecords) {
                    dupRecords->insert(data.second);
                    continue;
                }
                return buildDupKeyErrorStatus(data.first,
                                              _descriptor->parentNS(),
                                              _descriptor->indexName(),
                                              _descriptor->keyPattern());
            }
        }

        Status status = checkIndexKeySize ? checkKeySize(data.first) : Status::OK();
        if (status.isOK()) {
            StatusWith<SpecialFormatInserted> ret = builder->addKey(data.first, data.second);
            status = ret.getStatus();
            if (status.isOK() && ret.getValue() == SpecialFormatInserted::LongTypeBitsInserted)
                _btreeState->setIndexKeyStringWithLongTypeBitsExistsOnDisk(opCtx);
        }

        if (!status.isOK()) {
            // Duplicates are checked before inserting.
            invariant(status.code() != ErrorCodes::DuplicateKey);

            // Overlong key that's OK to skip?
            // TODO SERVER-36385: Remove this when there is no KeyTooLong error.
            if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong()) {
                continue;
            }

            return status;
        }

        previousKey = data.first.getOwned();

        if (isDup && dupsAllowed && dupKeysInserted) {
            dupKeysInserted->push_back(data.first.getOwned());
        }

        // If we're here either it's a dup and we're cool with it or the addKey went just fine.
        pm.hit();
        wunit.commit();
    }

    pm.finished();

    log() << "index build: inserted keys from external sorter into index in " << timer.seconds()
          << " seconds";

    WriteUnitOfWork wunit(opCtx);
    SpecialFormatInserted specialFormatInserted = builder->commit(mayInterrupt);

    // It's ok to insert KeyStrings with long TypeBits but we need to mark the feature
    // tracker bit so that downgrade binary which cannot read the long TypeBits fails to
    // start up.
    if (specialFormatInserted == SpecialFormatInserted::LongTypeBitsInserted)
        _btreeState->setIndexKeyStringWithLongTypeBitsExistsOnDisk(opCtx);
//......... rest of the code omitted here .........
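Across all three examples the pattern is the same: bind the holder to the current operation's progress meter (op->setMessage(...) in the legacy code, CurOp::setProgress_inlock() under the client lock in the newer code), call hit() once per unit of work or hit(n) per batch, and call finished() when the work is complete so the operation stops reporting an in-flight progress meter.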