本文整理汇总了C++中DiskLoc::isNull方法的典型用法代码示例。如果您正苦于以下问题:C++ DiskLoc::isNull方法的具体用法?C++ DiskLoc::isNull怎么用?C++ DiskLoc::isNull使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类DiskLoc
的用法示例。
在下文中一共展示了DiskLoc::isNull方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: repairDatabase
//......... portion of the code omitted here .........
// NOTE(review): this fragment is the tail of a repairDatabase-style routine.
// The function signature and the declarations of tempDatabase, originalDatabase,
// dbName, reservedPath/reservedPathString, backupOriginalFiles and
// repairFileDeleter are above the visible region -- verify against the full
// source before relying on details.
}
namespacesToCopy[ns] = options;
}
}
}
// Copy every surviving namespace from the original database into the
// temporary (repaired) database, rebuilding its indexes as we go.
for ( map<string,CollectionOptions>::const_iterator i = namespacesToCopy.begin();
i != namespacesToCopy.end();
++i ) {
string ns = i->first;
CollectionOptions options = i->second;
Collection* tempCollection = NULL;
{
Client::Context tempContext( ns, tempDatabase );
tempCollection = tempDatabase->createCollection( ns, options, true, false );
}
Client::Context readContext( ns, originalDatabase );
Collection* originalCollection = originalDatabase->getCollection( ns );
invariant( originalCollection );
// data
// Gather the index specs from the original collection and initialize a
// bulk index builder against the temporary collection.
MultiIndexBlock indexBlock( tempCollection );
{
vector<BSONObj> indexes;
IndexCatalog::IndexIterator ii =
originalCollection->getIndexCatalog()->getIndexIterator( false );
while ( ii.more() ) {
IndexDescriptor* desc = ii.next();
indexes.push_back( desc->infoObj() );
}
Client::Context tempContext( ns, tempDatabase );
Status status = indexBlock.init( indexes );
if ( !status.isOK() )
return status;
}
// Full forward scan of the original collection: copy each document into the
// temp collection (feeding the index builder), committing to the journal and
// checking for interrupts along the way.
scoped_ptr<CollectionIterator> iterator( originalCollection->getIterator( DiskLoc(),
false,
CollectionScanParams::FORWARD ) );
while ( !iterator->isEOF() ) {
DiskLoc loc = iterator->getNext();
invariant( !loc.isNull() );
BSONObj doc = originalCollection->docFor( loc );
Client::Context tempContext( ns, tempDatabase );
StatusWith<DiskLoc> result = tempCollection->insertDocument( doc, indexBlock );
if ( !result.isOK() )
return result.getStatus();
getDur().commitIfNeeded();
killCurrentOp.checkForInterrupt(false);
}
// Finish the bulk index build for this namespace.
{
Client::Context tempContext( ns, tempDatabase );
Status status = indexBlock.commit();
if ( !status.isOK() )
return status;
}
}
// Flush everything to disk before swapping the repaired files into place.
getDur().syncDataAndTruncateJournal();
MongoFile::flushAll(true); // need both in case journaling is disabled
killCurrentOp.checkForInterrupt(false);
Client::Context tempContext( dbName, reservedPathString );
Database::closeDatabase( dbName, reservedPathString );
}
// Close the original database, then either back up its files or delete them
// (recreating an empty directory for the repaired files).
Client::Context ctx( dbName );
Database::closeDatabase(dbName, storageGlobalParams.dbpath);
if ( backupOriginalFiles ) {
_renameForBackup( dbName, reservedPath );
}
else {
_deleteDataFiles( dbName );
MONGO_ASSERT_ON_EXCEPTION(
boost::filesystem::create_directory(Path(storageGlobalParams.dbpath) / dbName));
}
if ( repairFileDeleter.get() )
repairFileDeleter->success();
// Move the repaired files from the reserved path into the live dbpath.
_replaceWithRecovered( dbName, reservedPathString.c_str() );
if ( !backupOriginalFiles )
MONGO_ASSERT_ON_EXCEPTION( boost::filesystem::remove_all( reservedPath ) );
return Status::OK();
}
示例2: OK
// Validates the on-disk structure of a V1 record store, appending findings to
// `output` and collecting errors in `results`.  Validation runs in stages
// (see the numbered banners below); this fragment shows stages 1 and 2 only --
// the remainder is cut off at the bottom.
// Always returns Status::OK(); structural problems are reported via
// results->valid / results->errors rather than a non-OK Status.
Status RecordStoreV1Base::validate( OperationContext* txn,
bool full, bool scanData,
ValidateAdaptor* adaptor,
ValidateResults* results, BSONObjBuilder* output ) const {
// 1) basic status that require no iteration
// 2) extent level info
// 3) check extent start and end
// 4) check each non-deleted record
// 5) check deleted list
// -------------
// 1111111111111111111
// Stage 1: cheap metadata pulled straight from the namespace details.
if ( isCapped() ){
output->appendBool("capped", true);
output->appendNumber("max", _details->maxCappedDocs());
}
output->appendNumber("datasize", _details->dataSize());
output->appendNumber("nrecords", _details->numRecords());
output->appendNumber("lastExtentSize", _details->lastExtentSize(txn));
output->appendNumber("padding", _details->paddingFactor());
if ( _details->firstExtent(txn).isNull() )
output->append( "firstExtent", "null" );
else
output->append( "firstExtent",
str::stream() << _details->firstExtent(txn).toString()
<< " ns:"
<< _getExtent( txn, _details->firstExtent(txn) )->nsDiagnostic.toString());
if ( _details->lastExtent(txn).isNull() )
output->append( "lastExtent", "null" );
else
output->append( "lastExtent", str::stream() << _details->lastExtent(txn).toString()
<< " ns:"
<< _getExtent( txn, _details->lastExtent(txn) )->nsDiagnostic.toString());
// 22222222222222222222222222
{ // validate extent basics
// Stage 2: walk the extent linked list, checking each extent's own
// invariants plus the consistency of the xprev/xnext links and the
// recorded lastExtent pointer.
BSONArrayBuilder extentData;
int extentCount = 0;
DiskLoc extentDiskLoc;
try {
if ( !_details->firstExtent(txn).isNull() ) {
_getExtent( txn, _details->firstExtent(txn) )->assertOk();
_getExtent( txn, _details->lastExtent(txn) )->assertOk();
}
extentDiskLoc = _details->firstExtent(txn);
while (!extentDiskLoc.isNull()) {
Extent* thisExtent = _getExtent( txn, extentDiskLoc );
if (full) {
extentData << thisExtent->dump();
}
if (!thisExtent->validates(extentDiskLoc, &results->errors)) {
results->valid = false;
}
DiskLoc nextDiskLoc = thisExtent->xnext;
// the next extent's back-pointer must point at us
if (extentCount > 0 && !nextDiskLoc.isNull()
&& _getExtent( txn, nextDiskLoc )->xprev != extentDiskLoc) {
StringBuilder sb;
sb << "'xprev' pointer " << _getExtent( txn, nextDiskLoc )->xprev.toString()
<< " in extent " << nextDiskLoc.toString()
<< " does not point to extent " << extentDiskLoc.toString();
results->errors.push_back( sb.str() );
results->valid = false;
}
// the final extent in the chain must match lastExtent
if (nextDiskLoc.isNull() && extentDiskLoc != _details->lastExtent(txn)) {
StringBuilder sb;
sb << "'lastExtent' pointer " << _details->lastExtent(txn).toString()
<< " does not point to last extent in list " << extentDiskLoc.toString();
results->errors.push_back( sb.str() );
results->valid = false;
}
extentDiskLoc = nextDiskLoc;
extentCount++;
txn->checkForInterrupt();
}
}
catch (const DBException& e) {
// record the failure but keep the overall call successful -- validation
// reports corruption, it does not fail on it
StringBuilder sb;
sb << "exception validating extent " << extentCount
<< ": " << e.what();
results->errors.push_back( sb.str() );
results->valid = false;
return Status::OK();
}
output->append("extentCount", extentCount);
if ( full )
output->appendArray( "extents" , extentData.arr() );
}
try {
// 333333333333333333333333333
bool testingLastExtent = false;
try {
//......... remainder of the function (stages 3-5) omitted here .........
示例3: DiskLoc
// Allocates space for a record of `lenToAlloc` bytes from the deleted-record
// freelist (best-fit with a bounded search), unlinking the chosen deleted
// record from its bucket.  Returns a null DiskLoc when no suitable deleted
// record exists, signalling the caller to allocate a new extent.
// NOTE(review): fragment is truncated at the bottom -- the "chop up the
// remainder" logic after the final drec(loc) is outside the visible region.
DiskLoc SimpleRecordStoreV1::_allocFromExistingExtents( OperationContext* txn,
int lenToAlloc ) {
// align size up to a multiple of 4
lenToAlloc = (lenToAlloc + (4-1)) & ~(4-1);
freelistAllocs.increment();
DiskLoc loc;
{
DiskLoc *prev = 0;
DiskLoc *bestprev = 0;
DiskLoc bestmatch;
int bestmatchlen = INT_MAX; // sentinel meaning we haven't found a record big enough
int b = bucket(lenToAlloc);
DiskLoc cur = _details->deletedListEntry(b);
int extra = 5; // look for a better fit, a little.
int chain = 0;
while ( 1 ) {
{ // defensive check
// a freelist link outside sane file-number/offset bounds means the
// deleted list is corrupt -- fatally assert rather than follow it
int fileNumber = cur.a();
int fileOffset = cur.getOfs();
if (fileNumber < -1 || fileNumber >= 100000 || fileOffset < 0) {
StringBuilder sb;
sb << "Deleted record list corrupted in collection " << _ns
<< ", bucket " << b
<< ", link number " << chain
<< ", invalid link is " << cur.toString()
<< ", throwing Fatal Assertion";
log() << sb.str() << endl;
fassertFailed(16469);
}
}
if ( cur.isNull() ) {
// move to next bucket. if we were doing "extra", just break
if ( bestmatchlen < INT_MAX )
break;
if ( chain > 0 ) {
// if we looked at things in the right bucket, but they were not suitable
freelistBucketExhausted.increment();
}
b++;
if ( b > MaxBucket ) {
// out of space. alloc a new extent.
freelistIterations.increment( 1 + chain );
return DiskLoc();
}
cur = _details->deletedListEntry(b);
prev = 0;
continue;
}
DeletedRecord *r = drec(cur);
// track the smallest deleted record that still fits
if ( r->lengthWithHeaders() >= lenToAlloc &&
r->lengthWithHeaders() < bestmatchlen ) {
bestmatchlen = r->lengthWithHeaders();
bestmatch = cur;
bestprev = prev;
if (r->lengthWithHeaders() == lenToAlloc)
// exact match, stop searching
break;
}
if ( bestmatchlen < INT_MAX && --extra <= 0 )
break;
if ( ++chain > 30 && b <= MaxBucket ) {
// too slow, force move to next bucket to grab a big chunk
//b++;
freelistIterations.increment( chain );
chain = 0;
cur.Null();
}
else {
cur = r->nextDeleted();
prev = &r->nextDeleted();
}
}
// unlink ourself from the deleted list
DeletedRecord *bmr = drec(bestmatch);
if ( bestprev ) {
*txn->recoveryUnit()->writing(bestprev) = bmr->nextDeleted();
}
else {
// should be the front of a free-list
int myBucket = bucket(bmr->lengthWithHeaders());
invariant( _details->deletedListEntry(myBucket) == bestmatch );
_details->setDeletedListEntry(txn, myBucket, bmr->nextDeleted());
}
*txn->recoveryUnit()->writing(&bmr->nextDeleted()) = DiskLoc().setInvalid(); // defensive.
invariant(bmr->extentOfs() < bestmatch.getOfs());
freelistIterations.increment( 1 + chain );
loc = bestmatch;
}
if ( loc.isNull() )
return loc;
// determine if we should chop up
DeletedRecord *r = drec(loc);
//......... remainder of the function omitted here .........
示例4: writer
// Compacts a single extent: pages the extent into memory sequentially, then
// re-inserts every valid record (with its size recomputed per compactOptions'
// padding mode) so the data ends up tightly packed in new extents.
// NOTE(review): fragment is truncated at the bottom -- the cleanup that
// detaches the drained extent happens in the omitted tail.
void SimpleRecordStoreV1::_compactExtent(OperationContext* txn,
const DiskLoc diskloc,
int extentNumber,
RecordStoreCompactAdaptor* adaptor,
const CompactOptions* compactOptions,
CompactStats* stats ) {
log() << "compact begin extent #" << extentNumber
<< " for namespace " << _ns << " " << diskloc;
unsigned oldObjSize = 0; // we'll report what the old padding was
unsigned oldObjSizeWithPadding = 0;
Extent *e = _extentManager->getExtent( diskloc );
e->assertOk();
fassert( 17437, e->validates(diskloc) );
{
// the next/prev pointers within the extent might not be in order so we first
// page the whole thing in sequentially
log() << "compact paging in len=" << e->length/1000000.0 << "MB" << endl;
Timer t;
size_t length = e->length;
touch_pages( reinterpret_cast<const char*>(e), length );
int ms = t.millis();
if( ms > 1000 )
log() << "compact end paging in " << ms << "ms "
<< e->length/1000000.0/t.seconds() << "MB/sec" << endl;
}
{
log() << "compact copying records" << endl;
long long datasize = 0;
long long nrecords = 0;
DiskLoc L = e->firstRecord;
if( !L.isNull() ) {
while( 1 ) {
Record *recOld = recordFor(L);
// advance now: from here on L names the NEXT record (or null)
L = getNextRecordInExtent(L);
if ( compactOptions->validateDocuments && !adaptor->isDataValid(recOld) ) {
// object is corrupt!
log() << "compact skipping corrupt document!";
stats->corruptDocuments++;
}
else {
unsigned dataSize = adaptor->dataSize( recOld );
unsigned docSize = dataSize;
nrecords++;
oldObjSize += docSize;
oldObjSizeWithPadding += recOld->netLength();
unsigned lenWHdr = docSize + Record::HeaderSize;
unsigned lenWPadding = lenWHdr;
// choose the new record's allocation size per the requested padding mode
switch( compactOptions->paddingMode ) {
case CompactOptions::NONE:
if ( _details->isUserFlagSet(Flag_UsePowerOf2Sizes) )
lenWPadding = quantizePowerOf2AllocationSpace(lenWPadding);
break;
case CompactOptions::PRESERVE:
// if we are preserving the padding, the record should not change size
lenWPadding = recOld->lengthWithHeaders();
break;
case CompactOptions::MANUAL:
// caller-computed size; fall back to the bare size if out of range
lenWPadding = compactOptions->computeRecordSize(lenWPadding);
if (lenWPadding < lenWHdr || lenWPadding > BSONObjMaxUserSize / 2 ) {
lenWPadding = lenWHdr;
}
break;
}
CompactDocWriter writer( recOld, dataSize, lenWPadding );
StatusWith<DiskLoc> status = insertRecord( txn, &writer, 0 );
uassertStatusOK( status.getStatus() );
datasize += recordFor( status.getValue() )->netLength();
adaptor->inserted( recordFor( status.getValue() ), status.getValue() );
}
if( L.isNull() ) {
// we just did the very last record from the old extent. it's still pointed to
// by the old extent ext, but that will be fixed below after this loop
break;
}
// remove the old records (orphan them) periodically so our commit block doesn't get too large
bool stopping = false;
RARELY stopping = !txn->checkForInterruptNoAssert().isOK();
if( stopping || txn->recoveryUnit()->isCommitNeeded() ) {
*txn->recoveryUnit()->writing(&e->firstRecord) = L;
Record *r = recordFor(L);
txn->recoveryUnit()->writingInt(r->prevOfs()) = DiskLoc::NullOfs;
txn->recoveryUnit()->commitIfNeeded();
txn->checkForInterrupt();
}
}
} // if !L.isNull()
//......... remainder of the function omitted here .........
示例5: _repairExtent
// Walks every record in the extent at eLoc (front-to-back when `forward` is
// true, back-to-front otherwise), handing each readable document to the
// writer callback `w`.  Corrupt documents are logged and skipped.  Returns
// the DiskLoc of the next extent in the chosen direction, or a null DiskLoc
// when the extent offset itself is invalid.
DiskLoc _repairExtent( Database* db , string ns, bool forward , DiskLoc eLoc , Writer& w ){
    LogIndentLevel indentTop;

    if ( eLoc.getOfs() <= 0 ){
        error() << "invalid extent ofs: " << eLoc.getOfs() << endl;
        return DiskLoc();
    }

    MongoDataFile* dataFile = db->getFile( eLoc.a() );
    Extent* extent = dataFile->debug_getExtent( eLoc );
    if ( ! extent->isOk() ){
        // best-effort repair: press on even when the magic doesn't check out
        warning() << "Extent not ok magic: " << extent->magic << " going to try to continue" << endl;
    }

    log() << "length:" << extent->length << endl;

    LogIndentLevel indentRecords;
    set<DiskLoc> visited;
    DiskLoc cur = forward ? extent->firstRecord : extent->lastRecord;
    while ( ! cur.isNull() ){
        // guard against cyclic record chains
        if ( ! visited.insert( cur ).second ) {
            error() << "infinite loop in extent, seen: " << cur << " before" << endl;
            break;
        }
        if ( cur.getOfs() <= 0 ){
            error() << "offset is 0 for record which should be impossible" << endl;
            break;
        }
        LOG(1) << cur << endl;

        Record* record = cur.rec();
        BSONObj doc;
        try {
            doc = cur.obj();
            verify( doc.valid() );
            LOG(1) << doc << endl;
            w( doc );
        }
        catch ( std::exception& ex ) {
            log() << "found invalid document @ " << cur << " " << ex.what() << endl;
            if ( ! doc.isEmpty() ) {
                // try to salvage at least the first element for diagnostics
                try {
                    BSONElement firstElt = doc.firstElement();
                    stringstream ss;
                    ss << "first element: " << firstElt;
                    log() << ss.str();
                }
                catch ( std::exception& ) {
                    log() << "unable to log invalid document @ " << cur << endl;
                }
            }
        }

        cur = forward ? record->getNext( cur ) : record->getPrev( cur );
        // break when new loc is outside current extent boundary
        bool leftExtent = forward
            ? cur.compare( extent->lastRecord ) > 0
            : cur.compare( extent->firstRecord ) < 0;
        if ( leftExtent )
            break;
    }
    log() << "wrote " << visited.size() << " documents" << endl;
    return forward ? extent->xnext : extent->xprev;
}
示例6: max
// Tries to satisfy an extent allocation of roughly `approxSize` bytes from the
// extent freelist, scanning for the extent whose length is the closest fit
// within a [low, high] window (tight for capped collections, looser otherwise).
// NOTE(review): this fragment is truncated at the bottom (no return statement
// visible) AND the braces appear damaged by transcription: in upstream MongoDB
// the `else { OCCASIONALLY ... }` arm belongs to the outer length-range check,
// and `L = e->xnext; ++n;` executes on every iteration of the while loop --
// here the else attaches to `if (t.seconds() >= 2)` and the advance sits
// inside the range check.  Verify against the original source before reuse.
DiskLoc MmapV1ExtentManager::_allocFromFreeList(OperationContext* txn,
int approxSize,
bool capped) {
// setup extent constraints
int low, high;
if (capped) {
// be strict about the size
low = approxSize;
if (low > 2048)
low -= 256;
high = (int)(approxSize * 1.05) + 256;
} else {
low = (int)(approxSize * 0.8);
high = (int)(approxSize * 1.4);
}
if (high <= 0) {
// overflowed
high = max(approxSize, maxSize());
}
if (high <= minSize()) {
// the minimum extent size is 4097
high = minSize() + 1;
}
// scan free list looking for something suitable
int n = 0;
Extent* best = 0;
int bestDiff = 0x7fffffff;
{
Timer t;
DiskLoc L = _getFreeListStart();
while (!L.isNull()) {
Extent* e = getExtent(L);
if (e->length >= low && e->length <= high) {
// candidate: keep the extent with the smallest size difference
int diff = abs(e->length - approxSize);
if (diff < bestDiff) {
bestDiff = diff;
best = e;
if (((double)diff) / approxSize < 0.1) {
// close enough
break;
}
if (t.seconds() >= 2) {
// have spent lots of time in write lock, and we are in [low,high], so close
// enough could come into play if extent freelist is very long
break;
}
} else {
OCCASIONALLY {
if (high < 64 * 1024 && t.seconds() >= 2) {
// be less picky if it is taking a long time
high = 64 * 1024;
}
}
}
}
L = e->xnext;
++n;
}
if (t.seconds() >= 10) {
log() << "warning: slow scan in allocFromFreeList (in write lock)" << endl;
}
}
示例7: _updateById
/* Fast-path single-document update that resolves the target directly through
   the index at idIdxNo (per the caller, the _id index).  As-is this is only
   called for updates that are:
     - not multi
     - not against indexed mod fields
     - not upsert (there is no upsert support here)
   Returns UpdateResult(0,0,0,...) when no matching document exists. */
static UpdateResult _updateById(bool isOperatorUpdate,
                                int idIdxNo,
                                ModSet* mods,
                                int profile,
                                NamespaceDetails* d,
                                NamespaceDetailsTransient *nsdt,
                                bool su,
                                const char* ns,
                                const BSONObj& updateobj,
                                BSONObj patternOrig,
                                bool logop,
                                OpDebug& debug,
                                bool fromMigrate = false) {
    // Look up the one matching document through the index.
    DiskLoc recLoc;
    {
        IndexDetails& idIndex = d->idx(idIdxNo);
        BSONObj idKey = idIndex.getKeyFromQuery( patternOrig );
        recLoc = idIndex.idxInterface().findSingle(idIndex, idIndex.head, idKey);
    }
    if ( recLoc.isNull() ) {
        // no upsert support in _updateById yet, so we are done.
        return UpdateResult( 0 , 0 , 0 , BSONObj() );
    }

    // If the record is likely not resident, throw so the caller can retry
    // outside the lock after faulting the page in.
    Record* rec = recLoc.rec();
    if ( cc().allowedToThrowPageFaultException() && ! rec->likelyInPhysicalMemory() ) {
        throw PageFaultException( rec );
    }

    if ( ! isOperatorUpdate ) {
        // Whole-document replacement ("regular") update.
        BSONElementManipulator::lookForTimestamps( updateobj );
        checkNoMods( updateobj );
        verify(nsdt);
        theDataFileMgr.updateRecord(ns, d, nsdt, rec, recLoc , updateobj.objdata(), updateobj.objsize(), debug );
        if ( logop ) {
            logOp("u", ns, updateobj, &patternOrig, 0, fromMigrate );
        }
        return UpdateResult( 1 , 0 , 1 , BSONObj() );
    }

    /* look for $inc etc. note as listed here, all fields to inc must be this type, you can't set some
       regular ones at the moment. */
    const BSONObj& onDisk = recLoc.obj();
    auto_ptr<ModSetState> modState = mods->prepare( onDisk );
    if ( modState->canApplyInPlace() ) {
        // mods fit in the existing record -- mutate it directly
        modState->applyModsInPlace(true);
        DEBUGUPDATE( "\t\t\t updateById doing in place update" );
    }
    else {
        // build the post-image and rewrite the record
        BSONObj rewritten = modState->createNewFromMods();
        checkTooLarge(rewritten);
        verify(nsdt);
        theDataFileMgr.updateRecord(ns, d, nsdt, rec, recLoc , rewritten.objdata(), rewritten.objsize(), debug);
    }

    if ( logop ) {
        DEV verify( mods->size() );
        // The oplog pattern may need size specs appended for array-dependent mods.
        BSONObj oplogPattern = patternOrig;
        if ( modState->haveArrayDepMod() ) {
            BSONObjBuilder patternBuilder;
            patternBuilder.appendElements( oplogPattern );
            modState->appendSizeSpecForArrayDepMods( patternBuilder );
            oplogPattern = patternBuilder.obj();
        }
        if ( modState->needOpLogRewrite() ) {
            // log the rewritten mods rather than the raw update object
            DEBUGUPDATE( "\t rewrite update: " << modState->getOpLogRewrite() );
            logOp("u", ns, modState->getOpLogRewrite() ,
                  &oplogPattern, 0, fromMigrate );
        }
        else {
            logOp("u", ns, updateobj, &oplogPattern, 0, fromMigrate );
        }
    }
    return UpdateResult( 1 , 1 , 1 , BSONObj() );
}