本文整理汇总了C++中DiskLoc::ext方法的典型用法代码示例。如果您正苦于以下问题:C++ DiskLoc::ext方法的具体用法?C++ DiskLoc::ext怎么用?C++ DiskLoc::ext使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类DiskLoc
的用法示例。
在下文中一共展示了DiskLoc::ext方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: touchNs
// Pre-faults ("touches") every extent of the given namespace into memory.
// Phase 1: under a read lock, walk the extent chain and record each extent's
// file descriptor, file offset, and length.  Phase 2: temporarily release the
// database lock — while keeping the data files mapped via LockMongoFilesShared
// — and page each recorded range in, reporting progress and honoring kill
// requests between extents.
void touchNs( const std::string& ns ) {
std::vector< touch_location > ranges;
Client::ReadContext ctx(ns);
{
NamespaceDetails *nsd = nsdetails(ns.c_str());
uassert( 16154, "namespace does not exist", nsd );
// Walk the linked extent list from the first extent via xnext.
for( DiskLoc L = nsd->firstExtent; !L.isNull(); L = L.ext()->xnext ) {
MongoDataFile* mdf = cc().database()->getFile( L.a() );
massert( 16238, "can't fetch extent file structure", mdf );
touch_location tl;
tl.fd = mdf->getFd();
tl.offset = L.getOfs();
tl.ext = L.ext();
tl.length = tl.ext->length;
ranges.push_back(tl);
}
}
// Hold the mapped files open while dropping the DB lock, so the slow
// page-touching below does not block other operations on the database.
LockMongoFilesShared lk;
Lock::TempRelease tr;
std::string progress_msg = "touch " + ns + " extents";
ProgressMeterHolder pm( cc().curop()->setMessage( progress_msg.c_str() , ranges.size() ) );
for ( std::vector< touch_location >::iterator it = ranges.begin(); it != ranges.end(); ++it ) {
touch_pages( it->fd, it->offset, it->length, it->ext );
pm.hit();
// Allow the operation to be interrupted between extents.
killCurrentOp.checkForInterrupt(false);
}
pm.finished();
}
示例2: dumpExtents
void NamespaceDetails::dumpExtents() {
cout << "dumpExtents:" << endl;
for ( DiskLoc i = _firstExtent; !i.isNull(); i = i.ext()->xnext ) {
Extent *e = i.ext();
stringstream ss;
e->dump(ss);
cout << ss.str() << endl;
}
}
示例3: nRecords
int nRecords() const {
int count = 0;
for ( DiskLoc i = nsd()->firstExtent; !i.isNull(); i = i.ext()->xnext ) {
int fileNo = i.ext()->firstRecord.a();
if ( fileNo == -1 )
continue;
for ( int j = i.ext()->firstRecord.getOfs(); j != DiskLoc::NullOfs;
j = DiskLoc( fileNo, j ).rec()->nextOfs ) {
++count;
}
}
ASSERT_EQUALS( count, nsd()->nrecords );
return count;
}
示例4: insert
// bypass standard alloc/insert routines to use the extent we want.
// Hand-builds a record holding { a: i } directly at the tail of the given
// extent: computes the record's offset, fills in its header, copies the BSON
// payload, and links it into the extent's doubly-linked record chain.
// Returns the DiskLoc of the newly written record.
// NOTE(review): no capacity check — assumes the extent has room for the
// record; confirm callers guarantee this.
static DiskLoc insert( DiskLoc ext, int i ) {
BSONObjBuilder b;
b.append( "a", i );
BSONObj o = b.done();
int len = o.objsize();
Extent *e = ext.ext();
int ofs;
// Place the record at the start of the extent's data area if the extent is
// empty, otherwise immediately after the current last record.
if ( e->lastRecord.isNull() )
ofs = ext.getOfs() + ( e->extentData - (char *)e );
else
ofs = e->lastRecord.getOfs() + e->lastRecord.rec()->lengthWithHeaders;
DiskLoc dl( ext.a(), ofs );
Record *r = dl.rec();
// Fill in the record header, then copy in the BSON payload.
r->lengthWithHeaders = Record::HeaderSize + len;
r->extentOfs = e->myLoc.getOfs();
r->nextOfs = DiskLoc::NullOfs;
r->prevOfs = e->lastRecord.isNull() ? DiskLoc::NullOfs : e->lastRecord.getOfs();
memcpy( r->data, o.objdata(), len );
// Splice the new record onto the end of the extent's record list.
if ( e->firstRecord.isNull() )
e->firstRecord = dl;
else
e->lastRecord.rec()->nextOfs = ofs;
e->lastRecord = dl;
return dl;
}
示例5: emptyCappedCollection
// Removes all documents from a capped collection in place, keeping its
// extents allocated.  Requires that the collection is capped, has no
// indexes, and has no index build in progress.  Metadata is reset via a
// journaled ("writing") reference, and every extent is emptied and re-added
// to the deleted-record list so its space can be reused.
void NamespaceDetails::emptyCappedCollection( const char *ns ) {
DEV verify( this == nsdetails(ns) );
massert( 13424, "collection must be capped", capped );
massert( 13425, "background index build in progress", !indexBuildInProgress );
massert( 13426, "indexes present", nIndexes == 0 );
// Clear all references to this namespace.
ClientCursor::invalidate( ns );
NamespaceDetailsTransient::clearForPrefix( ns );
// Get a writeable reference to 'this' and reset all pertinent
// attributes.
NamespaceDetails *t = writingWithoutExtra();
t->cappedLastDelRecLastExtent() = DiskLoc();
t->cappedListOfAllDeletedRecords() = DiskLoc();
// preserve firstExtent/lastExtent
t->capExtent = firstExtent;
t->stats.datasize = stats.nrecords = 0;
// lastExtentSize preserve
// nIndexes preserve 0
// capped preserve true
// max preserve
t->paddingFactor = 1.0;
t->flags = 0;
t->capFirstNewRecord = DiskLoc();
// setInvalid() marks the loc as "invalid" rather than null — presumably a
// distinct sentinel used by the capped-insert logic; defined elsewhere.
t->capFirstNewRecord.setInvalid();
t->cappedLastDelRecLastExtent().setInvalid();
// dataFileVersion preserve
// indexFileVersion preserve
t->multiKeyIndexBits = 0;
t->reservedA = 0;
t->extraOffset = 0;
// indexBuildInProgress preserve 0
memset(t->reserved, 0, sizeof(t->reserved));
// Reset all existing extents and recreate the deleted list.
// xprev/xnext are saved before reuse() and written back afterwards
// (reuse() presumably resets them); .writing() journals the stores.
for( DiskLoc ext = firstExtent; !ext.isNull(); ext = ext.ext()->xnext ) {
DiskLoc prev = ext.ext()->xprev;
DiskLoc next = ext.ext()->xnext;
DiskLoc empty = ext.ext()->reuse( ns, true );
ext.ext()->xprev.writing() = prev;
ext.ext()->xnext.writing() = next;
addDeletedRec( empty.drec(), empty );
}
}
示例6: emptyCappedCollection
// Older variant of emptyCappedCollection (note: no journaled .writing()
// wrappers, plain assert instead of verify).  Removes all documents from a
// capped collection in place: requires no indexes and no background index
// build, resets the pertinent metadata fields directly, then empties every
// extent and re-adds it to the deleted-record list.
void NamespaceDetails::emptyCappedCollection( const char *ns ) {
DEV assert( this == nsdetails(ns) );
massert( 13424, "collection must be capped", capped );
massert( 13425, "background index build in progress", !backgroundIndexBuildInProgress );
massert( 13426, "indexes present", nIndexes == 0 );
// Drop all cursors and cached transient state for this namespace.
ClientCursor::invalidate( ns );
NamespaceDetailsTransient::clearForPrefix( ns );
cappedLastDelRecLastExtent() = DiskLoc();
cappedListOfAllDeletedRecords() = DiskLoc();
// preserve firstExtent/lastExtent
capExtent = firstExtent;
stats.datasize = stats.nrecords = 0;
// lastExtentSize preserve
// nIndexes preserve 0
// capped preserve true
// max preserve
paddingFactor = 1.0;
flags = 0;
capFirstNewRecord = DiskLoc();
// setInvalid() marks the loc as a distinct "invalid" sentinel (not null);
// its meaning is defined by the capped-collection logic elsewhere.
capFirstNewRecord.setInvalid();
cappedLastDelRecLastExtent().setInvalid();
// dataFileVersion preserve
// indexFileVersion preserve
multiKeyIndexBits = 0;
reservedA = 0;
extraOffset = 0;
// backgroundIndexBuildInProgress preserve 0
memset(reserved, 0, sizeof(reserved));
// Reset every extent: save its chain pointers, reuse() it (which
// presumably wipes them), then restore the chain and register the freed
// space on the deleted-record list.
for( DiskLoc ext = firstExtent; !ext.isNull(); ext = ext.ext()->xnext ) {
DiskLoc prev = ext.ext()->xprev;
DiskLoc next = ext.ext()->xnext;
DiskLoc empty = ext.ext()->reuse( ns );
ext.ext()->xprev = prev;
ext.ext()->xnext = next;
addDeletedRec( empty.drec(), empty );
}
}
示例7: _compact
bool _compact(const char *ns, NamespaceDetails *d, string& errmsg, bool validate, BSONObjBuilder& result, double pf, int pb) {
// this is a big job, so might as well make things tidy before we start just to be nice.
getDur().commitIfNeeded();
list<DiskLoc> extents;
for( DiskLoc L = d->firstExtent; !L.isNull(); L = L.ext()->xnext )
extents.push_back(L);
log() << "compact " << extents.size() << " extents" << endl;
ProgressMeterHolder pm( cc().curop()->setMessage( "compact extent" , extents.size() ) );
// same data, but might perform a little different after compact?
NamespaceDetailsTransient::get(ns).clearQueryCache();
int nidx = d->nIndexes;
scoped_array<IndexSpec> indexSpecs( new IndexSpec[nidx] );
scoped_array<SortPhaseOne> phase1( new SortPhaseOne[nidx] );
{
NamespaceDetails::IndexIterator ii = d->ii();
// For each existing index...
for( int idxNo = 0; ii.more(); ++idxNo ) {
// Build a new index spec based on the old index spec.
BSONObjBuilder b;
BSONObj::iterator i(ii.next().info.obj());
while( i.more() ) {
BSONElement e = i.next();
if ( str::equals( e.fieldName(), "v" ) ) {
// Drop any preexisting index version spec. The default index version will
// be used instead for the new index.
continue;
}
if ( str::equals( e.fieldName(), "background" ) ) {
// Create the new index in the foreground.
continue;
}
// Pass the element through to the new index spec.
b.append(e);
}
// Add the new index spec to 'indexSpecs'.
BSONObj o = b.obj().getOwned();
indexSpecs[idxNo].reset(o);
// Create an external sorter.
phase1[idxNo].sorter.reset
( new BSONObjExternalSorter
// Use the default index interface, since the new index will be created
// with the default index version.
( IndexInterface::defaultVersion(),
o.getObjectField("key") ) );
phase1[idxNo].sorter->hintNumObjects( d->stats.nrecords );
}
}
log() << "compact orphan deleted lists" << endl;
for( int i = 0; i < Buckets; i++ ) {
d->deletedList[i].writing().Null();
}
// Start over from scratch with our extent sizing and growth
d->lastExtentSize=0;
// before dropping indexes, at least make sure we can allocate one extent!
uassert(14025, "compact error no space available to allocate", !allocateSpaceForANewRecord(ns, d, Record::HeaderSize+1, false).isNull());
// note that the drop indexes call also invalidates all clientcursors for the namespace, which is important and wanted here
log() << "compact dropping indexes" << endl;
BSONObjBuilder b;
if( !dropIndexes(d, ns, "*", errmsg, b, true) ) {
errmsg = "compact drop indexes failed";
log() << errmsg << endl;
return false;
}
getDur().commitIfNeeded();
long long skipped = 0;
int n = 0;
// reset data size and record counts to 0 for this namespace
// as we're about to tally them up again for each new extent
{
NamespaceDetails::Stats *s = getDur().writing(&d->stats);
s->datasize = 0;
s->nrecords = 0;
}
for( list<DiskLoc>::iterator i = extents.begin(); i != extents.end(); i++ ) {
skipped += compactExtent(ns, d, *i, n++, indexSpecs, phase1, nidx, validate, pf, pb);
pm.hit();
}
if( skipped ) {
result.append("invalidObjects", skipped);
}
verify( d->firstExtent.ext()->xprev.isNull() );
// indexes will do their own progress meter?
pm.finished();
//.........这里部分代码省略.........
示例8: ii
// Compacts this collection: every record is rewritten into freshly
// allocated space and all indexes are rebuilt from their (validated)
// specs via a MultiIndexBlock.  Returns the collected CompactStats, or an
// error status if the collection is capped, an index build is in
// progress, an index key pattern fails validation, or a drop/commit step
// fails.
StatusWith<CompactStats> Collection::compact( const CompactOptions* compactOptions ) {
if ( isCapped() )
return StatusWith<CompactStats>( ErrorCodes::BadValue,
"cannot compact capped collection" );
if ( _indexCatalog.numIndexesInProgress() )
return StatusWith<CompactStats>( ErrorCodes::BadValue,
"cannot compact when indexes in progress" );
NamespaceDetails* d = details();
// this is a big job, so might as well make things tidy before we start just to be nice.
getDur().commitIfNeeded();
// Snapshot the extent chain up front; each extent is compacted in turn below.
list<DiskLoc> extents;
for( DiskLoc L = d->firstExtent(); !L.isNull(); L = L.ext()->xnext )
extents.push_back(L);
log() << "compact " << extents.size() << " extents" << endl;
// same data, but might perform a little different after compact?
_infoCache.reset();
// Collect (and validate) the spec of every existing index so the indexes
// can be recreated after they are dropped.
vector<BSONObj> indexSpecs;
{
IndexCatalog::IndexIterator ii( _indexCatalog.getIndexIterator( false ) );
while ( ii.more() ) {
IndexDescriptor* descriptor = ii.next();
const BSONObj spec = _compactAdjustIndexSpec(descriptor->infoObj());
const BSONObj key = spec.getObjectField("key");
const Status keyStatus = validateKeyPattern(key);
if (!keyStatus.isOK()) {
return StatusWith<CompactStats>(
ErrorCodes::CannotCreateIndex,
str::stream() << "Cannot rebuild index " << spec << ": "
<< keyStatus.reason()
<< " For more info see"
<< " http://dochub.mongodb.org/core/index-validation");
}
indexSpecs.push_back(spec);
}
}
log() << "compact orphan deleted lists" << endl;
d->orphanDeletedList();
// Start over from scratch with our extent sizing and growth
d->setLastExtentSize( 0 );
// before dropping indexes, at least make sure we can allocate one extent!
// this will allocate an extent and add to free list
// if it cannot, it will throw an exception
increaseStorageSize( _details->lastExtentSize(), true );
// note that the drop indexes call also invalidates all clientcursors for the namespace,
// which is important and wanted here
log() << "compact dropping indexes" << endl;
Status status = _indexCatalog.dropAllIndexes( true );
if ( !status.isOK() ) {
return StatusWith<CompactStats>( status );
}
getDur().commitIfNeeded();
killCurrentOp.checkForInterrupt();
CompactStats stats;
// The replacement indexes are built incrementally while records are copied.
MultiIndexBlock multiIndexBlock( this );
status = multiIndexBlock.init( indexSpecs );
if ( !status.isOK() )
return StatusWith<CompactStats>( status );
// reset data size and record counts to 0 for this namespace
// as we're about to tally them up again for each new extent
d->setStats( 0, 0 );
ProgressMeterHolder pm(cc().curop()->setMessage("compact extent",
"Extent Compacting Progress",
extents.size()));
int extentNumber = 0;
for( list<DiskLoc>::iterator i = extents.begin(); i != extents.end(); i++ ) {
_compactExtent(*i, extentNumber++, multiIndexBlock, compactOptions, &stats );
pm.hit();
}
// Sanity check: after compaction the first extent must be the chain head.
verify( d->firstExtent().ext()->xprev.isNull() );
// indexes will do their own progress meter?
pm.finished();
log() << "starting index commits";
status = multiIndexBlock.commit();
if ( !status.isOK() )
return StatusWith<CompactStats>( status );
return StatusWith<CompactStats>( stats );
}
示例9: _compactExtent
void Collection::_compactExtent(const DiskLoc diskloc, int extentNumber,
MultiIndexBlock& indexesToInsertTo,
const CompactOptions* compactOptions, CompactStats* stats ) {
log() << "compact begin extent #" << extentNumber
<< " for namespace " << _ns << " " << diskloc;
unsigned oldObjSize = 0; // we'll report what the old padding was
unsigned oldObjSizeWithPadding = 0;
Extent *e = diskloc.ext();
e->assertOk();
verify( e->validates(diskloc) );
{
// the next/prev pointers within the extent might not be in order so we first
// page the whole thing in sequentially
log() << "compact paging in len=" << e->length/1000000.0 << "MB" << endl;
Timer t;
size_t length = e->length;
touch_pages( reinterpret_cast<const char*>(e), length );
int ms = t.millis();
if( ms > 1000 )
log() << "compact end paging in " << ms << "ms "
<< e->length/1000000.0/t.seconds() << "MB/sec" << endl;
}
{
log() << "compact copying records" << endl;
long long datasize = 0;
long long nrecords = 0;
DiskLoc L = e->firstRecord;
if( !L.isNull() ) {
while( 1 ) {
Record *recOld = L.rec();
L = getExtentManager()->getNextRecordInExtent(L);
BSONObj objOld = BSONObj::make(recOld);
if ( compactOptions->validateDocuments && !objOld.valid() ) {
// object is corrupt!
log() << "compact skipping corrupt document!";
stats->corruptDocuments++;
}
else {
unsigned docSize = objOld.objsize();
nrecords++;
oldObjSize += docSize;
oldObjSizeWithPadding += recOld->netLength();
unsigned lenWHdr = docSize + Record::HeaderSize;
unsigned lenWPadding = lenWHdr;
switch( compactOptions->paddingMode ) {
case CompactOptions::NONE:
if ( details()->isUserFlagSet(NamespaceDetails::Flag_UsePowerOf2Sizes) )
lenWPadding = details()->quantizePowerOf2AllocationSpace(lenWPadding);
break;
case CompactOptions::PRESERVE:
// if we are preserving the padding, the record should not change size
lenWPadding = recOld->lengthWithHeaders();
break;
case CompactOptions::MANUAL:
lenWPadding = compactOptions->computeRecordSize(lenWPadding);
if (lenWPadding < lenWHdr || lenWPadding > BSONObjMaxUserSize / 2 ) {
lenWPadding = lenWHdr;
}
break;
}
CompactDocWriter writer( objOld, lenWPadding );
StatusWith<DiskLoc> status = _recordStore->insertRecord( &writer, 0 );
uassertStatusOK( status.getStatus() );
datasize += _recordStore->recordFor( status.getValue() )->netLength();
InsertDeleteOptions options;
options.logIfError = false;
options.dupsAllowed = true; // in compact we should be doing no checking
indexesToInsertTo.insert( objOld, status.getValue(), options );
}
if( L.isNull() ) {
// we just did the very last record from the old extent. it's still pointed to
// by the old extent ext, but that will be fixed below after this loop
break;
}
// remove the old records (orphan them) periodically so our commit block doesn't get too large
bool stopping = false;
RARELY stopping = *killCurrentOp.checkForInterruptNoAssert() != 0;
if( stopping || getDur().aCommitIsNeeded() ) {
e->firstRecord.writing() = L;
Record *r = L.rec();
getDur().writingInt(r->prevOfs()) = DiskLoc::NullOfs;
getDur().commitIfNeeded();
killCurrentOp.checkForInterrupt();
}
}
//.........这里部分代码省略.........
示例10: validateNS
string validateNS(const char *ns, NamespaceDetails *d, BSONObj *cmdObj) {
bool scanData = true;
if( cmdObj && cmdObj->hasElement("scandata") && !cmdObj->getBoolField("scandata") )
scanData = false;
bool valid = true;
stringstream ss;
ss << "\nvalidate\n";
//ss << " details: " << hex << d << " ofs:" << nsindex(ns)->detailsOffset(d) << dec << endl;
if ( d->capped )
ss << " capped:" << d->capped << " max:" << d->max << '\n';
ss << " firstExtent:" << d->firstExtent.toString() << " ns:" << d->firstExtent.ext()->nsDiagnostic.toString()<< '\n';
ss << " lastExtent:" << d->lastExtent.toString() << " ns:" << d->lastExtent.ext()->nsDiagnostic.toString() << '\n';
try {
d->firstExtent.ext()->assertOk();
d->lastExtent.ext()->assertOk();
DiskLoc el = d->firstExtent;
int ne = 0;
while( !el.isNull() ) {
Extent *e = el.ext();
e->assertOk();
el = e->xnext;
ne++;
killCurrentOp.checkForInterrupt();
}
ss << " # extents:" << ne << '\n';
}
catch (...) {
valid=false;
ss << " extent asserted ";
}
ss << " datasize?:" << d->stats.datasize << " nrecords?:" << d->stats.nrecords << " lastExtentSize:" << d->lastExtentSize << '\n';
ss << " padding:" << d->paddingFactor << '\n';
try {
try {
ss << " first extent:\n";
d->firstExtent.ext()->dump(ss);
valid = valid && d->firstExtent.ext()->validates();
}
catch (...) {
ss << "\n exception firstextent\n" << endl;
}
set<DiskLoc> recs;
if( scanData ) {
shared_ptr<Cursor> c = theDataFileMgr.findAll(ns);
int n = 0;
long long len = 0;
long long nlen = 0;
int outOfOrder = 0;
DiskLoc cl_last;
while ( c->ok() ) {
n++;
DiskLoc cl = c->currLoc();
if ( n < 1000000 )
recs.insert(cl);
if ( d->capped ) {
if ( cl < cl_last )
outOfOrder++;
cl_last = cl;
}
Record *r = c->_current();
len += r->lengthWithHeaders;
nlen += r->netLength();
c->advance();
}
if ( d->capped && !d->capLooped() ) {
ss << " capped outOfOrder:" << outOfOrder;
if ( outOfOrder > 1 ) {
valid = false;
ss << " ???";
}
else ss << " (OK)";
ss << '\n';
}
ss << " " << n << " objects found, nobj:" << d->stats.nrecords << '\n';
ss << " " << len << " bytes data w/headers\n";
ss << " " << nlen << " bytes data wout/headers\n";
}
ss << " deletedList: ";
for ( int i = 0; i < Buckets; i++ ) {
ss << (d->deletedList[i].isNull() ? '0' : '1');
}
ss << endl;
int ndel = 0;
long long delSize = 0;
int incorrect = 0;
for ( int i = 0; i < Buckets; i++ ) {
DiskLoc loc = d->deletedList[i];
try {
int k = 0;
while ( !loc.isNull() ) {
if ( recs.count(loc) )
incorrect++;
//.........这里部分代码省略.........
示例11: run
// Legacy renameCollection command implementation.
// Same-database renames are done in place via renameNamespace().  Cross-
// database renames copy every document, and every index definition (with
// its "ns" field rewritten), into a freshly created target collection and
// then drop the source.  A capped source is recreated as a capped target
// sized to the sum of the source's extent lengths.
virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
string source = cmdObj.getStringField( name.c_str() );
string target = cmdObj.getStringField( "to" );
if ( source.empty() || target.empty() ) {
errmsg = "invalid command syntax";
return false;
}
setClient( source.c_str() );
NamespaceDetails *nsd = nsdetails( source.c_str() );
uassert( "source namespace does not exist", nsd );
bool capped = nsd->capped;
long long size = 0;
// For a capped source, total the extent lengths so the target can be
// created with equivalent storage.
if ( capped )
for( DiskLoc i = nsd->firstExtent; !i.isNull(); i = i.ext()->xnext )
size += i.ext()->length;
setClient( target.c_str() );
uassert( "target namespace exists", !nsdetails( target.c_str() ) );
{
// Same database?  Then a cheap in-place namespace rename suffices.
char from[256];
nsToClient( source.c_str(), from );
char to[256];
nsToClient( target.c_str(), to );
if ( strcmp( from, to ) == 0 ) {
renameNamespace( source.c_str(), target.c_str() );
return true;
}
}
// Cross-database: create the target (capped if the source was), then copy.
BSONObjBuilder spec;
if ( capped ) {
spec.appendBool( "capped", true );
spec.append( "size", double( size ) );
}
if ( !userCreateNS( target.c_str(), spec.done(), errmsg, false ) )
return false;
auto_ptr< DBClientCursor > c;
DBDirectClient bridge;
{
c = bridge.query( source, BSONObj() );
}
// Copy every document from source to target.
while( 1 ) {
{
if ( !c->more() )
break;
}
BSONObj o = c->next();
theDataFileMgr.insert( target.c_str(), o );
}
char cl[256];
nsToClient( source.c_str(), cl );
string sourceIndexes = string( cl ) + ".system.indexes";
nsToClient( target.c_str(), cl );
string targetIndexes = string( cl ) + ".system.indexes";
{
c = bridge.query( sourceIndexes, QUERY( "ns" << source ) );
}
// Copy each index definition, rewriting its "ns" field to the target.
while( 1 ) {
{
if ( !c->more() )
break;
}
BSONObj o = c->next();
BSONObjBuilder b;
BSONObjIterator i( o );
while( i.moreWithEOO() ) {
BSONElement e = i.next();
if ( e.eoo() )
break;
if ( strcmp( e.fieldName(), "ns" ) == 0 ) {
b.append( "ns", target );
} else {
b.append( e );
}
}
BSONObj n = b.done();
theDataFileMgr.insert( targetIndexes.c_str(), n );
}
// Finally, remove the source collection.
setClient( source.c_str() );
dropCollection( source, errmsg, result );
return true;
}
示例12: validateNS
void validateNS(const string& ns,
Collection* collection,
const BSONObj& cmdObj,
BSONObjBuilder& result) {
const bool full = cmdObj["full"].trueValue();
const bool scanData = full || cmdObj["scandata"].trueValue();
NamespaceDetails* nsd = collection->details();
bool valid = true;
BSONArrayBuilder errors; // explanation(s) for why valid = false
if ( collection->isCapped() ){
result.append("capped", nsd->isCapped());
result.appendNumber("max", nsd->maxCappedDocs());
}
if ( nsd->firstExtent().isNull() )
result.append( "firstExtent", "null" );
else
result.append( "firstExtent", str::stream() << nsd->firstExtent().toString()
<< " ns:" << nsd->firstExtent().ext()->nsDiagnostic.toString());
if ( nsd->lastExtent().isNull() )
result.append( "lastExtent", "null" );
else
result.append( "lastExtent", str::stream() << nsd->lastExtent().toString()
<< " ns:" << nsd->lastExtent().ext()->nsDiagnostic.toString());
BSONArrayBuilder extentData;
int extentCount = 0;
try {
if ( !nsd->firstExtent().isNull() ) {
nsd->firstExtent().ext()->assertOk();
nsd->lastExtent().ext()->assertOk();
}
DiskLoc extentDiskLoc = nsd->firstExtent();
while (!extentDiskLoc.isNull()) {
Extent* thisExtent = extentDiskLoc.ext();
if (full) {
extentData << thisExtent->dump();
}
if (!thisExtent->validates(extentDiskLoc, &errors)) {
valid = false;
}
DiskLoc nextDiskLoc = thisExtent->xnext;
if (extentCount > 0 && !nextDiskLoc.isNull()
&& nextDiskLoc.ext()->xprev != extentDiskLoc) {
StringBuilder sb;
sb << "'xprev' pointer " << nextDiskLoc.ext()->xprev.toString()
<< " in extent " << nextDiskLoc.toString()
<< " does not point to extent " << extentDiskLoc.toString();
errors << sb.str();
valid = false;
}
if (nextDiskLoc.isNull() && extentDiskLoc != nsd->lastExtent()) {
StringBuilder sb;
sb << "'lastExtent' pointer " << nsd->lastExtent().toString()
<< " does not point to last extent in list " << extentDiskLoc.toString();
errors << sb.str();
valid = false;
}
extentDiskLoc = nextDiskLoc;
extentCount++;
killCurrentOp.checkForInterrupt();
}
}
catch (const DBException& e) {
StringBuilder sb;
sb << "exception validating extent " << extentCount
<< ": " << e.what();
errors << sb.str();
valid = false;
}
result.append("extentCount", extentCount);
if ( full )
result.appendArray( "extents" , extentData.arr() );
result.appendNumber("datasize", nsd->dataSize());
result.appendNumber("nrecords", nsd->numRecords());
result.appendNumber("lastExtentSize", nsd->lastExtentSize());
result.appendNumber("padding", nsd->paddingFactor());
try {
bool testingLastExtent = false;
try {
if (nsd->firstExtent().isNull()) {
// this is ok
}
else {
result.append("firstExtentDetails", nsd->firstExtent().ext()->dump());
if (!nsd->firstExtent().ext()->xprev.isNull()) {
StringBuilder sb;
sb << "'xprev' pointer in 'firstExtent' " << nsd->firstExtent().toString()
<< " is " << nsd->firstExtent().ext()->xprev.toString()
<< ", should be null";
errors << sb.str();
//.........这里部分代码省略.........
示例13: emptyCappedCollection
// Index-preserving variant of emptyCappedCollection: snapshots the index
// definitions from system.indexes, drops all indexes, resets the capped
// collection's metadata and extents (journaled via .writing()), then
// reinserts the saved index definitions so the indexes are rebuilt empty.
void NamespaceDetails::emptyCappedCollection( const char *ns ) {
DEV verify( this == nsdetails(ns) );
massert( 13424, "collection must be capped", isCapped() );
massert( 13425, "background index build in progress", !_indexBuildsInProgress );
// Save copies of the index definitions so they can be recreated below.
vector<BSONObj> indexes = Helpers::findAll( Namespace( ns ).getSisterNS( "system.indexes" ) , BSON( "ns" << ns ) );
for ( unsigned i=0; i<indexes.size(); i++ ) {
indexes[i] = indexes[i].copy();
}
if ( _nIndexes ) {
string errmsg;
BSONObjBuilder note;
bool res = dropIndexes( this , ns , "*" , errmsg , note , true );
massert( 13426 , str::stream() << "failed during index drop: " << errmsg , res );
}
// Clear all references to this namespace.
ClientCursor::invalidate( ns );
NamespaceDetailsTransient::resetCollection( ns );
// Get a writeable reference to 'this' and reset all pertinent
// attributes.
NamespaceDetails *t = writingWithoutExtra();
t->cappedLastDelRecLastExtent() = DiskLoc();
t->cappedListOfAllDeletedRecords() = DiskLoc();
// preserve firstExtent/lastExtent
t->_capExtent = _firstExtent;
t->_stats.datasize = _stats.nrecords = 0;
// lastExtentSize preserve
// nIndexes preserve 0
// capped preserve true
// max preserve
t->_paddingFactor = 1.0;
t->_systemFlags = 0;
t->_capFirstNewRecord = DiskLoc();
// setInvalid() marks the loc as an "invalid" sentinel distinct from null;
// its meaning is defined by the capped-collection logic elsewhere.
t->_capFirstNewRecord.setInvalid();
t->cappedLastDelRecLastExtent().setInvalid();
// dataFileVersion preserve
// indexFileVersion preserve
t->_multiKeyIndexBits = 0;
t->_reservedA = 0;
t->_extraOffset = 0;
// indexBuildInProgress preserve 0
memset(t->_reserved, 0, sizeof(t->_reserved));
// Reset all existing extents and recreate the deleted list.
// Chain pointers are saved across reuse() (which presumably resets them)
// and written back through the journal via .writing().
for( DiskLoc ext = _firstExtent; !ext.isNull(); ext = ext.ext()->xnext ) {
DiskLoc prev = ext.ext()->xprev;
DiskLoc next = ext.ext()->xnext;
DiskLoc empty = ext.ext()->reuse( ns, true );
ext.ext()->xprev.writing() = prev;
ext.ext()->xnext.writing() = next;
addDeletedRec( empty.drec(), empty );
}
// Reinsert the saved index definitions to rebuild the (now empty) indexes.
for ( unsigned i=0; i<indexes.size(); i++ ) {
theDataFileMgr.insertWithObjMod(Namespace( ns ).getSisterNS( "system.indexes" ).c_str(),
indexes[i],
false,
true);
}
}
示例14: cloneCollectionAsCapped
// Creates a new capped collection 'shortTo' of roughly 'size' bytes in the
// same database and fills it with documents scanned from 'shortFrom'.
// When the source holds more data than fits, whole leading extents are
// skipped so that (approximately) only the newest data is copied —
// matching capped semantics, where the oldest documents are discarded.
// Returns an error status if the source is missing, the target already
// exists, creation fails, or the scan dies mid-iteration.
Status cloneCollectionAsCapped( Database* db,
const string& shortFrom,
const string& shortTo,
double size,
bool temp,
bool logForReplication ) {
string fromNs = db->name() + "." + shortFrom;
string toNs = db->name() + "." + shortTo;
Collection* fromCollection = db->getCollection( fromNs );
if ( !fromCollection )
return Status( ErrorCodes::NamespaceNotFound,
str::stream() << "source collection " << fromNs << " does not exist" );
if ( db->getCollection( toNs ) )
return Status( ErrorCodes::NamespaceExists, "to collection already exists" );
// create new collection
{
Client::Context ctx( toNs );
BSONObjBuilder spec;
spec.appendBool( "capped", true );
spec.append( "size", size );
if ( temp )
spec.appendBool( "temp", true );
string errmsg;
if ( !userCreateNS( toNs.c_str(), spec.done(), errmsg, logForReplication ) )
return Status( ErrorCodes::InternalError, errmsg );
}
auto_ptr<Runner> runner;
{
const NamespaceDetails* details = fromCollection->details();
DiskLoc extent = details->firstExtent();
// datasize and extentSize can't be compared exactly, so add some padding to 'size'
long long excessSize =
static_cast<long long>( fromCollection->dataSize() - size * 2 );
// skip ahead some extents since not all the data fits,
// so we have to chop a bunch off
for( ;
excessSize > extent.ext()->length && extent != details->lastExtent();
extent = extent.ext()->xnext ) {
excessSize -= extent.ext()->length;
LOG( 2 ) << "cloneCollectionAsCapped skipping extent of size "
<< extent.ext()->length << endl;
LOG( 6 ) << "excessSize: " << excessSize << endl;
}
// Begin the forward scan at the first record of the first kept extent.
DiskLoc startLoc = extent.ext()->firstRecord;
runner.reset( InternalPlanner::collectionScan(fromNs,
InternalPlanner::FORWARD,
startLoc) );
}
Collection* toCollection = db->getCollection( toNs );
verify( toCollection );
// Pump the scan, inserting each document into the capped target.
while ( true ) {
BSONObj obj;
Runner::RunnerState state = runner->getNext(&obj, NULL);
switch( state ) {
case Runner::RUNNER_EOF:
return Status::OK();
case Runner::RUNNER_DEAD:
db->dropCollection( toNs );
return Status( ErrorCodes::InternalError, "runner turned dead while iterating" );
case Runner::RUNNER_ERROR:
return Status( ErrorCodes::InternalError, "runner error while iterating" );
case Runner::RUNNER_ADVANCED:
// Last case: falls out of the switch and loops for the next doc.
toCollection->insertDocument( obj, true );
if ( logForReplication )
logOp( "i", toNs.c_str(), obj );
getDur().commitIfNeeded();
}
}
verify( false ); // unreachable
}
示例15: validateNS
void validateNS(const char *ns, NamespaceDetails *d, const BSONObj& cmdObj, BSONObjBuilder& result) {
const bool full = cmdObj["full"].trueValue();
const bool scanData = full || cmdObj["scandata"].trueValue();
bool valid = true;
BSONArrayBuilder errors; // explanation(s) for why valid = false
if ( d->isCapped() ){
result.append("capped", d->isCapped());
result.appendNumber("max", d->maxCappedDocs());
}
result.append("firstExtent", str::stream() << d->firstExtent.toString() << " ns:" << d->firstExtent.ext()->nsDiagnostic.toString());
result.append( "lastExtent", str::stream() << d->lastExtent.toString() << " ns:" << d->lastExtent.ext()->nsDiagnostic.toString());
BSONArrayBuilder extentData;
try {
d->firstExtent.ext()->assertOk();
d->lastExtent.ext()->assertOk();
DiskLoc el = d->firstExtent;
int ne = 0;
while( !el.isNull() ) {
Extent *e = el.ext();
e->assertOk();
el = e->xnext;
ne++;
if ( full )
extentData << e->dump();
killCurrentOp.checkForInterrupt();
}
result.append("extentCount", ne);
}
catch (...) {
valid=false;
errors << "extent asserted";
}
if ( full )
result.appendArray( "extents" , extentData.arr() );
result.appendNumber("datasize", d->stats.datasize);
result.appendNumber("nrecords", d->stats.nrecords);
result.appendNumber("lastExtentSize", d->lastExtentSize);
result.appendNumber("padding", d->paddingFactor());
try {
try {
result.append("firstExtentDetails", d->firstExtent.ext()->dump());
valid = valid && d->firstExtent.ext()->validates() &&
d->firstExtent.ext()->xprev.isNull();
}
catch (...) {
errors << "exception firstextent";
valid = false;
}
set<DiskLoc> recs;
if( scanData ) {
shared_ptr<Cursor> c = theDataFileMgr.findAll(ns);
int n = 0;
int nInvalid = 0;
long long len = 0;
long long nlen = 0;
int outOfOrder = 0;
DiskLoc cl_last;
while ( c->ok() ) {
n++;
DiskLoc cl = c->currLoc();
if ( n < 1000000 )
recs.insert(cl);
if ( d->isCapped() ) {
if ( cl < cl_last )
outOfOrder++;
cl_last = cl;
}
Record *r = c->_current();
len += r->lengthWithHeaders();
nlen += r->netLength();
if (full){
BSONObj obj = BSONObj::make(r);
if (!obj.isValid() || !obj.valid()){ // both fast and deep checks
valid = false;
if (nInvalid == 0) // only log once;
errors << "invalid bson object detected (see logs for more info)";
nInvalid++;
if (strcmp("_id", obj.firstElementFieldName()) == 0){
try {
obj.firstElement().validate(); // throws on error
log() << "Invalid bson detected in " << ns << " with _id: " << obj.firstElement().toString(false) << endl;
}
//.........这里部分代码省略.........