本文整理汇总了C++中Extent::assertOk方法的典型用法代码示例。如果您正苦于以下问题:C++ Extent::assertOk方法的具体用法?C++ Extent::assertOk怎么用?C++ Extent::assertOk使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Extent
的用法示例。
在下文中一共展示了Extent::assertOk方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1:
// Resolves a DiskLoc to its in-memory Extent: the extent lives at byte offset
// loc.getOfs() inside memory-mapped data file number loc.a().  When
// doSanityCheck is set, the extent's internal invariants are asserted as well.
Extent* MmapV1ExtentManager::getExtent( const DiskLoc& loc, bool doSanityCheck ) const {
    loc.assertOk();
    const int fileOffset = loc.getOfs();
    Extent* extent =
        reinterpret_cast<Extent*>( _getOpenFile( loc.a() )->p() + fileOffset );
    if ( doSanityCheck ) {
        extent->assertOk();
    }
    return extent;
}
示例2: getExtent
// Resolves a DiskLoc to an Extent held in this dummy manager's in-memory
// extent table.  Dummy extents always start at offset zero in their slot,
// which the third invariant enforces.
Extent* DummyExtentManager::getExtent( const DiskLoc& loc, bool doSanityCheck ) const {
    invariant( !loc.isNull() );
    invariant( static_cast<size_t>( loc.a() ) < _extents.size() );
    invariant( loc.getOfs() == 0 );
    Extent* result = reinterpret_cast<Extent*>( _extents[loc.a()].data );
    if ( doSanityCheck ) {
        result->assertOk();
    }
    return result;
}
示例3: _compactExtent
// Compacts one extent of this collection (extent #extentNumber at diskloc):
// first pages the whole extent into memory sequentially, then re-inserts every
// valid record through _recordStore (which allocates with the requested
// padding policy) and feeds the surviving documents to the index builder.
// Documents failing BSON validation are skipped and counted in
// stats->corruptDocuments.
// NOTE(review): the source page truncated this example, so the tail of the
// function (post-loop extent cleanup) is not visible here.
void Collection::_compactExtent(const DiskLoc diskloc, int extentNumber,
MultiIndexBlock& indexesToInsertTo,
const CompactOptions* compactOptions, CompactStats* stats ) {
log() << "compact begin extent #" << extentNumber
<< " for namespace " << _ns << " " << diskloc;
unsigned oldObjSize = 0; // we'll report what the old padding was
unsigned oldObjSizeWithPadding = 0;
Extent *e = diskloc.ext();
e->assertOk();
verify( e->validates(diskloc) );
{
// the next/prev pointers within the extent might not be in order so we first
// page the whole thing in sequentially
log() << "compact paging in len=" << e->length/1000000.0 << "MB" << endl;
Timer t;
size_t length = e->length;
touch_pages( reinterpret_cast<const char*>(e), length );
int ms = t.millis();
if( ms > 1000 )
log() << "compact end paging in " << ms << "ms "
<< e->length/1000000.0/t.seconds() << "MB/sec" << endl;
}
{
log() << "compact copying records" << endl;
long long datasize = 0;
long long nrecords = 0;
DiskLoc L = e->firstRecord;
if( !L.isNull() ) {
while( 1 ) {
Record *recOld = L.rec();
// advance the cursor BEFORE the old record gets orphaned below
L = getExtentManager()->getNextRecordInExtent(L);
BSONObj objOld = BSONObj::make(recOld);
if ( compactOptions->validateDocuments && !objOld.valid() ) {
// object is corrupt!
log() << "compact skipping corrupt document!";
stats->corruptDocuments++;
}
else {
unsigned docSize = objOld.objsize();
nrecords++;
oldObjSize += docSize;
oldObjSizeWithPadding += recOld->netLength();
unsigned lenWHdr = docSize + Record::HeaderSize;
unsigned lenWPadding = lenWHdr;
// pick the new on-disk record size according to the padding policy
switch( compactOptions->paddingMode ) {
case CompactOptions::NONE:
if ( details()->isUserFlagSet(NamespaceDetails::Flag_UsePowerOf2Sizes) )
lenWPadding = details()->quantizePowerOf2AllocationSpace(lenWPadding);
break;
case CompactOptions::PRESERVE:
// if we are preserving the padding, the record should not change size
lenWPadding = recOld->lengthWithHeaders();
break;
case CompactOptions::MANUAL:
lenWPadding = compactOptions->computeRecordSize(lenWPadding);
// fall back to the unpadded size if the computed size is out of range
if (lenWPadding < lenWHdr || lenWPadding > BSONObjMaxUserSize / 2 ) {
lenWPadding = lenWHdr;
}
break;
}
CompactDocWriter writer( objOld, lenWPadding );
StatusWith<DiskLoc> status = _recordStore->insertRecord( &writer, 0 );
uassertStatusOK( status.getStatus() );
datasize += _recordStore->recordFor( status.getValue() )->netLength();
InsertDeleteOptions options;
options.logIfError = false;
options.dupsAllowed = true; // in compact we should be doing no checking
indexesToInsertTo.insert( objOld, status.getValue(), options );
}
if( L.isNull() ) {
// we just did the very last record from the old extent. it's still pointed to
// by the old extent ext, but that will be fixed below after this loop
break;
}
// remove the old records (orphan them) periodically so our commit block doesn't get too large
bool stopping = false;
RARELY stopping = *killCurrentOp.checkForInterruptNoAssert() != 0;
if( stopping || getDur().aCommitIsNeeded() ) {
// detach the already-copied prefix of the record chain, then commit so
// the journal batch stays bounded
e->firstRecord.writing() = L;
Record *r = L.rec();
getDur().writingInt(r->prevOfs()) = DiskLoc::NullOfs;
getDur().commitIfNeeded();
killCurrentOp.checkForInterrupt();
}
}
//.........这里部分代码省略.........
示例4: compactExtent
/** Compacts extent #n of namespace ns: pages the extent in sequentially,
 * re-allocates every valid record with padding derived from pf/pb, copies the
 * data, and collects index keys for the nidx indexes being rebuilt.
 * NOTE(review): the source page truncated this example, so the tail of the
 * function (after computing newFirst) is not visible here.
 * @return number of skipped (invalid) documents */
unsigned compactExtent(const char *ns, NamespaceDetails *d, const DiskLoc ext, int n,
const scoped_array<IndexSpec> &indexSpecs,
scoped_array<SortPhaseOne>& phase1, int nidx, bool validate,
double pf, int pb)
{
log() << "compact extent #" << n << endl;
unsigned oldObjSize = 0; // we'll report what the old padding was
unsigned oldObjSizeWithPadding = 0;
Extent *e = ext.ext();
e->assertOk();
assert( e->validates() );
unsigned skipped = 0;
{
// the next/prev pointers within the extent might not be in order so we first page the whole thing in
// sequentially
log() << "compact paging in len=" << e->length/1000000.0 << "MB" << endl;
Timer t;
MAdvise adv(e, e->length, MAdvise::Sequential);
const char *p = (const char *) e;
// touch one byte per 4KB page to fault the extent in; 'faux' is presumably a
// file-scope sink that keeps the loop from being optimized away — TODO confirm
for( int i = 0; i < e->length; i += 4096 ) {
faux += p[i];
}
int ms = t.millis();
if( ms > 1000 )
log() << "compact end paging in " << ms << "ms " << e->length/1000000.0/ms << "MB/sec" << endl;
}
{
log() << "compact copying records" << endl;
unsigned totalSize = 0;
int nrecs = 0;
DiskLoc L = e->firstRecord;
if( !L.isNull() ) {
while( 1 ) {
Record *recOld = L.rec();
// advance the cursor BEFORE the old record gets orphaned below
L = recOld->nextInExtent(L);
nrecs++;
BSONObj objOld(recOld);
if( !validate || objOld.valid() ) {
unsigned sz = objOld.objsize();
oldObjSize += sz;
oldObjSizeWithPadding += recOld->netLength();
unsigned lenWHdr = sz + Record::HeaderSize;
unsigned lenWPadding = lenWHdr;
{
// apply padding factor (pf) and padding bytes (pb), then quantize;
// fall back to the unpadded size if the result is out of range
lenWPadding = static_cast<unsigned>(pf*lenWPadding);
lenWPadding += pb;
lenWPadding = lenWPadding & quantizeMask(lenWPadding);
if( lenWPadding < lenWHdr || lenWPadding > BSONObjMaxUserSize / 2 ) {
lenWPadding = lenWHdr;
}
}
totalSize += lenWPadding;
DiskLoc loc = allocateSpaceForANewRecord(ns, d, lenWPadding, false);
uassert(14024, "compact error out of space during compaction", !loc.isNull());
Record *recNew = loc.rec();
recNew = (Record *) getDur().writingPtr(recNew, lenWHdr);
addRecordToRecListInExtent(recNew, loc);
memcpy(recNew->data, objOld.objdata(), sz);
{
// extract keys for all indexes we will be rebuilding
for( int x = 0; x < nidx; x++ ) {
phase1[x].addKeys(indexSpecs[x], objOld, loc);
}
}
}
else {
// cap log noise: only report the first ten invalid documents
if( ++skipped <= 10 )
log() << "compact skipping invalid object" << endl;
}
if( L.isNull() ) {
// we just did the very last record from the old extent. it's still pointed to
// by the old extent ext, but that will be fixed below after this loop
break;
}
// remove the old records (orphan them) periodically so our commit block doesn't get too large
bool stopping = false;
RARELY stopping = *killCurrentOp.checkForInterruptNoAssert() != 0;
if( stopping || getDur().aCommitIsNeeded() ) {
e->firstRecord.writing() = L;
Record *r = L.rec();
getDur().writingInt(r->prevOfs) = DiskLoc::NullOfs;
getDur().commitIfNeeded();
killCurrentOp.checkForInterrupt(false);
}
}
} // if !L.isNull()
// this version only compacts the first extent of the namespace
assert( d->firstExtent == ext );
assert( d->lastExtent != ext );
DiskLoc newFirst = e->xnext;
//.........这里部分代码省略.........
示例5: validateNS
// Validates namespace ns: walks the extent chain (asserting each extent),
// optionally scans all records (unless cmdObj has scandata:false) tallying
// sizes and capped-collection ordering, then inspects the deleted-record
// buckets.  Returns a human-readable report; 'valid' is flipped false on any
// failed check.
// NOTE(review): the source page truncated this example, so the end of the
// deleted-list walk and the function's return are not visible here.
string validateNS(const char *ns, NamespaceDetails *d, BSONObj *cmdObj) {
bool scanData = true;
if( cmdObj && cmdObj->hasElement("scandata") && !cmdObj->getBoolField("scandata") )
scanData = false;
bool valid = true;
stringstream ss;
ss << "\nvalidate\n";
//ss << " details: " << hex << d << " ofs:" << nsindex(ns)->detailsOffset(d) << dec << endl;
if ( d->capped )
ss << " capped:" << d->capped << " max:" << d->max << '\n';
ss << " firstExtent:" << d->firstExtent.toString() << " ns:" << d->firstExtent.ext()->nsDiagnostic.toString()<< '\n';
ss << " lastExtent:" << d->lastExtent.toString() << " ns:" << d->lastExtent.ext()->nsDiagnostic.toString() << '\n';
// walk and count the extent chain; any assertOk failure is reported below
try {
d->firstExtent.ext()->assertOk();
d->lastExtent.ext()->assertOk();
DiskLoc el = d->firstExtent;
int ne = 0;
while( !el.isNull() ) {
Extent *e = el.ext();
e->assertOk();
el = e->xnext;
ne++;
killCurrentOp.checkForInterrupt();
}
ss << " # extents:" << ne << '\n';
}
catch (...) {
valid=false;
ss << " extent asserted ";
}
ss << " datasize?:" << d->stats.datasize << " nrecords?:" << d->stats.nrecords << " lastExtentSize:" << d->lastExtentSize << '\n';
ss << " padding:" << d->paddingFactor << '\n';
try {
try {
ss << " first extent:\n";
d->firstExtent.ext()->dump(ss);
valid = valid && d->firstExtent.ext()->validates();
}
catch (...) {
ss << "\n exception firstextent\n" << endl;
}
set<DiskLoc> recs;
if( scanData ) {
shared_ptr<Cursor> c = theDataFileMgr.findAll(ns);
int n = 0;
long long len = 0;
long long nlen = 0;
int outOfOrder = 0;
DiskLoc cl_last;
while ( c->ok() ) {
n++;
DiskLoc cl = c->currLoc();
// remember record locations (capped at 1M) to cross-check the free list
if ( n < 1000000 )
recs.insert(cl);
if ( d->capped ) {
// capped collections should yield records in increasing DiskLoc order
if ( cl < cl_last )
outOfOrder++;
cl_last = cl;
}
Record *r = c->_current();
len += r->lengthWithHeaders;
nlen += r->netLength();
c->advance();
}
if ( d->capped && !d->capLooped() ) {
ss << " capped outOfOrder:" << outOfOrder;
if ( outOfOrder > 1 ) {
valid = false;
ss << " ???";
}
else ss << " (OK)";
ss << '\n';
}
ss << " " << n << " objects found, nobj:" << d->stats.nrecords << '\n';
ss << " " << len << " bytes data w/headers\n";
ss << " " << nlen << " bytes data wout/headers\n";
}
// one character per bucket: '1' if that deleted-record bucket is non-empty
ss << " deletedList: ";
for ( int i = 0; i < Buckets; i++ ) {
ss << (d->deletedList[i].isNull() ? '0' : '1');
}
ss << endl;
int ndel = 0;
long long delSize = 0;
int incorrect = 0;
for ( int i = 0; i < Buckets; i++ ) {
DiskLoc loc = d->deletedList[i];
try {
int k = 0;
while ( !loc.isNull() ) {
// a live record on the deleted list is a corruption indicator
if ( recs.count(loc) )
incorrect++;
//.........这里部分代码省略.........
示例6: compactExtent
/** Compacts the extent at diskloc (extent #n of namespace ns): pages the
 * extent in via touch_pages on the data file handle, then re-allocates every
 * valid record with pf/pb-derived padding, copies the data, and collects index
 * keys for the nidx indexes being rebuilt.
 * NOTE(review): the source page truncated this example, so the tail of the
 * function after the firstExtent check is not visible here.
 * @return number of skipped (invalid) documents */
unsigned compactExtent(const char *ns, NamespaceDetails *d, const DiskLoc diskloc, int n,
const scoped_array<IndexSpec> &indexSpecs,
scoped_array<SortPhaseOne>& phase1, int nidx, bool validate,
double pf, int pb)
{
log() << "compact begin extent #" << n << " for namespace " << ns << endl;
unsigned oldObjSize = 0; // we'll report what the old padding was
unsigned oldObjSizeWithPadding = 0;
Extent *e = diskloc.ext();
e->assertOk();
verify( e->validates() );
unsigned skipped = 0;
{
// the next/prev pointers within the extent might not be in order so we first page the whole thing in
// sequentially
log() << "compact paging in len=" << e->length/1000000.0 << "MB" << endl;
Timer t;
MongoDataFile* mdf = cc().database()->getFile( diskloc.a() );
HANDLE fd = mdf->getFd();
int offset = diskloc.getOfs();
Extent* ext = diskloc.ext();
size_t length = ext->length;
touch_pages(fd, offset, length, ext);
int ms = t.millis();
if( ms > 1000 )
log() << "compact end paging in " << ms << "ms " << e->length/1000000.0/ms << "MB/sec" << endl;
}
{
log() << "compact copying records" << endl;
long long datasize = 0;
long long nrecords = 0;
DiskLoc L = e->firstRecord;
if( !L.isNull() ) {
while( 1 ) {
Record *recOld = L.rec();
// advance the cursor BEFORE the old record gets orphaned below
L = recOld->nextInExtent(L);
BSONObj objOld = BSONObj::make(recOld);
if( !validate || objOld.valid() ) {
nrecords++;
unsigned sz = objOld.objsize();
oldObjSize += sz;
oldObjSizeWithPadding += recOld->netLength();
unsigned lenWHdr = sz + Record::HeaderSize;
unsigned lenWPadding = lenWHdr;
{
// apply padding factor (pf) and padding bytes (pb), then quantize;
// fall back to the unpadded size if the result is out of range
lenWPadding = static_cast<unsigned>(pf*lenWPadding);
lenWPadding += pb;
lenWPadding = lenWPadding & quantizeMask(lenWPadding);
if( lenWPadding < lenWHdr || lenWPadding > BSONObjMaxUserSize / 2 ) {
lenWPadding = lenWHdr;
}
}
DiskLoc loc = allocateSpaceForANewRecord(ns, d, lenWPadding, false);
uassert(14024, "compact error out of space during compaction", !loc.isNull());
Record *recNew = loc.rec();
datasize += recNew->netLength();
recNew = (Record *) getDur().writingPtr(recNew, lenWHdr);
addRecordToRecListInExtent(recNew, loc);
memcpy(recNew->data(), objOld.objdata(), sz);
{
// extract keys for all indexes we will be rebuilding
for( int x = 0; x < nidx; x++ ) {
phase1[x].addKeys(indexSpecs[x], objOld, loc);
}
}
}
else {
// cap log noise: only report the first ten invalid documents
if( ++skipped <= 10 )
log() << "compact skipping invalid object" << endl;
}
if( L.isNull() ) {
// we just did the very last record from the old extent. it's still pointed to
// by the old extent ext, but that will be fixed below after this loop
break;
}
// remove the old records (orphan them) periodically so our commit block doesn't get too large
bool stopping = false;
RARELY stopping = *killCurrentOp.checkForInterruptNoAssert() != 0;
if( stopping || getDur().aCommitIsNeeded() ) {
e->firstRecord.writing() = L;
Record *r = L.rec();
getDur().writingInt(r->prevOfs()) = DiskLoc::NullOfs;
getDur().commitIfNeeded();
killCurrentOp.checkForInterrupt(false);
}
}
} // if !L.isNull()
// this version only compacts the first extent of the namespace
verify( d->firstExtent == diskloc );
//.........这里部分代码省略.........
示例7: compactExtent
/** Compacts the extent at diskloc (extent #n of namespace ns): pages the
 * extent in sequentially, then re-allocates every valid record with padding
 * chosen by preservePadding / useDefaultPadding / pf+pb, and copies the data.
 * NOTE(review): the source page truncated this example, so the tail of the
 * function after the record-copy loop is not visible here.
 * @return number of skipped (invalid) documents */
unsigned compactExtent(const char *ns, NamespaceDetails *d, const DiskLoc diskloc, int n,
int nidx, bool validate, double pf, int pb, bool useDefaultPadding,
bool preservePadding) {
log() << "compact begin extent #" << n << " for namespace " << ns << endl;
unsigned oldObjSize = 0; // we'll report what the old padding was
unsigned oldObjSizeWithPadding = 0;
Extent *e = diskloc.ext();
e->assertOk();
verify( e->validates(diskloc) );
unsigned skipped = 0;
Database* db = cc().database();
{
// the next/prev pointers within the extent might not be in order so we first
// page the whole thing in sequentially
log() << "compact paging in len=" << e->length/1000000.0 << "MB" << endl;
Timer t;
Extent* ext = db->getExtentManager().getExtent( diskloc );
size_t length = ext->length;
touch_pages( reinterpret_cast<const char*>(ext), length );
int ms = t.millis();
if( ms > 1000 )
log() << "compact end paging in " << ms << "ms "
<< e->length/1000000.0/ms << "MB/sec" << endl;
}
{
log() << "compact copying records" << endl;
long long datasize = 0;
long long nrecords = 0;
DiskLoc L = e->firstRecord;
if( !L.isNull() ) {
while( 1 ) {
Record *recOld = L.rec();
// advance the cursor BEFORE the old record gets orphaned below
L = db->getExtentManager().getNextRecordInExtent(L);
BSONObj objOld = BSONObj::make(recOld);
if( !validate || objOld.valid() ) {
nrecords++;
unsigned sz = objOld.objsize();
oldObjSize += sz;
oldObjSizeWithPadding += recOld->netLength();
unsigned lenWHdr = sz + Record::HeaderSize;
unsigned lenWPadding = lenWHdr;
// if we are preserving the padding, the record should not change size
if (preservePadding) {
lenWPadding = recOld->lengthWithHeaders();
}
// maintain UsePowerOf2Sizes if no padding values were passed in
else if (d->isUserFlagSet(NamespaceDetails::Flag_UsePowerOf2Sizes)
&& useDefaultPadding) {
lenWPadding = d->quantizePowerOf2AllocationSpace(lenWPadding);
}
// otherwise use the padding values (pf and pb) that were passed in
else {
lenWPadding = static_cast<unsigned>(pf*lenWPadding);
lenWPadding += pb;
lenWPadding = lenWPadding & quantizeMask(lenWPadding);
}
// fall back to the unpadded size if the chosen size is out of range
if (lenWPadding < lenWHdr || lenWPadding > BSONObjMaxUserSize / 2 ) {
lenWPadding = lenWHdr;
}
DiskLoc loc = allocateSpaceForANewRecord(ns, d, lenWPadding, false);
uassert(14024, "compact error out of space during compaction", !loc.isNull());
Record *recNew = loc.rec();
datasize += recNew->netLength();
recNew = (Record *) getDur().writingPtr(recNew, lenWHdr);
addRecordToRecListInExtent(recNew, loc);
memcpy(recNew->data(), objOld.objdata(), sz);
}
else {
// cap log noise: only report the first ten invalid documents
if( ++skipped <= 10 )
log() << "compact skipping invalid object" << endl;
}
if( L.isNull() ) {
// we just did the very last record from the old extent. it's still pointed to
// by the old extent ext, but that will be fixed below after this loop
break;
}
// remove the old records (orphan them) periodically so our commit block doesn't get too large
bool stopping = false;
RARELY stopping = *killCurrentOp.checkForInterruptNoAssert() != 0;
if( stopping || getDur().aCommitIsNeeded() ) {
e->firstRecord.writing() = L;
Record *r = L.rec();
getDur().writingInt(r->prevOfs()) = DiskLoc::NullOfs;
getDur().commitIfNeeded();
killCurrentOp.checkForInterrupt(false);
}
}
} // if !L.isNull()
//.........这里部分代码省略.........
示例8: writer
// Compacts one extent of this record store (extent #extentNumber at diskloc):
// pages the extent in sequentially, then re-inserts every record that passes
// the adaptor's validity check, sizing the new record per the requested
// padding mode, and notifies the adaptor of each insertion.  Invalid records
// are skipped and counted in stats->corruptDocuments.  All durable writes go
// through txn's recovery unit.
// NOTE(review): the source page truncated this example, so the tail of the
// function after the record-copy loop is not visible here.
void SimpleRecordStoreV1::_compactExtent(OperationContext* txn,
const DiskLoc diskloc,
int extentNumber,
RecordStoreCompactAdaptor* adaptor,
const CompactOptions* compactOptions,
CompactStats* stats ) {
log() << "compact begin extent #" << extentNumber
<< " for namespace " << _ns << " " << diskloc;
unsigned oldObjSize = 0; // we'll report what the old padding was
unsigned oldObjSizeWithPadding = 0;
Extent *e = _extentManager->getExtent( diskloc );
e->assertOk();
fassert( 17437, e->validates(diskloc) );
{
// the next/prev pointers within the extent might not be in order so we first
// page the whole thing in sequentially
log() << "compact paging in len=" << e->length/1000000.0 << "MB" << endl;
Timer t;
size_t length = e->length;
touch_pages( reinterpret_cast<const char*>(e), length );
int ms = t.millis();
if( ms > 1000 )
log() << "compact end paging in " << ms << "ms "
<< e->length/1000000.0/t.seconds() << "MB/sec" << endl;
}
{
log() << "compact copying records" << endl;
long long datasize = 0;
long long nrecords = 0;
DiskLoc L = e->firstRecord;
if( !L.isNull() ) {
while( 1 ) {
Record *recOld = recordFor(L);
// advance the cursor BEFORE the old record gets orphaned below
L = getNextRecordInExtent(L);
if ( compactOptions->validateDocuments && !adaptor->isDataValid(recOld) ) {
// object is corrupt!
log() << "compact skipping corrupt document!";
stats->corruptDocuments++;
}
else {
unsigned dataSize = adaptor->dataSize( recOld );
unsigned docSize = dataSize;
nrecords++;
oldObjSize += docSize;
oldObjSizeWithPadding += recOld->netLength();
unsigned lenWHdr = docSize + Record::HeaderSize;
unsigned lenWPadding = lenWHdr;
// pick the new on-disk record size according to the padding policy
switch( compactOptions->paddingMode ) {
case CompactOptions::NONE:
if ( _details->isUserFlagSet(Flag_UsePowerOf2Sizes) )
lenWPadding = quantizePowerOf2AllocationSpace(lenWPadding);
break;
case CompactOptions::PRESERVE:
// if we are preserving the padding, the record should not change size
lenWPadding = recOld->lengthWithHeaders();
break;
case CompactOptions::MANUAL:
lenWPadding = compactOptions->computeRecordSize(lenWPadding);
// fall back to the unpadded size if the computed size is out of range
if (lenWPadding < lenWHdr || lenWPadding > BSONObjMaxUserSize / 2 ) {
lenWPadding = lenWHdr;
}
break;
}
CompactDocWriter writer( recOld, dataSize, lenWPadding );
StatusWith<DiskLoc> status = insertRecord( txn, &writer, 0 );
uassertStatusOK( status.getStatus() );
datasize += recordFor( status.getValue() )->netLength();
adaptor->inserted( recordFor( status.getValue() ), status.getValue() );
}
if( L.isNull() ) {
// we just did the very last record from the old extent. it's still pointed to
// by the old extent ext, but that will be fixed below after this loop
break;
}
// remove the old records (orphan them) periodically so our commit block doesn't get too large
bool stopping = false;
RARELY stopping = !txn->checkForInterruptNoAssert().isOK();
if( stopping || txn->recoveryUnit()->isCommitNeeded() ) {
// detach the already-copied prefix of the record chain, then commit so
// the recovery unit's pending batch stays bounded
*txn->recoveryUnit()->writing(&e->firstRecord) = L;
Record *r = recordFor(L);
txn->recoveryUnit()->writingInt(r->prevOfs()) = DiskLoc::NullOfs;
txn->recoveryUnit()->commitIfNeeded();
txn->checkForInterrupt();
}
}
} // if !L.isNull()
//.........这里部分代码省略.........
示例9: validateNS
// Validates namespace ns, appending findings into 'result': records capped
// info, walks and asserts the extent chain, optionally scans all records
// (when "full" or "scandata" is set in cmdObj) tallying sizes, capped
// ordering, and — in full mode — per-document BSON validity.
// NOTE(review): the source page truncated this example, so the remainder of
// the scan and the function's conclusion are not visible here.
void validateNS(const char *ns, NamespaceDetails *d, const BSONObj& cmdObj, BSONObjBuilder& result) {
const bool full = cmdObj["full"].trueValue();
const bool scanData = full || cmdObj["scandata"].trueValue();
bool valid = true;
BSONArrayBuilder errors; // explanation(s) for why valid = false
if ( d->isCapped() ){
result.append("capped", d->isCapped());
result.appendNumber("max", d->maxCappedDocs());
}
result.append("firstExtent", str::stream() << d->firstExtent.toString() << " ns:" << d->firstExtent.ext()->nsDiagnostic.toString());
result.append( "lastExtent", str::stream() << d->lastExtent.toString() << " ns:" << d->lastExtent.ext()->nsDiagnostic.toString());
BSONArrayBuilder extentData;
// walk and count the extent chain; any assertOk failure is reported below
try {
d->firstExtent.ext()->assertOk();
d->lastExtent.ext()->assertOk();
DiskLoc el = d->firstExtent;
int ne = 0;
while( !el.isNull() ) {
Extent *e = el.ext();
e->assertOk();
el = e->xnext;
ne++;
if ( full )
extentData << e->dump();
killCurrentOp.checkForInterrupt();
}
result.append("extentCount", ne);
}
catch (...) {
valid=false;
errors << "extent asserted";
}
if ( full )
result.appendArray( "extents" , extentData.arr() );
result.appendNumber("datasize", d->stats.datasize);
result.appendNumber("nrecords", d->stats.nrecords);
result.appendNumber("lastExtentSize", d->lastExtentSize);
result.appendNumber("padding", d->paddingFactor());
try {
try {
result.append("firstExtentDetails", d->firstExtent.ext()->dump());
valid = valid && d->firstExtent.ext()->validates() &&
d->firstExtent.ext()->xprev.isNull();
}
catch (...) {
errors << "exception firstextent";
valid = false;
}
set<DiskLoc> recs;
if( scanData ) {
shared_ptr<Cursor> c = theDataFileMgr.findAll(ns);
int n = 0;
int nInvalid = 0;
long long len = 0;
long long nlen = 0;
int outOfOrder = 0;
DiskLoc cl_last;
while ( c->ok() ) {
n++;
DiskLoc cl = c->currLoc();
// remember record locations (capped at 1M) for later cross-checks
if ( n < 1000000 )
recs.insert(cl);
if ( d->isCapped() ) {
// capped collections should yield records in increasing DiskLoc order
if ( cl < cl_last )
outOfOrder++;
cl_last = cl;
}
Record *r = c->_current();
len += r->lengthWithHeaders();
nlen += r->netLength();
if (full){
BSONObj obj = BSONObj::make(r);
if (!obj.isValid() || !obj.valid()){ // both fast and deep checks
valid = false;
if (nInvalid == 0) // only log once;
errors << "invalid bson object detected (see logs for more info)";
nInvalid++;
if (strcmp("_id", obj.firstElementFieldName()) == 0){
try {
obj.firstElement().validate(); // throws on error
log() << "Invalid bson detected in " << ns << " with _id: " << obj.firstElement().toString(false) << endl;
}
//.........这里部分代码省略.........