This page collects typical usage examples of the C++ method BSONObj::valid. If you are wondering what BSONObj::valid does, how to call it, or where to find real-world uses of it, the curated code samples below may help. You can also explore further usage examples of its containing class, BSONObj.
Below are 15 code examples of the BSONObj::valid method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ samples.
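Before the project examples, here is a minimal, self-contained sketch of the method's basic contract: valid() performs a deep structural scan of the object's raw buffer and returns false for corrupt data. This sketch assumes the legacy MongoDB source tree (the "mongo/db/jsobj.h" header and the no-argument valid() signature; newer versions take a BSONVersion argument, as Example 3 shows), and the corruption below is contrived for illustration:

#include <cstring>
#include <iostream>
#include "mongo/db/jsobj.h" // legacy source-tree header for BSONObj/BSONObjBuilder (assumption)

int main() {
    mongo::BSONObjBuilder b;
    b.append( "a", 17 );
    mongo::BSONObj obj = b.obj();

    // A freshly built object passes the structural scan.
    std::cout << std::boolalpha << "fresh: " << obj.valid() << std::endl; // true

    // Copy the raw buffer and clobber the first element's type byte with a
    // value that is not a legal BSON type. The 4-byte total-size prefix stays
    // sane, so construction succeeds, but the deep scan then fails.
    char buf[64];
    std::memcpy( buf, obj.objdata(), obj.objsize() );
    buf[4] = 0x3f; // 0x3f is not a valid BSON element type
    mongo::BSONObj broken( buf );
    std::cout << "corrupted: " << broken.valid() << std::endl; // false
    return 0;
}

Example 1 below exercises the same check in a unit-test context.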
Example 1: run
void run() {
    client.insert( _a , BSON( "a" << "17" ) );
    {
        BSONObj fromA = client.findOne( _a , BSONObj() );
        assert( fromA.valid() );
        //cout << "Froma : " << fromA << endl;
        BSONObjBuilder b;
        b.append( "b" , 18 );
        b.appendDBRef( "c" , "dbref.a" , fromA["_id"].__oid() );
        client.insert( _b , b.obj() );
    }

    ASSERT( client.eval( "unittest" , "x = db.dbref.b.findOne(); assert.eq( 17 , x.c.fetch().a , 'ref working' );" ) );

    // BSON DBRef <=> JS DBPointer
    ASSERT( client.eval( "unittest", "x = db.dbref.b.findOne(); db.dbref.b.drop(); x.c = new DBPointer( x.c.ns, x.c.id ); db.dbref.b.insert( x );" ) );
    ASSERT_EQUALS( DBRef, client.findOne( "unittest.dbref.b", "" )[ "c" ].type() );

    // BSON Object <=> JS DBRef
    ASSERT( client.eval( "unittest", "x = db.dbref.b.findOne(); db.dbref.b.drop(); x.c = new DBRef( x.c.ns, x.c.id ); db.dbref.b.insert( x );" ) );
    ASSERT_EQUALS( Object, client.findOne( "unittest.dbref.b", "" )[ "c" ].type() );
    ASSERT_EQUALS( string( "dbref.a" ), client.findOne( "unittest.dbref.b", "" )[ "c" ].embeddedObject().getStringField( "$ref" ) );
}
Example 2: nextJsObj
BSONObj nextJsObj( const char *context ) {
    BSONObj ret = DbMessage::nextJsObj();
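    // objcheck presumably mirrors the mongod --objcheck option, which turns on
    // validation of BSON objects arriving in client messages (assumption from context).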
    if ( objcheck && !ret.valid() ) {
        // TODO provide more debugging info
        cout << "invalid object in " << context << ": " << ret.hexDump() << endl;
    }
    return ret;
}
Example 3: recordUpdate
void LastError::recordUpdate(bool updateObjects, long long nObjects, BSONObj upsertedId) {
    reset(true);
    _nObjects = nObjects;
    _updatedExisting = updateObjects ? True : False;

    // Use the latest BSON validation version. We record updates containing decimal data even
    // if decimal is disabled.
    if (upsertedId.valid(BSONVersion::kLatest) && upsertedId.hasField(kUpsertedFieldName))
        _upsertedId = upsertedId;
}
Example 4: copy
/* copy the specified collection
   isindex - if true, this is system.indexes collection, in which we do some transformation when copying.
*/
void Cloner::copy(const char *from_collection, const char *to_collection, bool isindex, bool logForRepl, bool masterSameProcess, bool slaveOk, BSONObj query) {
    auto_ptr<DBClientCursor> c;
    {
        dbtemprelease r;
        c = conn->query( from_collection, query, 0, 0, 0, slaveOk ? Option_SlaveOk : 0 );
    }

    assert( c.get() );
    long long n = 0;
    time_t saveLast = time( 0 );
    while ( 1 ) {
        {
            dbtemprelease r;
            if ( !c->more() )
                break;
        }

        BSONObj tmp = c->next();

        /* assure object is valid. note this will slow us down a good bit. */
        if ( !tmp.valid() ) {
            out() << "skipping corrupt object from " << from_collection << '\n';
            continue;
        }

        ++n;

        BSONObj js = tmp;
        if ( isindex ) {
            assert( strstr(from_collection, "system.indexes") );
            js = fixindex(tmp);
        }

        try {
            theDataFileMgr.insert(to_collection, js);
            if ( logForRepl )
                logOp("i", to_collection, js);
        }
        catch( UserException& e ) {
            log() << "warning: exception cloning object in " << from_collection << ' ' << e.what() << " obj:" << js.toString() << '\n';
        }

        RARELY if ( time( 0 ) - saveLast > 60 ) {
            log() << n << " objects cloned so far from collection " << from_collection << endl;
            saveLast = time( 0 );
        }
    }
}
Example 5: _repairExtents
void _repairExtents(OperationContext* opCtx, Collection* coll, Writer& writer) {
    scoped_ptr<RecordIterator> iter(coll->getRecordStore()->getIteratorForRepair(opCtx));

    for (DiskLoc currLoc = iter->getNext(); !currLoc.isNull(); currLoc = iter->getNext()) {
        if (logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(1))) {
            toolInfoLog() << currLoc << std::endl;
        }

        BSONObj obj;
        try {
            obj = coll->docFor(opCtx, currLoc);

            // If this is a corrupted object, just skip it, but do not abort the scan
            if (!obj.valid()) {
                continue;
            }

            if (logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(1))) {
                toolInfoLog() << obj << std::endl;
            }

            writer(obj);
        }
        catch ( std::exception& e ) {
            toolError() << "found invalid document @ " << currLoc << " " << e.what() << std::endl;
            if ( ! obj.isEmpty() ) {
                try {
                    BSONElement e = obj.firstElement();
                    stringstream ss;
                    ss << "first element: " << e;
                    toolError() << ss.str() << std::endl;
                }
                catch ( std::exception& ) {
                    toolError() << "unable to log invalid document @ " << currLoc << std::endl;
                }
            }
        }
    }
}
Example 6: _compactExtent
void Collection::_compactExtent(const DiskLoc diskloc, int extentNumber,
                                MultiIndexBlock& indexesToInsertTo,
                                const CompactOptions* compactOptions, CompactStats* stats ) {

    log() << "compact begin extent #" << extentNumber
          << " for namespace " << _ns << " " << diskloc;

    unsigned oldObjSize = 0; // we'll report what the old padding was
    unsigned oldObjSizeWithPadding = 0;

    Extent *e = diskloc.ext();
    e->assertOk();
    verify( e->validates(diskloc) );

    {
        // the next/prev pointers within the extent might not be in order so we first
        // page the whole thing in sequentially
        log() << "compact paging in len=" << e->length/1000000.0 << "MB" << endl;
        Timer t;
        size_t length = e->length;

        touch_pages( reinterpret_cast<const char*>(e), length );
        int ms = t.millis();
        if( ms > 1000 )
            log() << "compact end paging in " << ms << "ms "
                  << e->length/1000000.0/t.seconds() << "MB/sec" << endl;
    }

    {
        log() << "compact copying records" << endl;
        long long datasize = 0;
        long long nrecords = 0;
        DiskLoc L = e->firstRecord;
        if( !L.isNull() ) {
            while( 1 ) {
                Record *recOld = L.rec();
                L = getExtentManager()->getNextRecordInExtent(L);
                BSONObj objOld = BSONObj::make(recOld);

                if ( compactOptions->validateDocuments && !objOld.valid() ) {
                    // object is corrupt!
                    log() << "compact skipping corrupt document!";
                    stats->corruptDocuments++;
                }
                else {
                    unsigned docSize = objOld.objsize();

                    nrecords++;
                    oldObjSize += docSize;
                    oldObjSizeWithPadding += recOld->netLength();

                    unsigned lenWHdr = docSize + Record::HeaderSize;
                    unsigned lenWPadding = lenWHdr;

                    switch( compactOptions->paddingMode ) {
                    case CompactOptions::NONE:
                        if ( details()->isUserFlagSet(NamespaceDetails::Flag_UsePowerOf2Sizes) )
                            lenWPadding = details()->quantizePowerOf2AllocationSpace(lenWPadding);
                        break;
                    case CompactOptions::PRESERVE:
                        // if we are preserving the padding, the record should not change size
                        lenWPadding = recOld->lengthWithHeaders();
                        break;
                    case CompactOptions::MANUAL:
                        lenWPadding = compactOptions->computeRecordSize(lenWPadding);
                        if (lenWPadding < lenWHdr || lenWPadding > BSONObjMaxUserSize / 2 ) {
                            lenWPadding = lenWHdr;
                        }
                        break;
                    }

                    CompactDocWriter writer( objOld, lenWPadding );
                    StatusWith<DiskLoc> status = _recordStore->insertRecord( &writer, 0 );
                    uassertStatusOK( status.getStatus() );
                    datasize += _recordStore->recordFor( status.getValue() )->netLength();

                    InsertDeleteOptions options;
                    options.logIfError = false;
                    options.dupsAllowed = true; // in compact we should be doing no checking
                    indexesToInsertTo.insert( objOld, status.getValue(), options );
                }

                if( L.isNull() ) {
                    // we just did the very last record from the old extent. it's still pointed to
                    // by the old extent ext, but that will be fixed below after this loop
                    break;
                }

                // remove the old records (orphan them) periodically so our commit block doesn't get too large
                bool stopping = false;
                RARELY stopping = *killCurrentOp.checkForInterruptNoAssert() != 0;
                if( stopping || getDur().aCommitIsNeeded() ) {
                    e->firstRecord.writing() = L;
                    Record *r = L.rec();
                    getDur().writingInt(r->prevOfs()) = DiskLoc::NullOfs;
                    getDur().commitIfNeeded();
                    killCurrentOp.checkForInterrupt();
                }
            }
//.........some code omitted here.........
Example 7: operator()
void operator()( DBClientCursorBatchIterator &i ) {
    mongolock l( true );
    if ( context ) {
        context->relocked();
    }

    while( i.moreInCurrentBatch() ) {
        if ( n % 128 == 127 /*yield some*/ ) {
            time_t now = time(0);
            if( now - lastLog >= 60 ) {
                // report progress
                if( lastLog )
                    log() << "clone " << to_collection << ' ' << n << endl;
                lastLog = now;
            }
            mayInterrupt( _mayBeInterrupted );
            dbtempreleaseif t( _mayYield );
        }

        BSONObj tmp = i.nextSafe();

        /* assure object is valid. note this will slow us down a little. */
        if ( !tmp.valid() ) {
            stringstream ss;
            ss << "Cloner: skipping corrupt object from " << from_collection;
            BSONElement e = tmp.firstElement();
            try {
                e.validate();
                ss << " firstElement: " << e;
            }
            catch( ... ) {
                ss << " firstElement corrupt";
            }
            out() << ss.str() << endl;
            continue;
        }

        ++n;

        BSONObj js = tmp;
        if ( isindex ) {
            verify( strstr(from_collection, "system.indexes") );
            js = fixindex(tmp);
            storedForLater->push_back( js.getOwned() );
            continue;
        }

        try {
            theDataFileMgr.insertWithObjMod(to_collection, js);
            if ( logForRepl )
                logOp("i", to_collection, js);
            getDur().commitIfNeeded();
        }
        catch( UserException& e ) {
            log() << "warning: exception cloning object in " << from_collection << ' ' << e.what() << " obj:" << js.toString() << '\n';
        }

        RARELY if ( time( 0 ) - saveLast > 60 ) {
            log() << n << " objects cloned so far from collection " << from_collection << endl;
            saveLast = time( 0 );
        }
    }
}
Example 8: copy
/* copy the specified collection
   isindex - if true, this is system.indexes collection, in which we do some transformation when copying.
*/
void Cloner::copy(const char *from_collection, const char *to_collection, bool isindex, bool logForRepl, bool masterSameProcess, bool slaveOk, Query query) {
    auto_ptr<DBClientCursor> c;
    {
        dbtemprelease r;
        c = conn->query( from_collection, query, 0, 0, 0, Option_NoCursorTimeout | ( slaveOk ? Option_SlaveOk : 0 ) );
    }

    list<BSONObj> storedForLater;

    assert( c.get() );
    long long n = 0;
    time_t saveLast = time( 0 );
    while ( 1 ) {
        {
            dbtemprelease r;
            if ( !c->more() )
                break;
        }

        BSONObj tmp = c->next();

        /* assure object is valid. note this will slow us down a little. */
        if ( !tmp.valid() ) {
            stringstream ss;
            ss << "skipping corrupt object from " << from_collection;
            BSONElement e = tmp.firstElement();
            try {
                e.validate();
                ss << " firstElement: " << e;
            }
            catch( ... ){
                ss << " firstElement corrupt";
            }
            out() << ss.str() << endl;
            continue;
        }

        ++n;

        BSONObj js = tmp;
        if ( isindex ) {
            assert( strstr(from_collection, "system.indexes") );
            js = fixindex(tmp);
            storedForLater.push_back( js.getOwned() );
            continue;
        }

        try {
            theDataFileMgr.insert(to_collection, js);
            if ( logForRepl )
                logOp("i", to_collection, js);
        }
        catch( UserException& e ) {
            log() << "warning: exception cloning object in " << from_collection << ' ' << e.what() << " obj:" << js.toString() << '\n';
        }

        RARELY if ( time( 0 ) - saveLast > 60 ) {
            log() << n << " objects cloned so far from collection " << from_collection << endl;
            saveLast = time( 0 );
        }
    }

    if ( storedForLater.size() ){
        for ( list<BSONObj>::iterator i = storedForLater.begin(); i!=storedForLater.end(); i++ ){
            BSONObj js = *i;
            try {
                theDataFileMgr.insert(to_collection, js);
                if ( logForRepl )
                    logOp("i", to_collection, js);
            }
            catch( UserException& e ) {
                log() << "warning: exception cloning object in " << from_collection << ' ' << e.what() << " obj:" << js.toString() << '\n';
            }
        }
    }
}
Example 9: _repairExtent
DiskLoc _repairExtent( Database* db , string ns, bool forward , DiskLoc eLoc , Writer& w ){
    LogIndentLevel lil;

    if ( eLoc.getOfs() <= 0 ){
        error() << "invalid extent ofs: " << eLoc.getOfs() << endl;
        return DiskLoc();
    }

    MongoDataFile * mdf = db->getFile( eLoc.a() );

    Extent * e = mdf->debug_getExtent( eLoc );
    if ( ! e->isOk() ){
        warning() << "Extent not ok magic: " << e->magic << " going to try to continue" << endl;
    }

    log() << "length:" << e->length << endl;

    LogIndentLevel lil2;

    set<DiskLoc> seen;

    DiskLoc loc = forward ? e->firstRecord : e->lastRecord;
    while ( ! loc.isNull() ){

        if ( ! seen.insert( loc ).second ) {
            error() << "infinite loop in extent, seen: " << loc << " before" << endl;
            break;
        }

        if ( loc.getOfs() <= 0 ){
            error() << "offset is 0 for record which should be impossible" << endl;
            break;
        }

        log(1) << loc << endl;

        Record* rec = loc.rec();

        BSONObj obj;
        try {
            obj = loc.obj();
            assert( obj.valid() );
            LOG(1) << obj << endl;
            w( obj );
        }
        catch ( std::exception& e ) {
            log() << "found invalid document @ " << loc << " " << e.what() << endl;
            if ( ! obj.isEmpty() ) {
                try {
                    BSONElement e = obj.firstElement();
                    stringstream ss;
                    ss << "first element: " << e;
                    log() << ss.str();
                }
                catch ( std::exception& ) {
                }
            }
        }

        loc = forward ? rec->getNext( loc ) : rec->getPrev( loc );
    }

    return forward ? e->xnext : e->xprev;
}
Example 10: compactExtent
/** @return number of skipped (invalid) documents */
unsigned compactExtent(const char *ns, NamespaceDetails *d, const DiskLoc diskloc, int n,
                       const scoped_array<IndexSpec> &indexSpecs,
                       scoped_array<SortPhaseOne>& phase1, int nidx, bool validate,
                       double pf, int pb)
{
    log() << "compact begin extent #" << n << " for namespace " << ns << endl;
    unsigned oldObjSize = 0; // we'll report what the old padding was
    unsigned oldObjSizeWithPadding = 0;

    Extent *e = diskloc.ext();
    e->assertOk();
    verify( e->validates() );

    unsigned skipped = 0;

    {
        // the next/prev pointers within the extent might not be in order so we first page the
        // whole thing in sequentially
        log() << "compact paging in len=" << e->length/1000000.0 << "MB" << endl;
        Timer t;
        MongoDataFile* mdf = cc().database()->getFile( diskloc.a() );
        HANDLE fd = mdf->getFd();
        int offset = diskloc.getOfs();
        Extent* ext = diskloc.ext();
        size_t length = ext->length;

        touch_pages(fd, offset, length, ext);
        int ms = t.millis();
        if( ms > 1000 )
            log() << "compact end paging in " << ms << "ms " << e->length/1000000.0/ms << "MB/sec" << endl;
    }

    {
        log() << "compact copying records" << endl;
        long long datasize = 0;
        long long nrecords = 0;
        DiskLoc L = e->firstRecord;
        if( !L.isNull() ) {
            while( 1 ) {
                Record *recOld = L.rec();
                L = recOld->nextInExtent(L);
                BSONObj objOld = BSONObj::make(recOld);

                if( !validate || objOld.valid() ) {
                    nrecords++;
                    unsigned sz = objOld.objsize();
                    oldObjSize += sz;
                    oldObjSizeWithPadding += recOld->netLength();

                    unsigned lenWHdr = sz + Record::HeaderSize;
                    unsigned lenWPadding = lenWHdr;
                    {
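                        // pf (padding factor) scales each record's allocation and pb adds a
                        // fixed number of bytes; quantizeMask then rounds the result. This
                        // reading of pf/pb is inferred from the parameters above.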
                        lenWPadding = static_cast<unsigned>(pf*lenWPadding);
                        lenWPadding += pb;
                        lenWPadding = lenWPadding & quantizeMask(lenWPadding);
                        if( lenWPadding < lenWHdr || lenWPadding > BSONObjMaxUserSize / 2 ) {
                            lenWPadding = lenWHdr;
                        }
                    }

                    DiskLoc loc = allocateSpaceForANewRecord(ns, d, lenWPadding, false);
                    uassert(14024, "compact error out of space during compaction", !loc.isNull());
                    Record *recNew = loc.rec();
                    datasize += recNew->netLength();
                    recNew = (Record *) getDur().writingPtr(recNew, lenWHdr);
                    addRecordToRecListInExtent(recNew, loc);
                    memcpy(recNew->data(), objOld.objdata(), sz);

                    {
                        // extract keys for all indexes we will be rebuilding
                        for( int x = 0; x < nidx; x++ ) {
                            phase1[x].addKeys(indexSpecs[x], objOld, loc);
                        }
                    }
                }
                else {
                    if( ++skipped <= 10 )
                        log() << "compact skipping invalid object" << endl;
                }

                if( L.isNull() ) {
                    // we just did the very last record from the old extent. it's still pointed to
                    // by the old extent ext, but that will be fixed below after this loop
                    break;
                }

                // remove the old records (orphan them) periodically so our commit block doesn't get too large
                bool stopping = false;
                RARELY stopping = *killCurrentOp.checkForInterruptNoAssert() != 0;
                if( stopping || getDur().aCommitIsNeeded() ) {
                    e->firstRecord.writing() = L;
                    Record *r = L.rec();
                    getDur().writingInt(r->prevOfs()) = DiskLoc::NullOfs;
                    getDur().commitIfNeeded();
                    killCurrentOp.checkForInterrupt(false);
                }
            }
        } // if !L.isNull()

        verify( d->firstExtent == diskloc );
//.........some code omitted here.........
Example 11: operator()
void operator()( DBClientCursorBatchIterator &i ) {
    Lock::GlobalWrite lk;
    if ( context ) {
        context->relocked();
    }

    while( i.moreInCurrentBatch() ) {
        if ( n % 128 == 127 /*yield some*/ ) {
            time_t now = time(0);
            if( now - lastLog >= 60 ) {
                // report progress
                if( lastLog )
                    log() << "clone " << to_collection << ' ' << n << endl;
                lastLog = now;
            }
            mayInterrupt( _mayBeInterrupted );
            dbtempreleaseif t( _mayYield );
        }

        BSONObj tmp = i.nextSafe();

        /* assure object is valid. note this will slow us down a little. */
        if ( !tmp.valid() ) {
            stringstream ss;
            ss << "Cloner: skipping corrupt object from " << from_collection;
            BSONElement e = tmp.firstElement();
            try {
                e.validate();
                ss << " firstElement: " << e;
            }
            catch( ... ) {
                ss << " firstElement corrupt";
            }
            out() << ss.str() << endl;
            continue;
        }

        ++n;

        BSONObj js = tmp;
        if ( isindex ) {
            verify( strstr(from_collection, "system.indexes") );
            js = fixindex(tmp);
            storedForLater->push_back( js.getOwned() );
            continue;
        }

        try {
            // add keys for presorting
            DiskLoc loc = theDataFileMgr.insertWithObjMod(to_collection, js);
            loc.assertOk();
            if (_sortersForIndex != NULL) {
                // add key to SortersForNS
                for (SortersForIndex::iterator iSorter = _sortersForIndex->begin();
                     iSorter != _sortersForIndex->end();
                     ++iSorter) {
                    iSorter->second.preSortPhase.addKeys(iSorter->second.spec, js,
                                                         loc, false);
                }
            }
            if ( logForRepl )
                logOp("i", to_collection, js);
            getDur().commitIfNeeded();
        }
        catch( UserException& e ) {
            error() << "error: exception cloning object in " << from_collection << ' ' << e.what() << " obj:" << js.toString() << '\n';
            throw;
        }

        RARELY if ( time( 0 ) - saveLast > 60 ) {
            log() << n << " objects cloned so far from collection " << from_collection << endl;
            saveLast = time( 0 );
        }
    }
}
Example 12: compactExtent
/** @return number of skipped (invalid) documents */
unsigned compactExtent(const char *ns, NamespaceDetails *d, const DiskLoc diskloc, int n,
                       int nidx, bool validate, double pf, int pb, bool useDefaultPadding,
                       bool preservePadding) {

    log() << "compact begin extent #" << n << " for namespace " << ns << endl;
    unsigned oldObjSize = 0; // we'll report what the old padding was
    unsigned oldObjSizeWithPadding = 0;

    Extent *e = diskloc.ext();
    e->assertOk();
    verify( e->validates(diskloc) );

    unsigned skipped = 0;

    Database* db = cc().database();

    {
        // the next/prev pointers within the extent might not be in order so we first
        // page the whole thing in sequentially
        log() << "compact paging in len=" << e->length/1000000.0 << "MB" << endl;
        Timer t;
        Extent* ext = db->getExtentManager().getExtent( diskloc );
        size_t length = ext->length;

        touch_pages( reinterpret_cast<const char*>(ext), length );
        int ms = t.millis();
        if( ms > 1000 )
            log() << "compact end paging in " << ms << "ms "
                  << e->length/1000000.0/ms << "MB/sec" << endl;
    }

    {
        log() << "compact copying records" << endl;
        long long datasize = 0;
        long long nrecords = 0;
        DiskLoc L = e->firstRecord;
        if( !L.isNull() ) {
            while( 1 ) {
                Record *recOld = L.rec();
                L = db->getExtentManager().getNextRecordInExtent(L);
                BSONObj objOld = BSONObj::make(recOld);

                if( !validate || objOld.valid() ) {
                    nrecords++;
                    unsigned sz = objOld.objsize();
                    oldObjSize += sz;
                    oldObjSizeWithPadding += recOld->netLength();

                    unsigned lenWHdr = sz + Record::HeaderSize;
                    unsigned lenWPadding = lenWHdr;

                    // if we are preserving the padding, the record should not change size
                    if (preservePadding) {
                        lenWPadding = recOld->lengthWithHeaders();
                    }
                    // maintain UsePowerOf2Sizes if no padding values were passed in
                    else if (d->isUserFlagSet(NamespaceDetails::Flag_UsePowerOf2Sizes)
                             && useDefaultPadding) {
                        lenWPadding = d->quantizePowerOf2AllocationSpace(lenWPadding);
                    }
                    // otherwise use the padding values (pf and pb) that were passed in
                    else {
                        lenWPadding = static_cast<unsigned>(pf*lenWPadding);
                        lenWPadding += pb;
                        lenWPadding = lenWPadding & quantizeMask(lenWPadding);
                    }
                    if (lenWPadding < lenWHdr || lenWPadding > BSONObjMaxUserSize / 2 ) {
                        lenWPadding = lenWHdr;
                    }

                    DiskLoc loc = allocateSpaceForANewRecord(ns, d, lenWPadding, false);
                    uassert(14024, "compact error out of space during compaction", !loc.isNull());
                    Record *recNew = loc.rec();
                    datasize += recNew->netLength();
                    recNew = (Record *) getDur().writingPtr(recNew, lenWHdr);
                    addRecordToRecListInExtent(recNew, loc);
                    memcpy(recNew->data(), objOld.objdata(), sz);
                }
                else {
                    if( ++skipped <= 10 )
                        log() << "compact skipping invalid object" << endl;
                }

                if( L.isNull() ) {
                    // we just did the very last record from the old extent. it's still pointed to
                    // by the old extent ext, but that will be fixed below after this loop
                    break;
                }

                // remove the old records (orphan them) periodically so our commit block doesn't get too large
                bool stopping = false;
                RARELY stopping = *killCurrentOp.checkForInterruptNoAssert() != 0;
                if( stopping || getDur().aCommitIsNeeded() ) {
                    e->firstRecord.writing() = L;
                    Record *r = L.rec();
                    getDur().writingInt(r->prevOfs()) = DiskLoc::NullOfs;
                    getDur().commitIfNeeded();
                    killCurrentOp.checkForInterrupt(false);
                }
            }
        } // if !L.isNull()
//.........some code omitted here.........
Example 13: validateNS
void validateNS(const string& ns,
                Collection* collection,
                const BSONObj& cmdObj,
                BSONObjBuilder& result) {

    const bool full = cmdObj["full"].trueValue();
    const bool scanData = full || cmdObj["scandata"].trueValue();

    NamespaceDetails* nsd = collection->details();

    bool valid = true;
    BSONArrayBuilder errors; // explanation(s) for why valid = false

    if ( collection->isCapped() ){
        result.append("capped", nsd->isCapped());
        result.appendNumber("max", nsd->maxCappedDocs());
    }

    if ( nsd->firstExtent().isNull() )
        result.append( "firstExtent", "null" );
    else
        result.append( "firstExtent", str::stream() << nsd->firstExtent().toString()
                       << " ns:" << nsd->firstExtent().ext()->nsDiagnostic.toString());
    if ( nsd->lastExtent().isNull() )
        result.append( "lastExtent", "null" );
    else
        result.append( "lastExtent", str::stream() << nsd->lastExtent().toString()
                       << " ns:" << nsd->lastExtent().ext()->nsDiagnostic.toString());

    BSONArrayBuilder extentData;
    int extentCount = 0;
    try {
        if ( !nsd->firstExtent().isNull() ) {
            nsd->firstExtent().ext()->assertOk();
            nsd->lastExtent().ext()->assertOk();
        }

        DiskLoc extentDiskLoc = nsd->firstExtent();
        while (!extentDiskLoc.isNull()) {
            Extent* thisExtent = extentDiskLoc.ext();
            if (full) {
                extentData << thisExtent->dump();
            }
            if (!thisExtent->validates(extentDiskLoc, &errors)) {
                valid = false;
            }
            DiskLoc nextDiskLoc = thisExtent->xnext;
            if (extentCount > 0 && !nextDiskLoc.isNull()
                && nextDiskLoc.ext()->xprev != extentDiskLoc) {
                StringBuilder sb;
                sb << "'xprev' pointer " << nextDiskLoc.ext()->xprev.toString()
                   << " in extent " << nextDiskLoc.toString()
                   << " does not point to extent " << extentDiskLoc.toString();
                errors << sb.str();
                valid = false;
            }
            if (nextDiskLoc.isNull() && extentDiskLoc != nsd->lastExtent()) {
                StringBuilder sb;
                sb << "'lastExtent' pointer " << nsd->lastExtent().toString()
                   << " does not point to last extent in list " << extentDiskLoc.toString();
                errors << sb.str();
                valid = false;
            }
            extentDiskLoc = nextDiskLoc;
            extentCount++;
            killCurrentOp.checkForInterrupt();
        }
    }
    catch (const DBException& e) {
        StringBuilder sb;
        sb << "exception validating extent " << extentCount
           << ": " << e.what();
        errors << sb.str();
        valid = false;
    }
    result.append("extentCount", extentCount);

    if ( full )
        result.appendArray( "extents" , extentData.arr() );

    result.appendNumber("datasize", nsd->dataSize());
    result.appendNumber("nrecords", nsd->numRecords());
    result.appendNumber("lastExtentSize", nsd->lastExtentSize());
    result.appendNumber("padding", nsd->paddingFactor());

    try {
        bool testingLastExtent = false;
        try {
            if (nsd->firstExtent().isNull()) {
                // this is ok
            }
            else {
                result.append("firstExtentDetails", nsd->firstExtent().ext()->dump());
                if (!nsd->firstExtent().ext()->xprev.isNull()) {
                    StringBuilder sb;
                    sb << "'xprev' pointer in 'firstExtent' " << nsd->firstExtent().toString()
                       << " is " << nsd->firstExtent().ext()->xprev.toString()
                       << ", should be null";
                    errors << sb.str();
//.........some code omitted here.........
Example 14: _repairExtent
DiskLoc _repairExtent( Database* db , string ns, bool forward , DiskLoc eLoc , Writer& w ){
    LogIndentLevel lil;

    if ( eLoc.getOfs() <= 0 ){
        toolError() << "invalid extent ofs: " << eLoc.getOfs() << std::endl;
        return DiskLoc();
    }

    const ExtentManager& extentManager = db->getExtentManager();

    Extent* e = extentManager.getExtent( eLoc, false );
    if ( ! e->isOk() ){
        toolError() << "Extent not ok magic: " << e->magic << " going to try to continue"
                    << std::endl;
    }

    toolInfoLog() << "length:" << e->length << std::endl;

    LogIndentLevel lil2;

    set<DiskLoc> seen;

    DiskLoc loc = forward ? e->firstRecord : e->lastRecord;
    while ( ! loc.isNull() ){

        if ( ! seen.insert( loc ).second ) {
            toolError() << "infinite loop in extent, seen: " << loc << " before" << std::endl;
            break;
        }

        if ( loc.getOfs() <= 0 ){
            toolError() << "offset is 0 for record which should be impossible" << std::endl;
            break;
        }

        if (logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(1))) {
            toolInfoLog() << loc << std::endl;
        }

        BSONObj obj;
        try {
            obj = loc.obj();
            verify( obj.valid() );
            if (logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(1))) {
                toolInfoLog() << obj << std::endl;
            }
            w( obj );
        }
        catch ( std::exception& e ) {
            toolError() << "found invalid document @ " << loc << " " << e.what() << std::endl;
            if ( ! obj.isEmpty() ) {
                try {
                    BSONElement e = obj.firstElement();
                    stringstream ss;
                    ss << "first element: " << e;
                    toolError() << ss.str() << std::endl;
                }
                catch ( std::exception& ) {
                    toolError() << "unable to log invalid document @ " << loc << std::endl;
                }
            }
        }

        loc = forward ?
            extentManager.getNextRecordInExtent( loc )
            : extentManager.getPrevRecordInExtent( loc );

        // break when new loc is outside current extent boundary
        if ( loc.isNull() ) {
            break;
        }
    }

    toolInfoLog() << "wrote " << seen.size() << " documents" << std::endl;

    return forward ? e->xnext : e->xprev;
}
Example 15: validateNS
void validateNS(const char *ns, NamespaceDetails *d, const BSONObj& cmdObj, BSONObjBuilder& result) {
    const bool full = cmdObj["full"].trueValue();
    const bool scanData = full || cmdObj["scandata"].trueValue();

    bool valid = true;
    BSONArrayBuilder errors; // explanation(s) for why valid = false

    if ( d->isCapped() ){
        result.append("capped", d->isCapped());
        result.appendNumber("max", d->maxCappedDocs());
    }

    result.append("firstExtent", str::stream() << d->firstExtent.toString() << " ns:" << d->firstExtent.ext()->nsDiagnostic.toString());
    result.append( "lastExtent", str::stream() << d->lastExtent.toString() << " ns:" << d->lastExtent.ext()->nsDiagnostic.toString());

    BSONArrayBuilder extentData;

    try {
        d->firstExtent.ext()->assertOk();
        d->lastExtent.ext()->assertOk();

        DiskLoc el = d->firstExtent;
        int ne = 0;
        while( !el.isNull() ) {
            Extent *e = el.ext();
            e->assertOk();
            el = e->xnext;
            ne++;
            if ( full )
                extentData << e->dump();
            killCurrentOp.checkForInterrupt();
        }
        result.append("extentCount", ne);
    }
    catch (...) {
        valid=false;
        errors << "extent asserted";
    }

    if ( full )
        result.appendArray( "extents" , extentData.arr() );

    result.appendNumber("datasize", d->stats.datasize);
    result.appendNumber("nrecords", d->stats.nrecords);
    result.appendNumber("lastExtentSize", d->lastExtentSize);
    result.appendNumber("padding", d->paddingFactor());

    try {
        try {
            result.append("firstExtentDetails", d->firstExtent.ext()->dump());

            valid = valid && d->firstExtent.ext()->validates() &&
                d->firstExtent.ext()->xprev.isNull();
        }
        catch (...) {
            errors << "exception firstextent";
            valid = false;
        }

        set<DiskLoc> recs;
        if( scanData ) {
            shared_ptr<Cursor> c = theDataFileMgr.findAll(ns);
            int n = 0;
            int nInvalid = 0;
            long long len = 0;
            long long nlen = 0;
            int outOfOrder = 0;
            DiskLoc cl_last;
            while ( c->ok() ) {
                n++;

                DiskLoc cl = c->currLoc();
                if ( n < 1000000 )
                    recs.insert(cl);
                if ( d->isCapped() ) {
                    if ( cl < cl_last )
                        outOfOrder++;
                    cl_last = cl;
                }

                Record *r = c->_current();
                len += r->lengthWithHeaders();
                nlen += r->netLength();

                if (full){
                    BSONObj obj = BSONObj::make(r);
                    if (!obj.isValid() || !obj.valid()){ // both fast and deep checks
                        valid = false;
                        if (nInvalid == 0) // only log once;
                            errors << "invalid bson object detected (see logs for more info)";

                        nInvalid++;
                        if (strcmp("_id", obj.firstElementFieldName()) == 0){
                            try {
                                obj.firstElement().validate(); // throws on error
                                log() << "Invalid bson detected in " << ns << " with _id: " << obj.firstElement().toString(false) << endl;
                            }
//.........some code omitted here.........
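Taken together, Examples 4, 5, 7, 8, 9, 11, and 14 all follow the same defensive pattern: call valid() on each object read from possibly damaged storage and skip the corrupt ones rather than abort the scan. Below is a distilled sketch of that pattern with hypothetical names (copyValidObjects is not part of MongoDB), under the same legacy-header assumption as the opening example:

#include <vector>
#include "mongo/db/jsobj.h" // legacy source-tree header (assumption)

// Hypothetical helper distilling the clone/repair loops above: keep only the
// structurally valid objects, count the rest, and never let one bad record
// stop the scan.
size_t copyValidObjects( const std::vector<mongo::BSONObj>& in,
                         std::vector<mongo::BSONObj>& out ) {
    size_t skipped = 0;
    for ( size_t i = 0; i < in.size(); i++ ) {
        if ( !in[i].valid() ) { // deep structural scan of the raw buffer
            skipped++;          // corrupt: note it and move on
            continue;
        }
        out.push_back( in[i].getOwned() ); // own the buffer before keeping it
    }
    return skipped;
}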