This article collects typical usage examples of the C++ method NamespaceDetails::IndexIterator::next. If you are unsure what IndexIterator::next does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples of the enclosing class NamespaceDetails::IndexIterator.
The following shows 15 code examples of the IndexIterator::next method, sorted by popularity by default.
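Before the individual examples, here is the pattern they all share: obtain an iterator from NamespaceDetails::ii(), loop while more() returns true, and let next() hand back a reference to each IndexDetails. The sketch below illustrates this, assuming the pre-2.6 MongoDB internals used throughout this page (nsdetails(), IndexDetails, log()); the header paths and the helper name printIndexKeyPatterns are illustrative assumptions, not part of MongoDB.
// Minimal sketch of the iteration pattern shared by the examples on this page.
// The include paths below are assumptions (they moved between MongoDB versions),
// and printIndexKeyPatterns is a hypothetical helper used only for illustration.
#include "mongo/db/namespace_details.h"   // NamespaceDetails, IndexIterator (assumed path)
#include "mongo/db/index.h"               // IndexDetails (assumed path)
#include "mongo/util/log.h"               // log() (assumed path)
namespace mongo {
    void printIndexKeyPatterns(const char* ns) {
        NamespaceDetails* d = nsdetails(ns);
        if (d == 0)
            return;                                   // namespace does not exist
        NamespaceDetails::IndexIterator i = d->ii();  // iterate the collection's indexes
        while (i.more()) {
            IndexDetails& idx = i.next();             // next() returns IndexDetails&
            log() << idx.indexName() << " : " << idx.keyPattern() << endl;
        }
    }
} // namespace mongo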
Example 1: ensureIndex
void Helpers::ensureIndex(const char *ns, BSONObj keyPattern, bool unique, const char *name) {
NamespaceDetails *d = nsdetails(ns);
if( d == 0 )
return;
{
NamespaceDetails::IndexIterator i = d->ii();
while( i.more() ) {
if( i.next().keyPattern().woCompare(keyPattern) == 0 )
return;
}
}
if( d->nIndexes >= NamespaceDetails::NIndexesMax ) {
problem() << "Helper::ensureIndex fails, MaxIndexes exceeded " << ns << '\n';
return;
}
string system_indexes = cc().database()->name + ".system.indexes";
BSONObjBuilder b;
b.append("name", name);
b.append("ns", ns);
b.append("key", keyPattern);
b.appendBool("unique", unique);
BSONObj o = b.done();
theDataFileMgr.insert(system_indexes.c_str(), o.objdata(), o.objsize());
}
Example 2: ensureHaveIdIndex
void ensureHaveIdIndex(const char* ns, bool mayInterrupt) {
NamespaceDetails *d = nsdetails(ns);
if ( d == 0 || d->isSystemFlagSet(NamespaceDetails::Flag_HaveIdIndex) )
return;
d->setSystemFlag( NamespaceDetails::Flag_HaveIdIndex );
{
NamespaceDetails::IndexIterator i = d->ii();
while( i.more() ) {
if( i.next().isIdIndex() )
return;
}
}
string system_indexes = cc().database()->name() + ".system.indexes";
BSONObjBuilder b;
b.append("name", "_id_");
b.append("ns", ns);
b.append("key", id_obj);
BSONObj o = b.done();
/* edge case: note the insert could fail if we have hit maxindexes already */
theDataFileMgr.insert(system_indexes.c_str(), o.objdata(), o.objsize(), mayInterrupt, true);
}
Example 3: touch
bool touch( std::string& ns,
std::string& errmsg,
bool touch_data,
bool touch_indexes,
BSONObjBuilder& result ) {
if (touch_data) {
log() << "touching namespace " << ns << endl;
touchNs( ns );
log() << "touching namespace " << ns << " complete" << endl;
}
if (touch_indexes) {
// enumerate indexes
std::vector< std::string > indexes;
{
Client::ReadContext ctx(ns);
NamespaceDetails *nsd = nsdetails(ns);
massert( 16153, "namespace does not exist", nsd );
NamespaceDetails::IndexIterator ii = nsd->ii();
while ( ii.more() ) {
IndexDetails& idx = ii.next();
indexes.push_back( idx.indexNamespace() );
}
}
for ( std::vector<std::string>::const_iterator it = indexes.begin();
it != indexes.end();
it++ ) {
touchNs( *it );
}
}
return true;
}
Example 4: touch
bool touch( std::string& ns,
std::string& errmsg,
bool touch_data,
bool touch_indexes,
BSONObjBuilder& result ) {
if (touch_data) {
log() << "touching namespace " << ns << endl;
Timer t;
int numRanges = touchNs( ns );
result.append( "data", BSON( "numRanges" << numRanges <<
"millis" << t.millis() ) );
log() << "touching namespace " << ns << " complete" << endl;
}
if (touch_indexes) {
Timer t;
// enumerate indexes
std::vector< std::string > indexes;
{
Client::ReadContext ctx(ns);
NamespaceDetails *nsd = nsdetails(ns);
massert( 16153, "namespace does not exist", nsd );
NamespaceDetails::IndexIterator ii = nsd->ii();
while ( ii.more() ) {
IndexDetails& idx = ii.next();
indexes.push_back( idx.indexNamespace() );
}
}
int numRanges = 0;
for ( std::vector<std::string>::const_iterator it = indexes.begin();
it != indexes.end();
it++ ) {
numRanges += touchNs( *it );
}
result.append( "indexes", BSON( "num" << static_cast<int>(indexes.size()) <<
"numRanges" << numRanges <<
"millis" << t.millis() ) );
}
return true;
}
Example 5: computeIndexKeys
void CollectionInfoCache::computeIndexKeys() {
DEV Lock::assertWriteLocked( _collection->ns().ns() );
_indexedPaths.clear();
NamespaceDetails::IndexIterator i = _collection->details()->ii( true );
while( i.more() ) {
BSONObj key = i.next().keyPattern();
BSONObjIterator j( key );
while ( j.more() ) {
BSONElement e = j.next();
_indexedPaths.addPath( e.fieldName() );
}
}
_keysComputed = true;
}
Example 6: runInternal
/**
* Run analysis with the provided parameters. See IndexStatsCmd for in-depth explanation of
* output.
*
* @return true on success, false otherwise
*/
bool runInternal(const NamespaceDetails* nsd, IndexStatsParams params, string& errmsg,
BSONObjBuilder& result) {
const IndexDetails* details = NULL;
// casting away const, we are not going to modify NamespaceDetails
// but ii() is not marked const, see SERVER-7619
for (NamespaceDetails::IndexIterator it = const_cast<NamespaceDetails*>(nsd)->ii();
it.more();) {
IndexDetails& cur = it.next();
if (cur.indexName() == params.indexName) details = &cur;
}
if (details == NULL) {
errmsg = "the requested index does not exist";
return false;
}
result << "index" << details->indexName()
<< "version" << details->version()
<< "isIdIndex" << details->isIdIndex()
<< "keyPattern" << details->keyPattern()
<< "storageNs" << details->indexNamespace();
scoped_ptr<BtreeInspector> inspector(NULL);
switch (details->version()) {
case 1: inspector.reset(new BtreeInspectorV1(params.expandNodes)); break;
case 0: inspector.reset(new BtreeInspectorV0(params.expandNodes)); break;
default:
errmsg = str::stream() << "index version " << details->version() << " is "
<< "not supported";
return false;
}
inspector->inspect(details->head);
inspector->stats().appendTo(result);
return true;
}
Example 7: _compact
bool _compact(const char *ns, NamespaceDetails *d, string& errmsg, bool validate, BSONObjBuilder& result, double pf, int pb) {
//int les = d->lastExtentSize;
// this is a big job, so might as well make things tidy before we start just to be nice.
getDur().commitNow();
list<DiskLoc> extents;
for( DiskLoc L = d->firstExtent; !L.isNull(); L = L.ext()->xnext )
extents.push_back(L);
log() << "compact " << extents.size() << " extents" << endl;
ProgressMeterHolder pm( cc().curop()->setMessage( "compact extent" , extents.size() ) );
// same data, but might perform a little different after compact?
NamespaceDetailsTransient::get(ns).clearQueryCache();
int nidx = d->nIndexes;
scoped_array<IndexSpec> indexSpecs( new IndexSpec[nidx] );
scoped_array<SortPhaseOne> phase1( new SortPhaseOne[nidx] );
{
NamespaceDetails::IndexIterator ii = d->ii();
int x = 0;
while( ii.more() ) {
BSONObjBuilder b;
IndexDetails& idx = ii.next();
BSONObj::iterator i(idx.info.obj());
while( i.more() ) {
BSONElement e = i.next();
if( !str::equals(e.fieldName(), "v") && !str::equals(e.fieldName(), "background") ) {
b.append(e);
}
}
BSONObj o = b.obj().getOwned();
phase1[x].sorter.reset( new BSONObjExternalSorter( idx.idxInterface(), o.getObjectField("key") ) );
phase1[x].sorter->hintNumObjects( d->stats.nrecords );
indexSpecs[x++].reset(o);
}
}
log() << "compact orphan deleted lists" << endl;
for( int i = 0; i < Buckets; i++ ) {
d->deletedList[i].writing().Null();
}
// before dropping indexes, at least make sure we can allocate one extent!
uassert(14025, "compact error no space available to allocate", !allocateSpaceForANewRecord(ns, d, Record::HeaderSize+1, false).isNull());
// note that the drop indexes call also invalidates all clientcursors for the namespace, which is important and wanted here
log() << "compact dropping indexes" << endl;
BSONObjBuilder b;
if( !dropIndexes(d, ns, "*", errmsg, b, true) ) {
errmsg = "compact drop indexes failed";
log() << errmsg << endl;
return false;
}
getDur().commitNow();
long long skipped = 0;
int n = 0;
for( list<DiskLoc>::iterator i = extents.begin(); i != extents.end(); i++ ) {
skipped += compactExtent(ns, d, *i, n++, indexSpecs, phase1, nidx, validate, pf, pb);
pm.hit();
}
if( skipped ) {
result.append("invalidObjects", skipped);
}
assert( d->firstExtent.ext()->xprev.isNull() );
// indexes will do their own progress meter?
pm.finished();
// build indexes
NamespaceString s(ns);
string si = s.db + ".system.indexes";
for( int i = 0; i < nidx; i++ ) {
killCurrentOp.checkForInterrupt(false);
BSONObj info = indexSpecs[i].info;
log() << "compact create index " << info["key"].Obj().toString() << endl;
try {
precalced = &phase1[i];
theDataFileMgr.insert(si.c_str(), info.objdata(), info.objsize());
}
catch(...) {
precalced = 0;
throw;
}
precalced = 0;
}
return true;
}
Example 8: validateNS
string validateNS(const char *ns, NamespaceDetails *d, BSONObj *cmdObj) {
bool scanData = true;
if( cmdObj && cmdObj->hasElement("scandata") && !cmdObj->getBoolField("scandata") )
scanData = false;
bool valid = true;
stringstream ss;
ss << "\nvalidate\n";
//ss << " details: " << hex << d << " ofs:" << nsindex(ns)->detailsOffset(d) << dec << endl;
if ( d->capped )
ss << " capped:" << d->capped << " max:" << d->max << '\n';
ss << " firstExtent:" << d->firstExtent.toString() << " ns:" << d->firstExtent.ext()->nsDiagnostic.toString()<< '\n';
ss << " lastExtent:" << d->lastExtent.toString() << " ns:" << d->lastExtent.ext()->nsDiagnostic.toString() << '\n';
try {
d->firstExtent.ext()->assertOk();
d->lastExtent.ext()->assertOk();
DiskLoc el = d->firstExtent;
int ne = 0;
while( !el.isNull() ) {
Extent *e = el.ext();
e->assertOk();
el = e->xnext;
ne++;
killCurrentOp.checkForInterrupt();
}
ss << " # extents:" << ne << '\n';
}
catch (...) {
valid=false;
ss << " extent asserted ";
}
ss << " datasize?:" << d->stats.datasize << " nrecords?:" << d->stats.nrecords << " lastExtentSize:" << d->lastExtentSize << '\n';
ss << " padding:" << d->paddingFactor << '\n';
try {
try {
ss << " first extent:\n";
d->firstExtent.ext()->dump(ss);
valid = valid && d->firstExtent.ext()->validates();
}
catch (...) {
ss << "\n exception firstextent\n" << endl;
}
set<DiskLoc> recs;
if( scanData ) {
shared_ptr<Cursor> c = theDataFileMgr.findAll(ns);
int n = 0;
long long len = 0;
long long nlen = 0;
int outOfOrder = 0;
DiskLoc cl_last;
while ( c->ok() ) {
n++;
DiskLoc cl = c->currLoc();
if ( n < 1000000 )
recs.insert(cl);
if ( d->capped ) {
if ( cl < cl_last )
outOfOrder++;
cl_last = cl;
}
Record *r = c->_current();
len += r->lengthWithHeaders;
nlen += r->netLength();
c->advance();
}
if ( d->capped && !d->capLooped() ) {
ss << " capped outOfOrder:" << outOfOrder;
if ( outOfOrder > 1 ) {
valid = false;
ss << " ???";
}
else ss << " (OK)";
ss << '\n';
}
ss << " " << n << " objects found, nobj:" << d->stats.nrecords << '\n';
ss << " " << len << " bytes data w/headers\n";
ss << " " << nlen << " bytes data wout/headers\n";
}
ss << " deletedList: ";
for ( int i = 0; i < Buckets; i++ ) {
ss << (d->deletedList[i].isNull() ? '0' : '1');
}
ss << endl;
int ndel = 0;
long long delSize = 0;
int incorrect = 0;
for ( int i = 0; i < Buckets; i++ ) {
DiskLoc loc = d->deletedList[i];
try {
int k = 0;
while ( !loc.isNull() ) {
if ( recs.count(loc) )
incorrect++;
//......... remainder of this code omitted .........
Example 9: _compact
bool _compact(const char *ns, NamespaceDetails *d, string& errmsg, bool validate,
BSONObjBuilder& result, double pf, int pb, bool useDefaultPadding,
bool preservePadding) {
// this is a big job, so might as well make things tidy before we start just to be nice.
getDur().commitIfNeeded();
list<DiskLoc> extents;
for( DiskLoc L = d->firstExtent(); !L.isNull(); L = L.ext()->xnext )
extents.push_back(L);
log() << "compact " << extents.size() << " extents" << endl;
ProgressMeterHolder pm(cc().curop()->setMessage("compact extent",
"Extent Compacting Progress",
extents.size()));
// same data, but might perform a little different after compact?
Collection* collection = cc().database()->getCollection( ns );
verify( collection );
collection->infoCache()->addedIndex();
verify( d->getCompletedIndexCount() == d->getTotalIndexCount() );
int nidx = d->getCompletedIndexCount();
scoped_array<BSONObj> indexSpecs( new BSONObj[nidx] );
{
NamespaceDetails::IndexIterator ii = d->ii();
// For each existing index...
for( int idxNo = 0; ii.more(); ++idxNo ) {
// Build a new index spec based on the old index spec.
BSONObjBuilder b;
BSONObj::iterator i(ii.next().info.obj());
while( i.more() ) {
BSONElement e = i.next();
if ( str::equals( e.fieldName(), "v" ) ) {
// Drop any preexisting index version spec. The default index version will
// be used instead for the new index.
continue;
}
if ( str::equals( e.fieldName(), "background" ) ) {
// Create the new index in the foreground.
continue;
}
// Pass the element through to the new index spec.
b.append(e);
}
indexSpecs[idxNo] = b.obj().getOwned();
}
}
log() << "compact orphan deleted lists" << endl;
d->orphanDeletedList();
// Start over from scratch with our extent sizing and growth
d->setLastExtentSize( 0 );
// before dropping indexes, at least make sure we can allocate one extent!
uassert(14025, "compact error no space available to allocate", !allocateSpaceForANewRecord(ns, d, Record::HeaderSize+1, false).isNull());
// note that the drop indexes call also invalidates all clientcursors for the namespace, which is important and wanted here
log() << "compact dropping indexes" << endl;
Status status = collection->getIndexCatalog()->dropAllIndexes( true );
if ( !status.isOK() ) {
errmsg = str::stream() << "compact drop indexes failed: " << status.toString();
log() << status.toString() << endl;
return false;
}
getDur().commitIfNeeded();
long long skipped = 0;
int n = 0;
// reset data size and record counts to 0 for this namespace
// as we're about to tally them up again for each new extent
d->setStats( 0, 0 );
for( list<DiskLoc>::iterator i = extents.begin(); i != extents.end(); i++ ) {
skipped += compactExtent(ns, d, *i, n++, nidx, validate, pf, pb,
useDefaultPadding, preservePadding);
pm.hit();
}
if( skipped ) {
result.append("invalidObjects", skipped);
}
verify( d->firstExtent().ext()->xprev.isNull() );
// indexes will do their own progress meter?
pm.finished();
// build indexes
for( int i = 0; i < nidx; i++ ) {
killCurrentOp.checkForInterrupt(false);
BSONObj info = indexSpecs[i];
log() << "compact create index " << info["key"].Obj().toString() << endl;
Status status = collection->getIndexCatalog()->createIndex( info, false );
if ( !status.isOK() ) {
log() << "failed to create index: " << status.toString();
uassertStatusOK( status );
}
//......... remainder of this code omitted .........
Example 10: validateNS
void validateNS(const string& ns,
Collection* collection,
const BSONObj& cmdObj,
BSONObjBuilder& result) {
const bool full = cmdObj["full"].trueValue();
const bool scanData = full || cmdObj["scandata"].trueValue();
NamespaceDetails* nsd = collection->details();
bool valid = true;
BSONArrayBuilder errors; // explanation(s) for why valid = false
if ( collection->isCapped() ){
result.append("capped", nsd->isCapped());
result.appendNumber("max", nsd->maxCappedDocs());
}
if ( nsd->firstExtent().isNull() )
result.append( "firstExtent", "null" );
else
result.append( "firstExtent", str::stream() << nsd->firstExtent().toString()
<< " ns:" << nsd->firstExtent().ext()->nsDiagnostic.toString());
if ( nsd->lastExtent().isNull() )
result.append( "lastExtent", "null" );
else
result.append( "lastExtent", str::stream() << nsd->lastExtent().toString()
<< " ns:" << nsd->lastExtent().ext()->nsDiagnostic.toString());
BSONArrayBuilder extentData;
int extentCount = 0;
try {
if ( !nsd->firstExtent().isNull() ) {
nsd->firstExtent().ext()->assertOk();
nsd->lastExtent().ext()->assertOk();
}
DiskLoc extentDiskLoc = nsd->firstExtent();
while (!extentDiskLoc.isNull()) {
Extent* thisExtent = extentDiskLoc.ext();
if (full) {
extentData << thisExtent->dump();
}
if (!thisExtent->validates(extentDiskLoc, &errors)) {
valid = false;
}
DiskLoc nextDiskLoc = thisExtent->xnext;
if (extentCount > 0 && !nextDiskLoc.isNull()
&& nextDiskLoc.ext()->xprev != extentDiskLoc) {
StringBuilder sb;
sb << "'xprev' pointer " << nextDiskLoc.ext()->xprev.toString()
<< " in extent " << nextDiskLoc.toString()
<< " does not point to extent " << extentDiskLoc.toString();
errors << sb.str();
valid = false;
}
if (nextDiskLoc.isNull() && extentDiskLoc != nsd->lastExtent()) {
StringBuilder sb;
sb << "'lastExtent' pointer " << nsd->lastExtent().toString()
<< " does not point to last extent in list " << extentDiskLoc.toString();
errors << sb.str();
valid = false;
}
extentDiskLoc = nextDiskLoc;
extentCount++;
killCurrentOp.checkForInterrupt();
}
}
catch (const DBException& e) {
StringBuilder sb;
sb << "exception validating extent " << extentCount
<< ": " << e.what();
errors << sb.str();
valid = false;
}
result.append("extentCount", extentCount);
if ( full )
result.appendArray( "extents" , extentData.arr() );
result.appendNumber("datasize", nsd->dataSize());
result.appendNumber("nrecords", nsd->numRecords());
result.appendNumber("lastExtentSize", nsd->lastExtentSize());
result.appendNumber("padding", nsd->paddingFactor());
try {
bool testingLastExtent = false;
try {
if (nsd->firstExtent().isNull()) {
// this is ok
}
else {
result.append("firstExtentDetails", nsd->firstExtent().ext()->dump());
if (!nsd->firstExtent().ext()->xprev.isNull()) {
StringBuilder sb;
sb << "'xprev' pointer in 'firstExtent' " << nsd->firstExtent().toString()
<< " is " << nsd->firstExtent().ext()->xprev.toString()
<< ", should be null";
errors << sb.str();
//......... remainder of this code omitted .........
Example 11: init
void QueryPlanSet::init() {
DEBUGQO( "QueryPlanSet::init " << ns << "\t" << _originalQuery );
_plans.clear();
_mayRecordPlan = true;
_usingPrerecordedPlan = false;
const char *ns = _frsp->ns();
NamespaceDetails *d = nsdetails( ns );
if ( !d || !_frsp->matchPossible() ) {
// Table scan plan, when no matches are possible
_plans.push_back( QueryPlanPtr( new QueryPlan( d, -1, *_frsp, *_originalFrsp, _originalQuery, _order ) ) );
return;
}
BSONElement hint = _hint.firstElement();
if ( !hint.eoo() ) {
_mayRecordPlan = false;
IndexDetails *id = parseHint( hint, d );
if ( id ) {
addHint( *id );
}
else {
massert( 10366 , "natural order cannot be specified with $min/$max", _min.isEmpty() && _max.isEmpty() );
// Table scan plan
_plans.push_back( QueryPlanPtr( new QueryPlan( d, -1, *_frsp, *_originalFrsp, _originalQuery, _order ) ) );
}
return;
}
if ( !_min.isEmpty() || !_max.isEmpty() ) {
string errmsg;
BSONObj keyPattern;
IndexDetails *idx = indexDetailsForRange( ns, errmsg, _min, _max, keyPattern );
massert( 10367 , errmsg, idx );
_plans.push_back( QueryPlanPtr( new QueryPlan( d, d->idxNo(*idx), *_frsp, *_originalFrsp, _originalQuery, _order, _min, _max ) ) );
return;
}
if ( isSimpleIdQuery( _originalQuery ) ) {
int idx = d->findIdIndex();
if ( idx >= 0 ) {
_usingPrerecordedPlan = true;
_mayRecordPlan = false;
_plans.push_back( QueryPlanPtr( new QueryPlan( d , idx , *_frsp , *_originalFrsp , _originalQuery, _order ) ) );
return;
}
}
if ( _originalQuery.isEmpty() && _order.isEmpty() ) {
_plans.push_back( QueryPlanPtr( new QueryPlan( d, -1, *_frsp, *_originalFrsp, _originalQuery, _order ) ) );
return;
}
DEBUGQO( "\t special : " << _frsp->getSpecial() );
if ( _frsp->getSpecial().size() ) {
_special = _frsp->getSpecial();
NamespaceDetails::IndexIterator i = d->ii();
while( i.more() ) {
int j = i.pos();
IndexDetails& ii = i.next();
const IndexSpec& spec = ii.getSpec();
if ( spec.getTypeName() == _special && spec.suitability( _originalQuery , _order ) ) {
_usingPrerecordedPlan = true;
_mayRecordPlan = false;
_plans.push_back( QueryPlanPtr( new QueryPlan( d , j , *_frsp , *_originalFrsp , _originalQuery, _order ,
BSONObj() , BSONObj() , _special ) ) );
return;
}
}
uassert( 13038 , (string)"can't find special index: " + _special + " for: " + _originalQuery.toString() , 0 );
}
if ( _honorRecordedPlan ) {
pair< BSONObj, long long > best = QueryUtilIndexed::bestIndexForPatterns( *_frsp, _order );
BSONObj bestIndex = best.first;
long long oldNScanned = best.second;
if ( !bestIndex.isEmpty() ) {
QueryPlanPtr p;
_oldNScanned = oldNScanned;
if ( !strcmp( bestIndex.firstElement().fieldName(), "$natural" ) ) {
// Table scan plan
p.reset( new QueryPlan( d, -1, *_frsp, *_originalFrsp, _originalQuery, _order ) );
}
NamespaceDetails::IndexIterator i = d->ii();
while( i.more() ) {
int j = i.pos();
IndexDetails& ii = i.next();
if( ii.keyPattern().woCompare(bestIndex) == 0 ) {
p.reset( new QueryPlan( d, j, *_frsp, *_originalFrsp, _originalQuery, _order ) );
}
}
massert( 10368 , "Unable to locate previously recorded index", p.get() );
if ( !( _bestGuessOnly && p->scanAndOrderRequired() ) ) {
_usingPrerecordedPlan = true;
_mayRecordPlan = false;
_plans.push_back( p );
return;
}
//......... remainder of this code omitted .........
Example 12: run
bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
Timer t;
string ns = dbname + '.' + cmdObj.firstElement().valuestr();
string key = cmdObj["key"].valuestrsafe();
BSONObj keyPattern = BSON( key << 1 );
BSONObj query = getQuery( cmdObj );
int bufSize = BSONObjMaxUserSize - 4096;
BufBuilder bb( bufSize );
char * start = bb.buf();
BSONArrayBuilder arr( bb );
BSONElementSet values;
long long nscanned = 0; // locations looked at
long long nscannedObjects = 0; // full objects looked at
long long n = 0; // matches
MatchDetails md;
NamespaceDetails * d = nsdetails( ns );
if ( ! d ) {
result.appendArray( "values" , BSONObj() );
result.append( "stats" , BSON( "n" << 0 << "nscanned" << 0 << "nscannedObjects" << 0 ) );
return true;
}
shared_ptr<Cursor> cursor;
if ( ! query.isEmpty() ) {
cursor = getOptimizedCursor( ns.c_str(), query, BSONObj() );
}
else {
// query is empty, so lets see if we can find an index
// with the key so we don't have to hit the raw data
NamespaceDetails::IndexIterator ii = d->ii();
while ( ii.more() ) {
IndexDetails& idx = ii.next();
if ( d->isMultikey( ii.pos() - 1 ) )
continue;
if ( idx.inKeyPattern( key ) ) {
cursor = getBestGuessCursor( ns.c_str(), BSONObj(), idx.keyPattern() );
if( cursor.get() ) break;
}
}
if ( ! cursor.get() )
cursor = getOptimizedCursor(ns.c_str() , query , BSONObj() );
}
verify( cursor );
string cursorName = cursor->toString();
auto_ptr<ClientCursor> cc (new ClientCursor(QueryOption_NoCursorTimeout, cursor, ns));
// map from indexed field to offset in key object
map<string, int> indexedFields;
if (!cursor->modifiedKeys()) {
// store index information so we can decide if we can
// get something out of the index key rather than full object
int x = 0;
BSONObjIterator i( cursor->indexKeyPattern() );
while ( i.more() ) {
BSONElement e = i.next();
if ( e.isNumber() ) {
// only want basic index fields, not "2d" etc
indexedFields[e.fieldName()] = x;
}
x++;
}
}
while ( cursor->ok() ) {
nscanned++;
bool loadedRecord = false;
if ( cursor->currentMatches( &md ) && !cursor->getsetdup( cursor->currLoc() ) ) {
n++;
BSONObj holder;
BSONElementSet temp;
// Try to get the record from the key fields.
loadedRecord = !getFieldsDotted(indexedFields, cursor, key, temp, holder);
for ( BSONElementSet::iterator i=temp.begin(); i!=temp.end(); ++i ) {
BSONElement e = *i;
if ( values.count( e ) )
continue;
int now = bb.len();
uassert(10044, "distinct too big, 16mb cap", ( now + e.size() + 1024 ) < bufSize );
//......... remainder of this code omitted .........
Example 13: validateNS
void validateNS(const char *ns, NamespaceDetails *d, const BSONObj& cmdObj, BSONObjBuilder& result) {
const bool full = cmdObj["full"].trueValue();
const bool scanData = full || cmdObj["scandata"].trueValue();
bool valid = true;
BSONArrayBuilder errors; // explanation(s) for why valid = false
if ( d->isCapped() ){
result.append("capped", d->isCapped());
result.appendNumber("max", d->maxCappedDocs());
}
result.append("firstExtent", str::stream() << d->firstExtent.toString() << " ns:" << d->firstExtent.ext()->nsDiagnostic.toString());
result.append( "lastExtent", str::stream() << d->lastExtent.toString() << " ns:" << d->lastExtent.ext()->nsDiagnostic.toString());
BSONArrayBuilder extentData;
try {
d->firstExtent.ext()->assertOk();
d->lastExtent.ext()->assertOk();
DiskLoc el = d->firstExtent;
int ne = 0;
while( !el.isNull() ) {
Extent *e = el.ext();
e->assertOk();
el = e->xnext;
ne++;
if ( full )
extentData << e->dump();
killCurrentOp.checkForInterrupt();
}
result.append("extentCount", ne);
}
catch (...) {
valid=false;
errors << "extent asserted";
}
if ( full )
result.appendArray( "extents" , extentData.arr() );
result.appendNumber("datasize", d->stats.datasize);
result.appendNumber("nrecords", d->stats.nrecords);
result.appendNumber("lastExtentSize", d->lastExtentSize);
result.appendNumber("padding", d->paddingFactor());
try {
try {
result.append("firstExtentDetails", d->firstExtent.ext()->dump());
valid = valid && d->firstExtent.ext()->validates() &&
d->firstExtent.ext()->xprev.isNull();
}
catch (...) {
errors << "exception firstextent";
valid = false;
}
set<DiskLoc> recs;
if( scanData ) {
shared_ptr<Cursor> c = theDataFileMgr.findAll(ns);
int n = 0;
int nInvalid = 0;
long long len = 0;
long long nlen = 0;
int outOfOrder = 0;
DiskLoc cl_last;
while ( c->ok() ) {
n++;
DiskLoc cl = c->currLoc();
if ( n < 1000000 )
recs.insert(cl);
if ( d->isCapped() ) {
if ( cl < cl_last )
outOfOrder++;
cl_last = cl;
}
Record *r = c->_current();
len += r->lengthWithHeaders();
nlen += r->netLength();
if (full){
BSONObj obj = BSONObj::make(r);
if (!obj.isValid() || !obj.valid()){ // both fast and deep checks
valid = false;
if (nInvalid == 0) // only log once;
errors << "invalid bson object detected (see logs for more info)";
nInvalid++;
if (strcmp("_id", obj.firstElementFieldName()) == 0){
try {
obj.firstElement().validate(); // throws on error
log() << "Invalid bson detected in " << ns << " with _id: " << obj.firstElement().toString(false) << endl;
}
//......... remainder of this code omitted .........
Example 14: _compact
bool _compact(const char *ns, NamespaceDetails *d, string& errmsg, bool validate, BSONObjBuilder& result, double pf, int pb) {
// this is a big job, so might as well make things tidy before we start just to be nice.
getDur().commitIfNeeded();
list<DiskLoc> extents;
for( DiskLoc L = d->firstExtent; !L.isNull(); L = L.ext()->xnext )
extents.push_back(L);
log() << "compact " << extents.size() << " extents" << endl;
ProgressMeterHolder pm( cc().curop()->setMessage( "compact extent" , extents.size() ) );
// same data, but might perform a little different after compact?
NamespaceDetailsTransient::get(ns).clearQueryCache();
int nidx = d->nIndexes;
scoped_array<IndexSpec> indexSpecs( new IndexSpec[nidx] );
scoped_array<SortPhaseOne> phase1( new SortPhaseOne[nidx] );
{
NamespaceDetails::IndexIterator ii = d->ii();
// For each existing index...
for( int idxNo = 0; ii.more(); ++idxNo ) {
// Build a new index spec based on the old index spec.
BSONObjBuilder b;
BSONObj::iterator i(ii.next().info.obj());
while( i.more() ) {
BSONElement e = i.next();
if ( str::equals( e.fieldName(), "v" ) ) {
// Drop any preexisting index version spec. The default index version will
// be used instead for the new index.
continue;
}
if ( str::equals( e.fieldName(), "background" ) ) {
// Create the new index in the foreground.
continue;
}
// Pass the element through to the new index spec.
b.append(e);
}
// Add the new index spec to 'indexSpecs'.
BSONObj o = b.obj().getOwned();
indexSpecs[idxNo].reset(o);
// Create an external sorter.
phase1[idxNo].sorter.reset
( new BSONObjExternalSorter
// Use the default index interface, since the new index will be created
// with the default index version.
( IndexInterface::defaultVersion(),
o.getObjectField("key") ) );
phase1[idxNo].sorter->hintNumObjects( d->stats.nrecords );
}
}
log() << "compact orphan deleted lists" << endl;
for( int i = 0; i < Buckets; i++ ) {
d->deletedList[i].writing().Null();
}
// Start over from scratch with our extent sizing and growth
d->lastExtentSize=0;
// before dropping indexes, at least make sure we can allocate one extent!
uassert(14025, "compact error no space available to allocate", !allocateSpaceForANewRecord(ns, d, Record::HeaderSize+1, false).isNull());
// note that the drop indexes call also invalidates all clientcursors for the namespace, which is important and wanted here
log() << "compact dropping indexes" << endl;
BSONObjBuilder b;
if( !dropIndexes(d, ns, "*", errmsg, b, true) ) {
errmsg = "compact drop indexes failed";
log() << errmsg << endl;
return false;
}
getDur().commitIfNeeded();
long long skipped = 0;
int n = 0;
// reset data size and record counts to 0 for this namespace
// as we're about to tally them up again for each new extent
{
NamespaceDetails::Stats *s = getDur().writing(&d->stats);
s->datasize = 0;
s->nrecords = 0;
}
for( list<DiskLoc>::iterator i = extents.begin(); i != extents.end(); i++ ) {
skipped += compactExtent(ns, d, *i, n++, indexSpecs, phase1, nidx, validate, pf, pb);
pm.hit();
}
if( skipped ) {
result.append("invalidObjects", skipped);
}
verify( d->firstExtent.ext()->xprev.isNull() );
// indexes will do their own progress meter?
pm.finished();
//......... remainder of this code omitted .........
Example 15: run
bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
Timer t;
string ns = dbname + '.' + cmdObj.firstElement().valuestr();
string key = cmdObj["key"].valuestrsafe();
BSONObj keyPattern = BSON( key << 1 );
BSONObj query = getQuery( cmdObj );
int bufSize = BSONObjMaxUserSize - 4096;
BufBuilder bb( bufSize );
char * start = bb.buf();
BSONArrayBuilder arr( bb );
BSONElementSet values;
long long nscanned = 0; // locations looked at
long long nscannedObjects = 0; // full objects looked at
long long n = 0; // matches
MatchDetails md;
NamespaceDetails * d = nsdetails( ns.c_str() );
if ( ! d ) {
result.appendArray( "values" , BSONObj() );
result.append( "stats" , BSON( "n" << 0 << "nscanned" << 0 << "nscannedObjects" << 0 ) );
return true;
}
shared_ptr<Cursor> cursor;
if ( ! query.isEmpty() ) {
cursor = NamespaceDetailsTransient::getCursor(ns.c_str() , query , BSONObj() );
}
else {
// query is empty, so lets see if we can find an index
// with the key so we don't have to hit the raw data
NamespaceDetails::IndexIterator ii = d->ii();
while ( ii.more() ) {
IndexDetails& idx = ii.next();
if ( d->isMultikey( ii.pos() - 1 ) )
continue;
if ( idx.inKeyPattern( key ) ) {
cursor = bestGuessCursor( ns.c_str() , BSONObj() , idx.keyPattern() );
if( cursor.get() ) break;
}
}
if ( ! cursor.get() )
cursor = NamespaceDetailsTransient::getCursor(ns.c_str() , query , BSONObj() );
}
assert( cursor );
string cursorName = cursor->toString();
auto_ptr<ClientCursor> cc (new ClientCursor(QueryOption_NoCursorTimeout, cursor, ns));
while ( cursor->ok() ) {
nscanned++;
bool loadedObject = false;
if ( ( !cursor->matcher() || cursor->matcher()->matchesCurrent( cursor.get() , &md ) ) &&
!cursor->getsetdup( cursor->currLoc() ) ) {
n++;
BSONElementSet temp;
loadedObject = ! cc->getFieldsDotted( key , temp );
for ( BSONElementSet::iterator i=temp.begin(); i!=temp.end(); ++i ) {
BSONElement e = *i;
if ( values.count( e ) )
continue;
int now = bb.len();
uassert(10044, "distinct too big, 16mb cap", ( now + e.size() + 1024 ) < bufSize );
arr.append( e );
BSONElement x( start + now );
values.insert( x );
}
}
if ( loadedObject || md._loadedObject )
nscannedObjects++;
cursor->advance();
if (!cc->yieldSometimes( ClientCursor::MaybeCovered )) {
cc.release();
break;
}
RARELY killCurrentOp.checkForInterrupt();
//......... remainder of this code omitted .........