本文整理汇总了C++中DiskLoc::isValid方法的典型用法代码示例。如果您正苦于以下问题:C++ DiskLoc::isValid方法的具体用法?C++ DiskLoc::isValid怎么用?C++ DiskLoc::isValid使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类DiskLoc的用法示例。
在下文中一共展示了DiskLoc::isValid方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: OK
// Removes every record from this record store while keeping its first
// extent allocated for reuse.
// - txn: active operation context; every on-disk mutation is routed
//   through txn->recoveryUnit()->writing() so it is journaled and can be
//   rolled back as a unit.
// Returns Status::OK(); trivially succeeds when the store has no valid
// first extent (i.e. it is already empty).
Status SimpleRecordStoreV1::truncate(OperationContext* txn) {
const DiskLoc firstExtLoc = _details->firstExtent(txn);
if (firstExtLoc.isNull() || !firstExtLoc.isValid()) {
// Already empty
return Status::OK();
}
// Free all extents except the first.
Extent* firstExt = _extentManager->getExtent(firstExtLoc);
if (!firstExt->xnext.isNull()) {
const DiskLoc extNextLoc = firstExt->xnext;
const DiskLoc oldLastExtLoc = _details->lastExtent(txn);
Extent* const nextExt = _extentManager->getExtent(extNextLoc);
// Unlink other extents;
// Sever the chain in both directions before freeing, so the extent
// list stays consistent if we crash mid-operation.
*txn->recoveryUnit()->writing(&nextExt->xprev) = DiskLoc();
*txn->recoveryUnit()->writing(&firstExt->xnext) = DiskLoc();
// The first extent is now also the last one.
_details->setLastExtent(txn, firstExtLoc);
_details->setLastExtentSize(txn, firstExt->length);
// Return extents [extNextLoc .. oldLastExtLoc] to the extent manager.
_extentManager->freeExtents(txn, extNextLoc, oldLastExtLoc);
}
// Make the first (now only) extent a single large deleted record.
*txn->recoveryUnit()->writing(&firstExt->firstRecord) = DiskLoc();
*txn->recoveryUnit()->writing(&firstExt->lastRecord) = DiskLoc();
// Reset the deleted-record freelist, then seed it with the whole extent.
_details->orphanDeletedList(txn);
addDeletedRec(txn, _findFirstSpot(txn, firstExtLoc, firstExt));
// Make stats reflect that there are now no documents in this record store.
_details->setStats(txn, 0, 0);
return Status::OK();
}
示例2: addKey
// Appends one (key, loc) entry to the bulk-built index data.
// Entries must arrive in ascending (key, DiskLoc) order; oversized keys
// are rejected with KeyTooLong, out-of-order input with InternalError,
// and (when duplicates are disallowed) duplicate keys via dupKeyError.
Status addKey(const BSONObj& key, const DiskLoc& loc) {
    if (key.objsize() >= TempKeyMaxSize) {
        return Status(ErrorCodes::KeyTooLong, "key too big");
    }

    invariant(!loc.isNull());
    invariant(loc.isValid());
    invariant(!hasFieldNames(key));

    if (!_data->empty()) {
        // Compare against the previously inserted entry; the key
        // comparison deliberately ignores the new entry's DiskLoc.
        const int order = _comparator.compare(IndexKeyEntry(key, DiskLoc()), *_last);
        const bool keyOutOfOrder = order < 0;
        const bool locOutOfOrder = _dupsAllowed && order == 0 && loc < _last->loc;
        if (keyOutOfOrder || locOutOfOrder) {
            return Status(ErrorCodes::InternalError,
                          "expected ascending (key, DiskLoc) order in bulk builder");
        }
        if (!_dupsAllowed && order == 0 && loc != _last->loc) {
            return dupKeyError(key);
        }
    }

    // Own the key bytes before storing; the caller's buffer may not outlive us.
    BSONObj ownedKey = key.getOwned();
    _last = _data->insert(_data->end(), IndexKeyEntry(ownedKey, loc));
    *_currentKeySize += key.objsize();
    return Status::OK();
}
示例3: insert
// Inserts (key, loc) into the in-memory index set.
// Enforces the key-size limit and, when dupsAllowed is false, uniqueness.
// On a successful (non-duplicate) insert, updates the running key-size
// total and notifies the recovery unit so the insert is rollback-aware.
virtual Status insert(OperationContext* txn,
                      const BSONObj& key,
                      const DiskLoc& loc,
                      bool dupsAllowed) {
    invariant(!loc.isNull());
    invariant(loc.isValid());
    invariant(!hasFieldNames(key));

    if (key.objsize() >= TempKeyMaxSize) {
        string msg = mongoutils::str::stream()
            << "Heap1Btree::insert: key too large to index, failing "
            << ' ' << key.objsize() << ' ' << key;
        return Status(ErrorCodes::KeyTooLong, msg);
    }

    // TODO optimization: save the iterator from the dup-check to speed up insert
    if (!dupsAllowed && isDup(*_data, key, loc))
        return dupKeyError(key);

    // Own the key bytes before storing them in the set.
    BSONObj ownedKey = key.getOwned();
    const bool wasInserted = _data->insert(IndexKeyEntry(ownedKey, loc)).second;
    if (wasInserted) {
        _currentKeySize += key.objsize();
        Heap1RecoveryUnit::notifyIndexInsert(txn, this, ownedKey, loc);
    }
    return Status::OK();
}
示例4: unindex
// Removes the exact (key, loc) entry from the index if present.
// Returns true when an entry was erased; also shrinks the running
// key-size total on removal.
virtual bool unindex(OperationContext* txn, const BSONObj& key, const DiskLoc& loc) {
    invariant(!loc.isNull());
    invariant(loc.isValid());
    invariant(!hasFieldNames(key));

    const size_t erasedCount = _data->erase(IndexEntry(key, loc));
    // A set holds at most one matching entry.
    invariant(erasedCount <= 1);
    const bool removed = (erasedCount == 1);
    if (removed)
        _currentKeySize -= key.objsize();
    return removed;
}
示例5: unindex
// Removes the exact (key, loc) entry from the index if present.
// Returns true when an entry was erased; on removal it shrinks the
// running key-size total and notifies the recovery unit so the delete
// is rollback-aware.
virtual bool unindex(OperationContext* txn, const BSONObj& key, const DiskLoc& loc) {
    invariant(!loc.isNull());
    invariant(loc.isValid());
    invariant(!hasFieldNames(key));

    const size_t erasedCount = _data->erase(IndexKeyEntry(key, loc));
    // A set holds at most one matching entry.
    invariant(erasedCount <= 1);
    const bool removed = (erasedCount == 1);
    if (removed) {
        _currentKeySize -= key.objsize();
        Heap1RecoveryUnit::notifyIndexRemove(txn, this, key, loc);
    }
    return removed;
}
示例6: addKey
// Appends one (key, loc) entry during bulk index construction.
// Inserts are assumed to arrive already sorted; rejects oversized keys
// and (when duplicates are disallowed) duplicate keys.
Status addKey(const BSONObj& key, const DiskLoc& loc) {
    if (key.objsize() >= TempKeyMaxSize) {
        return Status(ErrorCodes::KeyTooLong, "key too big");
    }

    invariant(!loc.isNull());
    invariant(loc.isValid());
    invariant(!hasFieldNames(key));

    // TODO optimization: dup check can assume dup is only possible with last inserted key
    // and avoid the log(n) lookup.
    if (!_dupsAllowed && isDup(*_data, key, loc)) {
        return dupKeyError(key);
    }

    // Own the key bytes before storing; append at the end since input is sorted.
    BSONObj ownedKey = key.getOwned();
    _data->insert(_data->end(), IndexEntry(ownedKey, loc));
    *_currentKeySize += key.objsize();
    return Status::OK();
}
示例7: getOplogStartHack
// Builds a PlanExecutor (returned via 'execOut') for an oplog-replay
// query by locating where in the oplog a forward collection scan should
// begin, using the query's top-level $gt/$gte predicate on 'ts'.
// Takes ownership of 'cq' regardless of outcome (held by autoCq below).
// Returns InternalError for a NULL collection, OplogOperationUnsupported
// when no usable 'ts' predicate exists, or the status of executor creation.
Status getOplogStartHack(OperationContext* txn,
Collection* collection,
CanonicalQuery* cq,
PlanExecutor** execOut) {
invariant(cq);
// Own the query immediately so every early return frees it.
auto_ptr<CanonicalQuery> autoCq(cq);
if ( collection == NULL )
return Status(ErrorCodes::InternalError,
"getOplogStartHack called with a NULL collection" );
// A query can only do oplog start finding if it has a top-level $gt or $gte predicate over
// the "ts" field (the operation's timestamp). Find that predicate and pass it to
// the OplogStart stage.
MatchExpression* tsExpr = NULL;
if (MatchExpression::AND == cq->root()->matchType()) {
// The query has an AND at the top-level. See if any of the children
// of the AND are $gt or $gte predicates over 'ts'.
for (size_t i = 0; i < cq->root()->numChildren(); ++i) {
MatchExpression* me = cq->root()->getChild(i);
if (isOplogTsPred(me)) {
tsExpr = me;
break;
}
}
}
else if (isOplogTsPred(cq->root())) {
// The root of the tree is a $gt or $gte predicate over 'ts'.
tsExpr = cq->root();
}
if (NULL == tsExpr) {
return Status(ErrorCodes::OplogOperationUnsupported,
"OplogReplay query does not contain top-level "
"$gt or $gte over the 'ts' field.");
}
// Invalid sentinel meaning "start position not found yet".
DiskLoc startLoc = DiskLoc().setInvalid();
// See if the RecordStore supports the oplogStartHack
const BSONElement tsElem = extractOplogTsOptime(tsExpr);
if (tsElem.type() == Timestamp) {
StatusWith<DiskLoc> goal = oploghack::keyForOptime(tsElem._opTime());
if (goal.isOK()) {
startLoc = collection->getRecordStore()->oplogStartHack(txn, goal.getValue());
}
}
// Fast path: the record store resolved the start position directly.
if (startLoc.isValid()) {
LOG(3) << "Using direct oplog seek";
}
else {
LOG(3) << "Using OplogStart stage";
// Fallback to trying the OplogStart stage.
WorkingSet* oplogws = new WorkingSet();
OplogStart* stage = new OplogStart(txn, collection, tsExpr, oplogws);
PlanExecutor* rawExec;
// Takes ownership of oplogws and stage.
Status execStatus = PlanExecutor::make(txn, oplogws, stage, collection,
PlanExecutor::YIELD_AUTO, &rawExec);
invariant(execStatus.isOK());
scoped_ptr<PlanExecutor> exec(rawExec);
// The stage returns a DiskLoc of where to start.
PlanExecutor::ExecState state = exec->getNext(NULL, &startLoc);
// This is normal. The start of the oplog is the beginning of the collection.
if (PlanExecutor::IS_EOF == state) {
// No start point found: fall back to the regular executor path,
// releasing 'cq' to getExecutor which takes ownership.
return getExecutor(txn, collection, autoCq.release(), PlanExecutor::YIELD_AUTO,
execOut);
}
// This is not normal. An error was encountered.
if (PlanExecutor::ADVANCED != state) {
return Status(ErrorCodes::InternalError,
"quick oplog start location had error...?");
}
}
// cout << "diskloc is " << startLoc.toString() << endl;
// Build our collection scan...
CollectionScanParams params;
params.collection = collection;
params.start = startLoc;
params.direction = CollectionScanParams::FORWARD;
params.tailable = cq->getParsed().getOptions().tailable;
WorkingSet* ws = new WorkingSet();
CollectionScan* cs = new CollectionScan(txn, params, ws, cq->root());
// Takes ownership of 'ws', 'cs', and 'cq'.
return PlanExecutor::make(txn, ws, cs, autoCq.release(), collection,
PlanExecutor::YIELD_AUTO, execOut);
}
示例8: validateNS
//.........这里部分代码省略.........
if ( cl < cl_last )
outOfOrder++;
cl_last = cl;
}
Record *r = c->_current();
len += r->lengthWithHeaders;
nlen += r->netLength();
c->advance();
}
if ( d->capped && !d->capLooped() ) {
ss << " capped outOfOrder:" << outOfOrder;
if ( outOfOrder > 1 ) {
valid = false;
ss << " ???";
}
else ss << " (OK)";
ss << '\n';
}
ss << " " << n << " objects found, nobj:" << d->stats.nrecords << '\n';
ss << " " << len << " bytes data w/headers\n";
ss << " " << nlen << " bytes data wout/headers\n";
}
ss << " deletedList: ";
for ( int i = 0; i < Buckets; i++ ) {
ss << (d->deletedList[i].isNull() ? '0' : '1');
}
ss << endl;
int ndel = 0;
long long delSize = 0;
int incorrect = 0;
for ( int i = 0; i < Buckets; i++ ) {
DiskLoc loc = d->deletedList[i];
try {
int k = 0;
while ( !loc.isNull() ) {
if ( recs.count(loc) )
incorrect++;
ndel++;
if ( loc.questionable() ) {
if( d->capped && !loc.isValid() && i == 1 ) {
/* the constructor for NamespaceDetails intentionally sets deletedList[1] to invalid
see comments in namespace.h
*/
break;
}
if ( loc.a() <= 0 || strstr(ns, "hudsonSmall") == 0 ) {
ss << " ?bad deleted loc: " << loc.toString() << " bucket:" << i << " k:" << k << endl;
valid = false;
break;
}
}
DeletedRecord *d = loc.drec();
delSize += d->lengthWithHeaders;
loc = d->nextDeleted;
k++;
killCurrentOp.checkForInterrupt();
}
}
catch (...) {
ss <<" ?exception in deleted chain for bucket " << i << endl;
valid = false;
}
}
ss << " deleted: n: " << ndel << " size: " << delSize << endl;
if ( incorrect ) {
ss << " ?corrupt: " << incorrect << " records from datafile are in deleted list\n";
valid = false;
}
int idxn = 0;
try {
ss << " nIndexes:" << d->nIndexes << endl;
NamespaceDetails::IndexIterator i = d->ii();
while( i.more() ) {
IndexDetails& id = i.next();
ss << " " << id.indexNamespace() << " keys:" <<
id.head.btree()->fullValidate(id.head, id.keyPattern()) << endl;
}
}
catch (...) {
ss << "\n exception during index validate idxn:" << idxn << endl;
valid=false;
}
}
catch (AssertionException) {
ss << "\n exception during validate\n" << endl;
valid = false;
}
if ( !valid )
ss << " ns corrupt, requires dbchk\n";
return ss.str();
}
示例9: validateNS
//.........这里部分代码省略.........
if ( n < 1000000 )
recs.insert(cl);
if ( nsd->isCapped() ) {
if ( cl < cl_last )
outOfOrder++;
cl_last = cl;
}
Record *r = cl.rec();
len += r->lengthWithHeaders();
nlen += r->netLength();
if ( r->lengthWithHeaders() ==
NamespaceDetails::quantizeAllocationSpace
( r->lengthWithHeaders() ) ) {
// Count the number of records having a size consistent with
// the quantizeAllocationSpace quantization implementation.
++nQuantizedSize;
}
if ( r->lengthWithHeaders() ==
NamespaceDetails::quantizePowerOf2AllocationSpace
( r->lengthWithHeaders() - 1 ) ) {
// Count the number of records having a size consistent with the
// quantizePowerOf2AllocationSpace quantization implementation.
// Because of SERVER-8311, power of 2 quantization is not idempotent and
// r->lengthWithHeaders() - 1 must be checked instead of the record
// length itself.
++nPowerOf2QuantizedSize;
}
if (full){
BSONObj obj = BSONObj::make(r);
if (!obj.isValid() || !obj.valid()){ // both fast and deep checks
valid = false;
if (nInvalid == 0) // only log once;
errors << "invalid bson object detected (see logs for more info)";
nInvalid++;
if (strcmp("_id", obj.firstElementFieldName()) == 0){
try {
obj.firstElement().validate(); // throws on error
log() << "Invalid bson detected in " << ns << " with _id: " << obj.firstElement().toString(false) << endl;
}
catch(...){
log() << "Invalid bson detected in " << ns << " with corrupt _id" << endl;
}
}
else {
log() << "Invalid bson detected in " << ns << " and couldn't find _id" << endl;
}
}
else {
bsonLen += obj.objsize();
}
}
}
if (Runner::RUNNER_EOF != state) {
// TODO: more descriptive logging.
warning() << "Internal error while reading collection " << ns << endl;
}
if ( nsd->isCapped() && !nsd->capLooped() ) {
result.append("cappedOutOfOrder", outOfOrder);
if ( outOfOrder > 1 ) {
valid = false;
errors << "too many out of order records";
示例10: OK
//.........这里部分代码省略.........
}
else {
bsonLen += dataSize;
}
}
}
if ( isCapped() && !_details->capLooped() ) {
output->append("cappedOutOfOrder", outOfOrder);
if ( outOfOrder > 1 ) {
results->valid = false;
results->errors.push_back( "too many out of order records" );
}
}
output->append("objectsFound", n);
if (full) {
output->append("invalidObjects", nInvalid);
}
output->appendNumber("nQuantizedSize", nQuantizedSize);
output->appendNumber("nPowerOf2QuantizedSize", nPowerOf2QuantizedSize);
output->appendNumber("bytesWithHeaders", len);
output->appendNumber("bytesWithoutHeaders", nlen);
if (full) {
output->appendNumber("bytesBson", bsonLen);
}
} // end scanData
// 55555555555555555555555555
BSONArrayBuilder deletedListArray;
for ( int i = 0; i < Buckets; i++ ) {
deletedListArray << _details->deletedListEntry(i).isNull();
}
int ndel = 0;
long long delSize = 0;
BSONArrayBuilder delBucketSizes;
int incorrect = 0;
for ( int i = 0; i < Buckets; i++ ) {
DiskLoc loc = _details->deletedListEntry(i);
try {
int k = 0;
while ( !loc.isNull() ) {
if ( recs.count(loc) )
incorrect++;
ndel++;
if ( loc.questionable() ) {
if( isCapped() && !loc.isValid() && i == 1 ) {
/* the constructor for NamespaceDetails intentionally sets deletedList[1] to invalid
see comments in namespace.h
*/
break;
}
string err( str::stream() << "bad pointer in deleted record list: "
<< loc.toString()
<< " bucket: " << i
<< " k: " << k );
results->errors.push_back( err );
results->valid = false;
break;
}
const DeletedRecord* d = deletedRecordFor(loc);
delSize += d->lengthWithHeaders();
loc = d->nextDeleted();
k++;
txn->checkForInterrupt();
}
delBucketSizes << k;
}
catch (...) {
results->errors.push_back( (string)"exception in deleted chain for bucket " +
BSONObjBuilder::numStr(i) );
results->valid = false;
}
}
output->appendNumber("deletedCount", ndel);
output->appendNumber("deletedSize", delSize);
if ( full ) {
output->append( "delBucketSizes", delBucketSizes.arr() );
}
if ( incorrect ) {
results->errors.push_back( BSONObjBuilder::numStr(incorrect) +
" records from datafile are in deleted list" );
results->valid = false;
}
}
catch (AssertionException) {
results->errors.push_back( "exception during validate" );
results->valid = false;
}
return Status::OK();
}
示例11: validateNS
void validateNS(const char *ns, NamespaceDetails *d, const BSONObj& cmdObj, BSONObjBuilder& result) {
const bool full = cmdObj["full"].trueValue();
const bool scanData = full || cmdObj["scandata"].trueValue();
bool valid = true;
BSONArrayBuilder errors; // explanation(s) for why valid = false
if ( d->isCapped() ){
result.append("capped", d->isCapped());
result.appendNumber("max", d->maxCappedDocs());
}
result.append("firstExtent", str::stream() << d->firstExtent.toString() << " ns:" << d->firstExtent.ext()->nsDiagnostic.toString());
result.append( "lastExtent", str::stream() << d->lastExtent.toString() << " ns:" << d->lastExtent.ext()->nsDiagnostic.toString());
BSONArrayBuilder extentData;
try {
d->firstExtent.ext()->assertOk();
d->lastExtent.ext()->assertOk();
DiskLoc el = d->firstExtent;
int ne = 0;
while( !el.isNull() ) {
Extent *e = el.ext();
e->assertOk();
el = e->xnext;
ne++;
if ( full )
extentData << e->dump();
killCurrentOp.checkForInterrupt();
}
result.append("extentCount", ne);
}
catch (...) {
valid=false;
errors << "extent asserted";
}
if ( full )
result.appendArray( "extents" , extentData.arr() );
result.appendNumber("datasize", d->stats.datasize);
result.appendNumber("nrecords", d->stats.nrecords);
result.appendNumber("lastExtentSize", d->lastExtentSize);
result.appendNumber("padding", d->paddingFactor());
try {
try {
result.append("firstExtentDetails", d->firstExtent.ext()->dump());
valid = valid && d->firstExtent.ext()->validates() &&
d->firstExtent.ext()->xprev.isNull();
}
catch (...) {
errors << "exception firstextent";
valid = false;
}
set<DiskLoc> recs;
if( scanData ) {
shared_ptr<Cursor> c = theDataFileMgr.findAll(ns);
int n = 0;
int nInvalid = 0;
long long len = 0;
long long nlen = 0;
int outOfOrder = 0;
DiskLoc cl_last;
while ( c->ok() ) {
n++;
DiskLoc cl = c->currLoc();
if ( n < 1000000 )
recs.insert(cl);
if ( d->isCapped() ) {
if ( cl < cl_last )
outOfOrder++;
cl_last = cl;
}
Record *r = c->_current();
len += r->lengthWithHeaders();
nlen += r->netLength();
if (full){
BSONObj obj = BSONObj::make(r);
if (!obj.isValid() || !obj.valid()){ // both fast and deep checks
valid = false;
if (nInvalid == 0) // only log once;
errors << "invalid bson object detected (see logs for more info)";
nInvalid++;
if (strcmp("_id", obj.firstElementFieldName()) == 0){
try {
obj.firstElement().validate(); // throws on error
log() << "Invalid bson detected in " << ns << " with _id: " << obj.firstElement().toString(false) << endl;
}
//.........这里部分代码省略.........