This article collects typical usage examples of the C++ method DiskLoc::getOfs. If you are wondering how DiskLoc::getOfs is used in practice, the curated examples below may help. You can also explore further usage examples of the enclosing class, DiskLoc.
The following shows 15 code examples of the DiskLoc::getOfs method, sorted by popularity by default.
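Before the extracted examples, a minimal self-contained sketch may help orient the reader: a DiskLoc identifies a record location as a data-file number (a()) plus a byte offset within that file (getOfs()), and most of the examples below combine the two to locate records and extents. The class below is a simplified stand-in written for this page, not MongoDB's actual db/diskloc.h header; only the (file, offset) accessors that the examples rely on are sketched.

#include <iostream>

// Simplified stand-in for MongoDB's DiskLoc, for illustration only.
// The real class has more members (record/extent accessors, durability hooks, etc.).
class DiskLoc {
    int _a;   // data-file number (-1 means "null")
    int _ofs; // byte offset within that file
public:
    DiskLoc() : _a(-1), _ofs(0) {}
    DiskLoc(int a, int ofs) : _a(a), _ofs(ofs) {}
    int a() const { return _a; }
    int getOfs() const { return _ofs; }
    bool isNull() const { return _a == -1; }
};

int main() {
    DiskLoc loc(0, 8192); // a record at offset 8192 in file 0
    if (!loc.isNull()) {
        // Typical pattern from the examples below: validate the offset,
        // then compute positions relative to the owning extent.
        int extentOfs = 4096;
        std::cout << "file " << loc.a()
                  << " ofs " << loc.getOfs()
                  << " relative to extent " << (loc.getOfs() - extentOfs) << std::endl;
    }
    return 0;
}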
Example 1: _repairExtent
DiskLoc _repairExtent( Database* db , string ns, bool forward , DiskLoc eLoc ){
    LogIndentLevel lil;

    if ( eLoc.getOfs() <= 0 ){
        error() << "invalid extent ofs: " << eLoc.getOfs() << endl;
        return DiskLoc();
    }

    MongoDataFile * mdf = db->getFile( eLoc.a() );

    Extent * e = mdf->debug_getExtent( eLoc );
    if ( ! e->isOk() ){
        warning() << "Extent not ok magic: " << e->magic << " going to try to continue" << endl;
    }

    log() << "length:" << e->length << endl;

    LogIndentLevel lil2;

    DiskLoc loc = forward ? e->firstRecord : e->lastRecord;
    while ( ! loc.isNull() ){
        if ( loc.getOfs() <= 0 ){
            error() << "offset is 0 for record which should be impossible" << endl;
            break;
        }
        log() << loc << endl;
        Record* rec = loc.rec();
        log() << loc.obj() << endl;
        loc = forward ? rec->getNext( loc ) : rec->getPrev( loc );
    }

    return forward ? e->xnext : e->xprev;
}
Example 2: processDeletedRecord
/**
 * analyzeDiskStorage helper which processes a single record.
 */
void processDeletedRecord(const DiskLoc& dl, const DeletedRecord* dr, const Extent* ex,
                          const AnalyzeParams& params, int bucketNum,
                          vector<DiskStorageData>& sliceData,
                          BSONArrayBuilder* deletedRecordsArrayBuilder) {
    killCurrentOp.checkForInterrupt();
    int extentOfs = ex->myLoc.getOfs();
    if (! (dl.a() == ex->myLoc.a() &&
           dl.getOfs() + dr->lengthWithHeaders() > extentOfs &&
           dl.getOfs() < extentOfs + ex->length) ) {
        return;
    }
    RecPos pos = RecPos::from(dl.getOfs(), dr->lengthWithHeaders(), extentOfs, params);
    bool spansRequestedArea = false;
    for (RecPos::SliceIterator it = pos.iterateSlices(); !it.end(); ++it) {
        DiskStorageData& slice = sliceData[it->sliceNum];
        slice.freeRecords[bucketNum] += it->ratioHere;
        spansRequestedArea = true;
    }
    if (deletedRecordsArrayBuilder != NULL && spansRequestedArea) {
        BSONObjBuilder(deletedRecordsArrayBuilder->subobjStart())
            .append("ofs", dl.getOfs() - extentOfs)
            .append("recBytes", dr->lengthWithHeaders());
    }
}
Example 3: inCapExtent
bool NamespaceDetails::inCapExtent( const DiskLoc &dl ) const {
    invariant( !dl.isNull() );

    if ( dl.a() != _capExtent.a() )
        return false;

    if ( dl.getOfs() < _capExtent.getOfs() )
        return false;

    const Extent* e = theCapExtent();
    int end = _capExtent.getOfs() + e->length;
    return dl.getOfs() <= end;
}
Example 4: _createExtentInFile
DiskLoc ExtentManager::_createExtentInFile( int fileNo, DataFile* f,
                                            int size, int maxFileNoForQuota ) {
    size = ExtentManager::quantizeExtentSize( size );

    if ( maxFileNoForQuota > 0 && fileNo - 1 >= maxFileNoForQuota ) {
        if ( cc().hasWrittenThisPass() ) {
            warning() << "quota exceeded, but can't assert" << endl;
        }
        else {
            _quotaExceeded();
        }
    }

    massert( 10358, "bad new extent size", size >= Extent::minSize() && size <= Extent::maxSize() );

    DiskLoc loc = f->allocExtentArea( size );
    loc.assertOk();

    Extent *e = getExtent( loc, false );
    verify( e );

    getDur().writing(e)->init("", size, fileNo, loc.getOfs(), false);

    return loc;
}
Example 5: __capAlloc
DiskLoc NamespaceDetails::__capAlloc( int len ) {
    DiskLoc prev = cappedLastDelRecLastExtent();
    DiskLoc i = cappedFirstDeletedInCurExtent();
    DiskLoc ret;
    for (; !i.isNull() && inCapExtent( i ); prev = i, i = i.drec()->nextDeleted() ) {
        // We need to keep at least one DR per extent in cappedListOfAllDeletedRecords(),
        // so make sure there's space to create a DR at the end.
        if ( i.drec()->lengthWithHeaders() >= len + 24 ) {
            ret = i;
            break;
        }
    }

    /* unlink ourself from the deleted list */
    if ( !ret.isNull() ) {
        if ( prev.isNull() )
            cappedListOfAllDeletedRecords().writing() = ret.drec()->nextDeleted();
        else
            prev.drec()->nextDeleted().writing() = ret.drec()->nextDeleted();
        ret.drec()->nextDeleted().writing().setInvalid(); // defensive.
        verify( ret.drec()->extentOfs() < ret.getOfs() );
    }

    return ret;
}
Example 6: insert
// bypass standard alloc/insert routines to use the extent we want.
static DiskLoc insert( DiskLoc ext, int i ) {
    BSONObjBuilder b;
    b.append( "a", i );
    BSONObj o = b.done();
    int len = o.objsize();
    Extent *e = ext.ext();
    int ofs;
    if ( e->lastRecord.isNull() )
        ofs = ext.getOfs() + ( e->extentData - (char *)e );
    else
        ofs = e->lastRecord.getOfs() + e->lastRecord.rec()->lengthWithHeaders;
    DiskLoc dl( ext.a(), ofs );
    Record *r = dl.rec();
    r->lengthWithHeaders = Record::HeaderSize + len;
    r->extentOfs = e->myLoc.getOfs();
    r->nextOfs = DiskLoc::NullOfs;
    r->prevOfs = e->lastRecord.isNull() ? DiskLoc::NullOfs : e->lastRecord.getOfs();
    memcpy( r->data, o.objdata(), len );
    if ( e->firstRecord.isNull() )
        e->firstRecord = dl;
    else
        e->lastRecord.rec()->nextOfs = ofs;
    e->lastRecord = dl;
    return dl;
}
Example 7: _createExtentInFile
Extent* ExtentManager::_createExtentInFile( int fileNo, DataFile* f,
                                            const char* ns, int size, bool newCapped,
                                            bool enforceQuota ) {
    size = ExtentManager::quantizeExtentSize( size );

    if ( enforceQuota ) {
        if ( fileIndexExceedsQuota( ns, fileNo - 1 ) ) {
            if ( cc().hasWrittenThisPass() ) {
                warning() << "quota exceeded, but can't assert "
                          << " going over quota for: " << ns << " " << fileNo << endl;
            }
            else {
                _quotaExceeded();
            }
        }
    }

    massert( 10358, "bad new extent size", size >= Extent::minSize() && size <= Extent::maxSize() );

    DiskLoc loc = f->allocExtentArea( size );
    loc.assertOk();

    Extent *e = getExtent( loc, false );
    verify( e );

    DiskLoc emptyLoc = getDur().writing(e)->init(ns, size, fileNo, loc.getOfs(), newCapped);

    addNewExtentToNamespace(ns, e, loc, emptyLoc, newCapped);

    LOG(1) << "ExtentManager: creating new extent for: " << ns << " in file: " << fileNo
           << " size: " << size << endl;

    return e;
}
Example 8: touchNs
void touchNs( const std::string& ns ) {
    std::vector< touch_location > ranges;
    Client::ReadContext ctx(ns);
    {
        NamespaceDetails *nsd = nsdetails(ns.c_str());
        uassert( 16154, "namespace does not exist", nsd );

        for( DiskLoc L = nsd->firstExtent; !L.isNull(); L = L.ext()->xnext ) {
            MongoDataFile* mdf = cc().database()->getFile( L.a() );
            massert( 16238, "can't fetch extent file structure", mdf );
            touch_location tl;
            tl.fd = mdf->getFd();
            tl.offset = L.getOfs();
            tl.ext = L.ext();
            tl.length = tl.ext->length;
            ranges.push_back(tl);
        }
    }

    LockMongoFilesShared lk;
    Lock::TempRelease tr;

    std::string progress_msg = "touch " + ns + " extents";
    ProgressMeterHolder pm( cc().curop()->setMessage( progress_msg.c_str() , ranges.size() ) );

    for ( std::vector< touch_location >::iterator it = ranges.begin(); it != ranges.end(); ++it ) {
        touch_pages( it->fd, it->offset, it->length, it->ext );
        pm.hit();
        killCurrentOp.checkForInterrupt(false);
    }
    pm.finished();
}
Example 9: processRecord
/**
 * analyzeDiskStorage helper which processes a single record.
 */
void processRecord(const DiskLoc& dl, const DiskLoc& prevDl, const Record* r, int extentOfs,
                   const AnalyzeParams& params, vector<DiskStorageData>& sliceData,
                   BSONArrayBuilder* recordsArrayBuilder) {
    killCurrentOp.checkForInterrupt();

    BSONObj obj = dl.obj();
    int recBytes = r->lengthWithHeaders();
    double characteristicFieldValue = 0;
    bool hasCharacteristicField = extractCharacteristicFieldValue(obj, params,
                                                                  characteristicFieldValue);
    bool isLocatedBeforePrevious = dl.a() < prevDl.a();

    RecPos pos = RecPos::from(dl.getOfs(), recBytes, extentOfs, params);
    bool spansRequestedArea = false;
    for (RecPos::SliceIterator it = pos.iterateSlices(); !it.end(); ++it) {
        spansRequestedArea = true;
        DiskStorageData& slice = sliceData[it->sliceNum];
        slice.numEntries += it->ratioHere;
        slice.recBytes += it->sizeHere;
        slice.bsonBytes += static_cast<long long>(it->ratioHere * obj.objsize());
        if (hasCharacteristicField) {
            slice.characteristicCount += it->ratioHere;
            slice.characteristicSum += it->ratioHere * characteristicFieldValue;
        }
        if (isLocatedBeforePrevious) {
            slice.outOfOrderRecs += it->ratioHere;
        }
    }

    if (recordsArrayBuilder != NULL && spansRequestedArea) {
        DEV {
            int startsAt = dl.getOfs() - extentOfs;
            int endsAt = startsAt + recBytes;
            verify((startsAt < params.startOfs && endsAt > params.startOfs) ||
                   (startsAt < params.endOfs && endsAt >= params.endOfs) ||
                   (startsAt >= params.startOfs && endsAt < params.endOfs));
        }
        BSONObjBuilder recordBuilder(recordsArrayBuilder->subobjStart());
        recordBuilder.append("ofs", dl.getOfs() - extentOfs);
        recordBuilder.append("recBytes", recBytes);
        recordBuilder.append("bsonBytes", obj.objsize());
        recordBuilder.append("_id", obj["_id"]);
        if (hasCharacteristicField) {
            recordBuilder.append("characteristic", characteristicFieldValue);
        }
        recordBuilder.doneFast();
    }
}
示例10: O
/* combine adjacent deleted records *for the current extent* of the capped collection
   this is O(n^2) but we call it for capped tables where typically n==1 or 2!
   (or 3...there will be a little unused sliver at the end of the extent.)
*/
void NamespaceDetails::compact() {
    DDD( "NamespaceDetails::compact enter" );

    verify( isCapped() );

    vector<DiskLoc> drecs;

    // Pull out capExtent's DRs from deletedList
    DiskLoc i = cappedFirstDeletedInCurExtent();
    for (; !i.isNull() && inCapExtent( i ); i = i.drec()->nextDeleted() ) {
        DDD( "\t" << i );
        drecs.push_back( i );
    }

    getDur().writingDiskLoc( cappedFirstDeletedInCurExtent() ) = i;

    std::sort( drecs.begin(), drecs.end() );
    DDD( "\t drecs.size(): " << drecs.size() );

    vector<DiskLoc>::const_iterator j = drecs.begin();
    verify( j != drecs.end() );
    DiskLoc a = *j;
    while ( 1 ) {
        j++;
        if ( j == drecs.end() ) {
            DDD( "\t compact adddelrec" );
            addDeletedRec(a.drec(), a);
            break;
        }
        DiskLoc b = *j;
        while ( a.a() == b.a() && a.getOfs() + a.drec()->lengthWithHeaders() == b.getOfs() ) {
            // a & b are adjacent. merge.
            getDur().writingInt( a.drec()->lengthWithHeaders() ) += b.drec()->lengthWithHeaders();
            j++;
            if ( j == drecs.end() ) {
                DDD( "\t compact adddelrec2" );
                addDeletedRec(a.drec(), a);
                return;
            }
            b = *j;
        }
        DDD( "\t compact adddelrec3" );
        addDeletedRec(a.drec(), a);
        a = b;
    }
}
示例11: O
/* combine adjacent deleted records *for the current extent* of the capped collection
   this is O(n^2) but we call it for capped tables where typically n==1 or 2!
   (or 3...there will be a little unused sliver at the end of the extent.)
*/
void NamespaceDetails::compact() {
    assert(capped);

    list<DiskLoc> drecs;

    // Pull out capExtent's DRs from deletedList
    DiskLoc i = cappedFirstDeletedInCurExtent();
    for (; !i.isNull() && inCapExtent( i ); i = i.drec()->nextDeleted )
        drecs.push_back( i );

    getDur().writingDiskLoc( cappedFirstDeletedInCurExtent() ) = i;

    // This is the O(n^2) part.
    drecs.sort();

    list<DiskLoc>::iterator j = drecs.begin();
    assert( j != drecs.end() );
    DiskLoc a = *j;
    while ( 1 ) {
        j++;
        if ( j == drecs.end() ) {
            DEBUGGING out() << "TEMP: compact adddelrec\n";
            addDeletedRec(a.drec(), a);
            break;
        }
        DiskLoc b = *j;
        while ( a.a() == b.a() && a.getOfs() + a.drec()->lengthWithHeaders == b.getOfs() ) {
            // a & b are adjacent. merge.
            getDur().writingInt( a.drec()->lengthWithHeaders ) += b.drec()->lengthWithHeaders;
            j++;
            if ( j == drecs.end() ) {
                DEBUGGING out() << "temp: compact adddelrec2\n";
                addDeletedRec(a.drec(), a);
                return;
            }
            b = *j;
        }
        DEBUGGING out() << "temp: compact adddelrec3\n";
        addDeletedRec(a.drec(), a);
        a = b;
    }
}
Example 12: DiskLoc56Bit::operator=
void DiskLoc56Bit::operator=(const DiskLoc& loc) {
    ofs = loc.getOfs();
    int la = loc.a();
    invariant( la <= 0xffffff ); // must fit in 3 bytes
    if( la < 0 ) {
        if ( la != -1 ) {
            log() << "btree diskloc isn't negative 1: " << la << std::endl;
            invariant ( la == -1 );
        }
        la = 0;
        ofs = OurNullOfs;
    }
    memcpy(_a, &la, 3); // endian
}
Example 13: addRecordToRecListInExtent
/** add a record to the end of the linked list chain within this extent.
    require: you must have already declared write intent for the record header.
*/
void addRecordToRecListInExtent(Record *r, DiskLoc loc) {
    dassert( loc.rec() == r );
    Extent *e = r->myExtent(loc);
    if ( e->lastRecord.isNull() ) {
        Extent::FL *fl = getDur().writing(e->fl());
        fl->firstRecord = fl->lastRecord = loc;
        r->prevOfs() = r->nextOfs() = DiskLoc::NullOfs;
    }
    else {
        Record *oldlast = e->lastRecord.rec();
        r->prevOfs() = e->lastRecord.getOfs();
        r->nextOfs() = DiskLoc::NullOfs;
        getDur().writingInt(oldlast->nextOfs()) = loc.getOfs();
        getDur().writingDiskLoc(e->lastRecord) = loc;
    }
}
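Examples 13 through 15 all follow the same pattern: records inside an extent form a doubly linked list whose prev/next links are byte offsets obtained via getOfs(), with a sentinel offset standing in for "null". The sketch below illustrates that pattern in isolation; it is not MongoDB code, and the names (ExtentSketch, RecHeader, NullOfs as -1) are invented for this page.

#include <iostream>
#include <vector>

const int NullOfs = -1; // hypothetical sentinel, analogous to DiskLoc::NullOfs

struct RecHeader {
    int prevOfs;
    int nextOfs;
};

struct ExtentSketch {
    int firstRecordOfs = NullOfs;
    int lastRecordOfs = NullOfs;
    std::vector<char> data; // stands in for the memory-mapped file region

    RecHeader* rec(int ofs) { return reinterpret_cast<RecHeader*>(&data[ofs]); }

    // Append a record header at byte offset `ofs`, mirroring
    // addRecordToRecListInExtent: either start the chain or link after the tail.
    void append(int ofs) {
        RecHeader* r = rec(ofs);
        if (lastRecordOfs == NullOfs) {
            firstRecordOfs = lastRecordOfs = ofs;
            r->prevOfs = r->nextOfs = NullOfs;
        } else {
            RecHeader* oldlast = rec(lastRecordOfs);
            r->prevOfs = lastRecordOfs;
            r->nextOfs = NullOfs;
            oldlast->nextOfs = ofs;
            lastRecordOfs = ofs;
        }
    }
};

int main() {
    ExtentSketch e;
    e.data.resize(4096);
    e.append(0);
    e.append(64);
    e.append(128);
    // Walk the chain forward by following nextOfs, as rec->getNext(loc) would.
    for (int ofs = e.firstRecordOfs; ofs != NullOfs; ofs = e.rec(ofs)->nextOfs)
        std::cout << "record at ofs " << ofs << std::endl;
    return 0;
}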
Example 14: fast_oplog_insert
/* special version of insert for transaction logging -- streamlined a bit.
   assumes ns is capped and no indexes
*/
Record* DataFileMgr::fast_oplog_insert(NamespaceDetails *d, const char *ns, int len) {
    verify( d );
    RARELY verify( d == nsdetails(ns) );
    DEV verify( d == nsdetails(ns) );

    massert( 16509,
             str::stream()
             << "fast_oplog_insert requires a capped collection "
             << " but " << ns << " is not capped",
             d->isCapped() );

    // record timing on oplog inserts
    boost::optional<TimerHolder> insertTimer;
    // skip non-oplog collections
    if (NamespaceString::oplog(ns)) {
        insertTimer = boost::in_place(&oplogInsertStats);
        oplogInsertBytesStats.increment(len); // record len of inserted records for oplog
    }

    int lenWHdr = len + Record::HeaderSize;

    DiskLoc loc = d->alloc(ns, lenWHdr);
    verify( !loc.isNull() );

    Record *r = loc.rec();
    verify( r->lengthWithHeaders() >= lenWHdr );

    Extent *e = r->myExtent(loc);
    if ( e->lastRecord.isNull() ) {
        Extent::FL *fl = getDur().writing( e->fl() );
        fl->firstRecord = fl->lastRecord = loc;

        Record::NP *np = getDur().writing(r->np());
        np->nextOfs = np->prevOfs = DiskLoc::NullOfs;
    }
    else {
        Record *oldlast = e->lastRecord.rec();

        Record::NP *np = getDur().writing(r->np());
        np->prevOfs = e->lastRecord.getOfs();
        np->nextOfs = DiskLoc::NullOfs;
        getDur().writingInt( oldlast->nextOfs() ) = loc.getOfs();
        e->lastRecord.writing() = loc;
    }

    d->incrementStats( r->netLength(), 1 );
    return r;
}
Example 15: _addRecordToRecListInExtent
void RecordStoreV1Base::_addRecordToRecListInExtent(OperationContext* txn,
                                                    Record *r,
                                                    DiskLoc loc) {
    dassert( recordFor(loc) == r );
    Extent *e = _getExtent( _getExtentLocForRecord( loc ) );
    if ( e->lastRecord.isNull() ) {
        *txn->recoveryUnit()->writing(&e->firstRecord) = loc;
        *txn->recoveryUnit()->writing(&e->lastRecord) = loc;
        r->prevOfs() = r->nextOfs() = DiskLoc::NullOfs;
    }
    else {
        Record *oldlast = recordFor(e->lastRecord);
        r->prevOfs() = e->lastRecord.getOfs();
        r->nextOfs() = DiskLoc::NullOfs;
        txn->recoveryUnit()->writingInt(oldlast->nextOfs()) = loc.getOfs();
        *txn->recoveryUnit()->writing(&e->lastRecord) = loc;
    }
}