This page collects typical usage examples of the C++ DurableMappedFile class. If you are wondering what DurableMappedFile is for and how to use it, the selected class examples below may help.
The following shows 9 code examples of the DurableMappedFile class (the snippets come from MongoDB's MMAPv1 storage and journaling code).
Example 1: PointerToDurableMappedFile::makeChunkWritable
__declspec(noinline) void PointerToDurableMappedFile::makeChunkWritable(size_t chunkno) {
    stdx::lock_guard<stdx::mutex> lkPrivateViews(_m);

    if (writable.get(chunkno))  // double check lock
        return;

    // remap all maps in this chunk.
    // common case is a single map, but could have more than one with smallfiles or .ns files
    size_t chunkStart = chunkno * MemoryMappedCOWBitset::ChunkSize;
    size_t chunkNext = chunkStart + MemoryMappedCOWBitset::ChunkSize;

    stdx::lock_guard<stdx::mutex> lkMapView(mapViewMutex);

    map<void*, DurableMappedFile*>::iterator i = _views.upper_bound((void*)(chunkNext - 1));
    while (1) {
        const pair<void*, DurableMappedFile*> x = *(--i);
        DurableMappedFile* mmf = x.second;
        if (mmf == 0)
            break;

        size_t viewStart = reinterpret_cast<size_t>(x.first);
        size_t viewEnd = viewStart + mmf->length();
        if (viewEnd <= chunkStart)
            break;

        size_t protectStart = std::max(viewStart, chunkStart);
        dassert(protectStart < chunkNext);

        size_t protectEnd = std::min(viewEnd, chunkNext);
        size_t protectSize = protectEnd - protectStart;
        dassert(protectSize > 0 && protectSize <= MemoryMappedCOWBitset::ChunkSize);

        DWORD oldProtection;
        bool ok = VirtualProtect(
            reinterpret_cast<void*>(protectStart), protectSize, PAGE_WRITECOPY, &oldProtection);
        if (!ok) {
            DWORD dosError = GetLastError();

            if (dosError == ERROR_COMMITMENT_LIMIT) {
                // System has run out of memory between physical RAM & page file, tell the user
                BSONObjBuilder bb;
                ProcessInfo p;
                p.getExtraInfo(bb);

                severe() << "MongoDB has exhausted the system memory capacity.";
                severe() << "Current Memory Status: " << bb.obj();
            }

            severe() << "VirtualProtect for " << mmf->filename() << " chunk " << chunkno
                     << " failed with " << errnoWithDescription(dosError) << " (chunk size is "
                     << protectSize << ", address is " << hex << protectStart << dec << ")"
                     << " in mongo::makeChunkWritable, terminating" << endl;

            fassertFailed(16362);
        }
    }

    writable.set(chunkno);
}
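As a side note, the core pattern in Example 1 is a double-checked test of a "chunk writable" bitset: the caller checks without a lock, and makeChunkWritable re-checks under the mutex before doing the expensive VirtualProtect work. A minimal, self-contained sketch of that pattern follows; ChunkWritableTracker, protectChunk and the atomic flag array are illustrative stand-ins, not MongoDB types.

#include <atomic>
#include <cstddef>
#include <memory>
#include <mutex>

class ChunkWritableTracker {
public:
    explicit ChunkWritableTracker(std::size_t nChunks)
        : _writable(new std::atomic<bool>[nChunks]()) {}  // value-initialized to false (guaranteed in C++20)

    // Lock-free fast path: callers test this first and only call makeChunkWritable() on a miss.
    bool isWritable(std::size_t chunkno) const {
        return _writable[chunkno].load(std::memory_order_acquire);
    }

    void makeChunkWritable(std::size_t chunkno) {
        std::lock_guard<std::mutex> lk(_m);
        if (_writable[chunkno].load())  // double check under the lock
            return;
        protectChunk(chunkno);          // platform-specific work, e.g. VirtualProtect(..., PAGE_WRITECOPY, ...)
        _writable[chunkno].store(true, std::memory_order_release);
    }

private:
    static void protectChunk(std::size_t /*chunkno*/) { /* remap / reprotect the chunk here */ }

    std::mutex _m;
    std::unique_ptr<std::atomic<bool>[]> _writable;
};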
Example 2: operator()
void operator () (MongoFile *mf) {
    if( mf->isDurableMappedFile() ) {
        DurableMappedFile *mmf = (DurableMappedFile*) mf;
        const unsigned char *p = (const unsigned char *) mmf->getView();
        const unsigned char *w = (const unsigned char *) mmf->view_write();

        if (!p || !w)
            return; // File not fully opened yet

        _bytes += mmf->length();

        verify( mmf->length() == (unsigned) mmf->length() );
        if (memcmp(p, w, (unsigned) mmf->length()) == 0)
            return; // next file

        unsigned low = 0xffffffff;
        unsigned high = 0;
        log() << "DurParanoid mismatch in " << mmf->filename() << endl;

        int logged = 0;
        unsigned lastMismatch = 0xffffffff;
        for( unsigned i = 0; i < mmf->length(); i++ ) {
            if( p[i] != w[i] ) {
                if( lastMismatch != 0xffffffff && lastMismatch+1 != i )
                    log() << endl; // separate blocks of mismatches
                lastMismatch = i;

                if( ++logged < 60 ) {
                    if( logged == 1 )
                        log() << "ofs % 628 = 0x" << hex << (i%628) << endl; // for .ns files to find offset in record

                    stringstream ss;
                    ss << "mismatch ofs:" << hex << i << "\tfilemap:" << setw(2) << (unsigned) w[i]
                       << "\tprivmap:" << setw(2) << (unsigned) p[i];
                    if( p[i] > 32 && p[i] <= 126 )
                        ss << '\t' << p[i];
                    log() << ss.str() << endl;
                }
                if( logged == 60 )
                    log() << "..." << endl;

                if( i < low ) low = i;
                if( i > high ) high = i;
            }
        }

        if( low != 0xffffffff ) {
            std::stringstream ss;
            ss << "journal error warning views mismatch " << mmf->filename() << ' ' << (hex) << low
               << ".." << high << " len:" << high-low+1;
            log() << ss.str() << endl;
            log() << "priv loc: " << (void*)(p+low) << ' ' << endl;

            //vector<WriteIntent>& _intents = commitJob.wi()._intents;
            //(void) _intents; // mark as unused. Useful for inspection in debugger

            // should we abort() here so this isn't unnoticed in some circumstances?
            massert(13599, "Written data does not match in-memory view. Missing WriteIntent?", false);
        }
    }
}
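The heart of Example 2 is a byte-for-byte comparison of the private (copy-on-write) view against the shared write view, reporting individual mismatches plus the overall low..high range. Below is a hedged, self-contained sketch of just that diffing step; diffViews and its plain printf logging are illustrative, not MongoDB code.

#include <cstddef>
#include <cstdio>
#include <cstring>

void diffViews(const unsigned char* priv, const unsigned char* writ, std::size_t len) {
    if (std::memcmp(priv, writ, len) == 0)
        return;                                   // views agree, nothing to report

    std::size_t low = len, high = 0;
    int logged = 0;
    for (std::size_t i = 0; i < len; i++) {
        if (priv[i] != writ[i]) {
            if (++logged <= 20)                   // cap the per-byte output
                std::printf("mismatch ofs:%zx filemap:%02x privmap:%02x\n",
                            i, (unsigned)writ[i], (unsigned)priv[i]);
            if (i < low)  low = i;
            if (i > high) high = i;
        }
    }
    std::printf("views mismatch %zx..%zx len:%zu\n", low, high, high - low + 1);
}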
Example 3: makeChunkWritable
__declspec(noinline) void makeChunkWritable(size_t chunkno) {
    scoped_lock lk(mapViewMutex);

    if( writable.get(chunkno) ) // double check lock
        return;

    // remap all maps in this chunk.  common case is a single map, but could have more than one with smallfiles or .ns files
    size_t chunkStart = chunkno * MemoryMappedFile::ChunkSize;
    size_t chunkNext = chunkStart + MemoryMappedFile::ChunkSize;

    scoped_lock lk2(privateViews._mutex());
    map<void*,DurableMappedFile*>::iterator i = privateViews.finditer_inlock((void*) (chunkNext-1));
    while( 1 ) {
        const pair<void*,DurableMappedFile*> x = *(--i);
        DurableMappedFile *mmf = x.second;
        if( mmf == 0 )
            break;

        size_t viewStart = (size_t) x.first;
        size_t viewEnd = (size_t) (viewStart + mmf->length());
        if( viewEnd <= chunkStart )
            break;

        size_t protectStart = max(viewStart, chunkStart);
        dassert(protectStart < chunkNext);

        size_t protectEnd = min(viewEnd, chunkNext);
        size_t protectSize = protectEnd - protectStart;
        dassert(protectSize > 0 && protectSize <= MemoryMappedFile::ChunkSize);

        DWORD oldProtection;
        bool ok = VirtualProtect( reinterpret_cast<void*>( protectStart ),
                                  protectSize,
                                  PAGE_WRITECOPY,
                                  &oldProtection );
        if ( !ok ) {
            DWORD dosError = GetLastError();
            log() << "VirtualProtect for " << mmf->filename()
                  << " chunk " << chunkno
                  << " failed with " << errnoWithDescription( dosError )
                  << " (chunk size is " << protectSize
                  << ", address is " << hex << protectStart << dec << ")"
                  << " in mongo::makeChunkWritable, terminating"
                  << endl;
            fassertFailed( 16362 );
        }
    }

    writable.set(chunkno);
}
Example 4: find_inlock
/** underscore version of find is for when you are already locked
    @param ofs out return our offset in the view
    @return the DurableMappedFile to which this pointer belongs
*/
DurableMappedFile* PointerToDurableMappedFile::find_inlock(void *p, /*out*/ size_t& ofs) {
    //
    // .................memory..........................
    //    v1       p                      v2
    //    [--------------------]          [-------]
    //
    // e.g., _find(p) == v1
    //
    const pair<void*,DurableMappedFile*> x = *(--_views.upper_bound(p));
    DurableMappedFile *mmf = x.second;
    if( mmf ) {
        size_t o = ((char *)p) - ((char*)x.first);
        if( o < mmf->length() ) {
            ofs = o;
            return mmf;
        }
    }
    return 0;
}
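The lookup in Example 4 relies on std::map::upper_bound: take the view with the greatest base address not greater than p, then check that p falls within that view's length. A standalone sketch of the same idea follows; View, findView and the explicit begin() check are assumptions made for illustration (the MongoDB version appears to rely instead on a null sentinel entry in the map, hence its `if( mmf )` test).

#include <cstddef>
#include <map>
#include <utility>

struct View { std::size_t length; };

// Returns {view, offset} if p lies inside some mapped view, else {nullptr, 0}.
std::pair<const View*, std::size_t> findView(const std::map<void*, View>& views, void* p) {
    std::map<void*, View>::const_iterator it = views.upper_bound(p);
    if (it == views.begin())
        return {nullptr, 0};          // p is below every mapped view
    --it;                             // greatest base address <= p
    std::size_t ofs = static_cast<char*>(p) - static_cast<char*>(it->first);
    if (ofs < it->second.length)
        return {&it->second, ofs};
    return {nullptr, 0};              // p falls past the end of that view
}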
Example 5: prepBasicWrite_inlock
/** put the basic write operation into the buffer (bb) to be journaled */
static void prepBasicWrite_inlock(AlignedBuilder& bb,
                                  const WriteIntent* i,
                                  RelativePath& lastDbPath) {
    size_t ofs = 1;
    DurableMappedFile* mmf = findMMF_inlock(i->start(), /*out*/ ofs);

    if (MONGO_unlikely(!mmf->willNeedRemap())) {
        // tag this mmf as needing a remap of its private view later.
        // usually it will already be dirty/already set, so we do the if above first
        // to avoid possibility of cpu cache line contention
        mmf->setWillNeedRemap();
    }

    // since we have already looked up the mmf, we go ahead and remember the write view location
    // so we don't have to find the DurableMappedFile again later in WRITETODATAFILES()
    //
    // this was for WRITETODATAFILES_Impl2 so commented out now
    //
    /*
    dassert( i->w_ptr == 0 );
    i->w_ptr = ((char*)mmf->view_write()) + ofs;
    */

    JEntry e;
    e.len = min(i->length(), (unsigned)(mmf->length() - ofs));  // don't write past end of file
    verify(ofs <= 0x80000000);
    e.ofs = (unsigned)ofs;
    e.setFileNo(mmf->fileSuffixNo());

    if (mmf->relativePath() == local) {
        e.setLocalDbContextBit();
    } else if (mmf->relativePath() != lastDbPath) {
        lastDbPath = mmf->relativePath();
        JDbContext c;
        bb.appendStruct(c);
        bb.appendStr(lastDbPath.toString());
    }

    bb.appendStruct(e);
    bb.appendBuf(i->start(), e.len);

    if (MONGO_unlikely(e.len != (unsigned)i->length())) {
        log() << "journal info splitting prepBasicWrite at boundary" << endl;

        // This only happens if we write to the last byte in a file and
        // the first byte in another file that is mapped adjacently. I
        // think most OSs leave at least a one page gap between
        // mappings, but better to be safe.

        WriteIntent next((char*)i->start() + e.len, i->length() - e.len);
        prepBasicWrite_inlock(bb, &next, lastDbPath);
    }
}
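The last block of Example 5 handles the rare case where a write intent runs past the end of the mapped file: journal only the bytes that fit, then treat the spill-over as a fresh intent against the adjacently mapped file. A hypothetical sketch of that splitting logic is below; Intent, journalChunk and bytesLeftInFile are stand-ins, not MongoDB APIs.

#include <algorithm>
#include <cstddef>
#include <cstdio>

struct Intent {
    const char* start;
    std::size_t length;
};

// Stand-in for appending a JEntry header plus payload to the journal builder.
void journalChunk(const char* p, std::size_t len) {
    std::printf("journal %zu bytes at %p\n", len, static_cast<const void*>(p));
}

// bytesLeftInFile: distance from the intent's offset to the end of its mapped file.
// Returns the spill-over (length 0 if the intent fit entirely in this file);
// the caller would look up the next file and journal that remainder as its own intent.
Intent prepWrite(const Intent& i, std::size_t bytesLeftInFile) {
    std::size_t len = std::min(i.length, bytesLeftInFile);  // don't write past end of file
    journalChunk(i.start, len);
    Intent rest = {i.start + len, i.length - len};
    return rest;
}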
Example 6: DurableImpl::debugCheckLastDeclaredWrite
void DurableImpl::debugCheckLastDeclaredWrite() {
    static int n;
    ++n;

    verify(debug && storageGlobalParams.dur);
    if (commitJob.writes().empty())
        return;

    const WriteIntent &i = commitJob.lastWrite();
    size_t ofs;
    DurableMappedFile *mmf = privateViews.find(i.start(), ofs);
    if( mmf == 0 )
        return;

    size_t past = ofs + i.length();
    if( mmf->length() < past + 8 )
        return; // too close to end of view

    char *priv = (char *) mmf->getView();
    char *writ = (char *) mmf->view_write();
    unsigned long long *a = (unsigned long long *) (priv+past);
    unsigned long long *b = (unsigned long long *) (writ+past);

    if( *a != *b ) {
        for( set<WriteIntent>::iterator it(commitJob.writes().begin()), end(commitJob.writes().end()); it != end; ++it ) {
            const WriteIntent& wi = *it;
            char *r1 = (char*) wi.start();
            char *r2 = (char*) wi.end();
            if( r1 <= (((char*)a)+8) && r2 > (char*)a ) {
                //log() << "it's ok " << wi.p << ' ' << wi.len << endl;
                return;
            }
        }
        log() << "journal data after write area " << i.start() << " does not agree" << endl;
        log() << " was: " << ((void*)b) << " " << hexdump((char*)b, 8) << endl;
        log() << " now: " << ((void*)a) << " " << hexdump((char*)a, 8) << endl;
        log() << " n:   " << n << endl;
        log() << endl;
    }
}
Example 7: run
void run() {
    try { boost::filesystem::remove(fn); }
    catch(...) { }

    MMAPV1LockerImpl lockState;
    Lock::GlobalWrite lk(&lockState);

    {
        DurableMappedFile f;
        unsigned long long len = 256 * 1024 * 1024;
        verify( f.create(fn, len, /*sequential*/false) );
        {
            char *p = (char *) f.getView();
            verify(p);
            // write something to the private view as a test
            if (storageGlobalParams.dur)
                privateViews.makeWritable(p, 6);
            strcpy(p, "hello");
        }
        if (storageGlobalParams.dur) {
            char *w = (char *) f.view_write();
            strcpy(w + 6, "world");
        }
        MongoFileFinder ff;
        ASSERT( ff.findByPath(fn) );
        ASSERT( ff.findByPath("asdf") == 0 );
    }
    {
        MongoFileFinder ff;
        ASSERT( ff.findByPath(fn) == 0 );
    }

    int N = 10000;
#if !defined(_WIN32) && !defined(__linux__)
    // seems this test is slow on OS X.
    N = 100;
#endif

    // we make a lot here -- if we were leaking, presumably it would fail doing this many.
    Timer t;
    for( int i = 0; i < N; i++ ) {
        DurableMappedFile f;
        verify( f.open(fn, i%4==1) );
        {
            char *p = (char *) f.getView();
            verify(p);
            if (storageGlobalParams.dur)
                privateViews.makeWritable(p, 4);
            strcpy(p, "zzz");
        }
        if (storageGlobalParams.dur) {
            char *w = (char *) f.view_write();
            if( i % 2 == 0 )
                ++(*w);
            verify( w[6] == 'w' );
        }
    }

    if( t.millis() > 10000 ) {
        mongo::unittest::log() << "warning: MMap LeakTest is unusually slow N:" << N <<
            ' ' << t.millis() << "ms" << endl;
    }
}
Example 8: _REMAPPRIVATEVIEW
static void _REMAPPRIVATEVIEW() {
    // todo: Consider using ProcessInfo herein and watching for getResidentSize to drop. that could be a way
    //       to assure very good behavior here.
    static unsigned startAt;
    static unsigned long long lastRemap;

    LOG(4) << "journal REMAPPRIVATEVIEW" << endl;

    invariant(!commitJob.hasWritten());

    // we want to remap all private views about every 2 seconds.  there could be ~1000 views so
    // we do a little each pass; beyond the remap time, more significantly, there will be copy on write
    // faults after remapping, so doing a little bit at a time will avoid big load spikes on
    // remapping.
    unsigned long long now = curTimeMicros64();
    double fraction = (now-lastRemap)/2000000.0;
    if (storageGlobalParams.durOptions & StorageGlobalParams::DurAlwaysRemap)
        fraction = 1;
    lastRemap = now;

#if defined(_WIN32) || defined(__sunos__)
    // Note that this negatively affects performance.
    // We must grab the exclusive lock here because remapPrivateView() on Windows and
    // Solaris need to grab it as well, due to the lack of an atomic way to remap a
    // memory mapped file.
    // See SERVER-5723 for performance improvement.
    // See SERVER-5680 to see why this code is necessary on Windows.
    // See SERVER-8795 to see why this code is necessary on Solaris.
    LockMongoFilesExclusive lk;
#else
    LockMongoFilesShared lk;
#endif

    set<MongoFile*>& files = MongoFile::getAllFiles();

    unsigned sz = files.size();
    if( sz == 0 )
        return;

    {
        // be careful not to use too much memory if the write rate is
        // extremely high
        double f = privateMapBytes / ((double)UncommittedBytesLimit);
        if( f > fraction ) {
            fraction = f;
        }
        privateMapBytes = 0;
    }

    unsigned ntodo = (unsigned) (sz * fraction);
    if( ntodo < 1 ) ntodo = 1;
    if( ntodo > sz ) ntodo = sz;

    const set<MongoFile*>::iterator b = files.begin();
    const set<MongoFile*>::iterator e = files.end();
    set<MongoFile*>::iterator i = b;

    // skip to our starting position
    for( unsigned x = 0; x < startAt; x++ ) {
        i++;
        if( i == e ) i = b;
    }

    unsigned startedAt = startAt;
    startAt = (startAt + ntodo) % sz; // mark where to start next time

    Timer t;
    for( unsigned x = 0; x < ntodo; x++ ) {
        dassert( i != e );
        if( (*i)->isDurableMappedFile() ) {
            DurableMappedFile *mmf = (DurableMappedFile*) *i;
            verify(mmf);
            if( mmf->willNeedRemap() ) {
                mmf->remapThePrivateView();
            }
            i++;
            if( i == e ) i = b;
        }
    }

    LOG(2) << "journal REMAPPRIVATEVIEW done startedAt: " << startedAt << " n:" << ntodo << ' '
           << t.millis() << "ms" << endl;
}
Example 9: CommitJob::note
void CommitJob::note(void* p, int len) {
    _hasWritten = true;

    // from the point of view of the dur module, it would be fine (i think) to only
    // be read locked here.  but must be at least read locked to avoid race with
    // remapprivateview
    if( !_intentsAndDurOps._alreadyNoted.checkAndSet(p, len) ) {

        /** tips for debugging:
            if you have an incorrect diff between data files in different folders
            (see jstests/dur/quick.js for example),
            turn this on and see what is logged.  if you have a copy of its output from before the
            regression, a simple diff of these lines would tell you a lot likely.
        */
#if 0 && defined(_DEBUG)
        {
            static int n;
            if( ++n < 10000 ) {
                size_t ofs;
                DurableMappedFile *mmf = privateViews._find(w.p, ofs);
                if( mmf ) {
                    log() << "DEBUG note write intent " << w.p << ' ' << mmf->filename()
                          << " ofs:" << hex << ofs << " len:" << w.len << endl;
                }
                else {
                    log() << "DEBUG note write intent " << w.p << ' ' << w.len
                          << " NOT FOUND IN privateViews" << endl;
                }
            }
            else if( n == 10000 ) {
                log() << "DEBUG stopping write intent logging, too much to log" << endl;
            }
        }
#endif

        // remember intent. we will journal it in a bit
        _intentsAndDurOps.insertWriteIntent(p, len);

        {
            // a bit over conservative in counting pagebytes used
            static size_t lastPos; // note this doesn't reset with each commit, but that is ok we aren't being that precise
            size_t x = ((size_t) p) & ~0xfff; // round off to page address (4KB)
            if( x != lastPos ) {
                lastPos = x;
                unsigned b = (len+4095) & ~0xfff;
                _bytes += b;

                if (_bytes > UncommittedBytesLimit * 3) {
                    static time_t lastComplain;
                    static unsigned nComplains;

                    // throttle logging
                    if( ++nComplains < 100 || time(0) - lastComplain >= 60 ) {
                        lastComplain = time(0);
                        warning() << "DR102 too much data written uncommitted " << _bytes/1000000.0 << "MB" << endl;
                        if( nComplains < 10 || nComplains % 10 == 0 ) {
                            // wassert makes getLastError show an error, so we just print stack trace
                            printStackTrace();
                        }
                    }
                }
            }
        }
    }
}
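Example 9's byte accounting rounds the write address down and the length up to a 4KB page boundary, so repeated writes to the same page are only counted roughly once per page. A tiny self-contained sketch of that arithmetic, with hypothetical helper names:

#include <cstddef>
#include <cstdint>

const std::size_t kPageSize = 4096;

// Round an address down to the start of its 4KB page (like ((size_t)p) & ~0xfff above).
std::uintptr_t pageAddress(const void* p) {
    return reinterpret_cast<std::uintptr_t>(p) & ~std::uintptr_t(kPageSize - 1);
}

// Round a length up to a whole number of 4KB pages (like (len+4095) & ~0xfff above).
std::size_t roundUpToPages(std::size_t len) {
    return (len + kPageSize - 1) & ~(kPageSize - 1);
}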