This article collects typical usage examples of the C++ sleepsecs function. If you are wondering how exactly sleepsecs is used in C++, how to call it, or what real code that uses it looks like, the hand-picked examples below may help.
A total of 15 code examples of the sleepsecs function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps surface better C++ code examples.
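Before the examples, here is a minimal, self-contained sketch of the pattern most of them follow: a background thread that sleeps between passes and checks a shutdown flag. The sleepsecs and shutdownRequested helpers below are illustrative stand-ins written for this sketch (MongoDB's real sleepsecs lives in its time-support utilities and inShutdown plays the role of the flag check); only the overall shape matches the examples.
#include <atomic>
#include <chrono>
#include <iostream>
#include <thread>

namespace {
std::atomic<bool> shutdownFlag{false};

// stand-in for MongoDB's sleepsecs(): block the calling thread for s seconds
void sleepsecs(int s) {
    std::this_thread::sleep_for(std::chrono::seconds(s));
}

// stand-in for inShutdown(): has a shutdown been requested?
bool shutdownRequested() {
    return shutdownFlag.load();
}
} // namespace

// A periodic worker in the same shape as the examples below:
// sleep, do a small amount of work, exit when shutdown is requested.
void periodicWorker() {
    int passes = 0;
    while (!shutdownRequested()) {
        sleepsecs(4); // pause between passes
        ++passes;
        if (passes % 15 == 0) { // roughly once a minute at 4 seconds per pass
            std::cout << "periodic status report" << std::endl;
        }
    }
}

int main() {
    std::thread worker(periodicWorker);
    sleepsecs(10);       // let the worker run for a few passes
    shutdownFlag = true; // request shutdown; the worker exits after its next wake-up
    worker.join();
    return 0;
}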
Example 1: cc
/** thread for timing out old cursors */
void ClientCursorMonitor::run() {
Client::initThread("clientcursormon");
Client& client = cc();
unsigned old = curTimeMillis();
const int Secs = 4;
unsigned n = 0;
while ( ! inShutdown() ) {
unsigned now = curTimeMillis();
ClientCursor::idleTimeReport( now - old );
old = now;
sleepsecs(Secs);
if( ++n % (60/4) == 0 /*once a minute*/ ) {
sayMemoryStatus();
}
}
client.shutdown();
}
Example 2: TEST
TEST(LockManagerTest, TxShutdown) {
LockManager lm;
ClientTransaction t1(&lm, 1);
ClientTransaction t2(&lm, 2);
t1.acquire(kShared, 1, ACQUIRED);
lm.shutdown(3000);
// t1 can still do work while quiescing
t1.release(kShared, 1);
t1.acquire(kShared, 2, ACQUIRED);
#ifdef TRANSACTION_REGISTRATION
// t2 is new and should be refused
t2.acquire(kShared, 3, ABORTED);
#else
t2.quit();
#endif
// after the quiescing period, t1's request should be refused
sleepsecs(3);
t1.acquire(kShared, 4, ABORTED);
}
Example 3: name
void DataFileSync::run() {
Client::initThread( name().c_str() );
if (mmapv1GlobalOptions.syncdelay == 0) {
log() << "warning: --syncdelay 0 is not recommended and can have strange performance" << endl;
}
else if (mmapv1GlobalOptions.syncdelay == 1) {
log() << "--syncdelay 1" << endl;
}
else if (mmapv1GlobalOptions.syncdelay != 60) {
LOG(1) << "--syncdelay " << mmapv1GlobalOptions.syncdelay << endl;
}
int time_flushing = 0;
while ( ! inShutdown() ) {
_diaglog.flush();
if (mmapv1GlobalOptions.syncdelay == 0) {
// in case at some point we add an option to change at runtime
sleepsecs(5);
continue;
}
sleepmillis((long long) std::max(0.0, (mmapv1GlobalOptions.syncdelay * 1000) - time_flushing));
if ( inShutdown() ) {
// occasional issue trying to flush during shutdown when sleep interrupted
break;
}
Date_t start = jsTime();
StorageEngine* storageEngine = getGlobalEnvironment()->getGlobalStorageEngine();
int numFiles = storageEngine->flushAllFiles( true );
time_flushing = (int) (jsTime() - start);
_flushed(time_flushing);
if( logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(1)) || time_flushing >= 10000 ) {
log() << "flushing mmaps took " << time_flushing << "ms " << " for " << numFiles << " files" << endl;
}
}
}
Example 4: r
/* note: not yet in mutex at this point.
returns >= 0 if ok. return -1 if you want to reconnect.
return value of zero indicates no sleep necessary before next call
*/
int ReplSource::sync(int& nApplied) {
_sleepAdviceTime = 0;
ReplInfo r("sync");
if ( !cmdLine.quiet ) {
Nullstream& l = log();
l << "repl: from ";
if( sourceName() != "main" ) {
l << "source:" << sourceName() << ' ';
}
l << "host:" << hostName << endl;
}
nClonedThisPass = 0;
// FIXME Handle cases where this db isn't on default port, or default port is spec'd in hostName.
if ( (string("localhost") == hostName || string("127.0.0.1") == hostName) && cmdLine.port == CmdLine::DefaultDBPort ) {
log() << "repl: can't sync from self (localhost). sources configuration may be wrong." << endl;
sleepsecs(5);
return -1;
}
if ( !oplogReader.connect(hostName) ) {
log(4) << "repl: can't connect to sync source" << endl;
return -1;
}
/*
// get current mtime at the server.
BSONObj o = conn->findOne("admin.$cmd", opTimeQuery);
BSONElement e = o.getField("optime");
if( e.eoo() ) {
log() << "repl: failed to get cur optime from master" << endl;
log() << " " << o.toString() << endl;
return false;
}
uassert( 10124 , e.type() == Date );
OpTime serverCurTime;
serverCurTime.asDate() = e.date();
*/
return sync_pullOpLog(nApplied);
}
Example 5: run
virtual void run() {
Client::initThread( name().c_str() );
while ( ! inShutdown() ) {
sleepsecs( 60 );
LOG(3) << "TTLMonitor thread awake" << endl;
if ( lockedForWriting() ) {
// note: this is not perfect as you can go into fsync+lock between
// this and actually doing the delete later
LOG(3) << " locked for writing" << endl;
continue;
}
// if part of replSet but not in a readable state (e.g. during initial sync), skip.
if ( theReplSet && !theReplSet->state().readable() )
continue;
set<string> dbs;
{
Lock::DBRead lk( "local" );
dbHolder().getAllShortNames( dbs );
}
ttlPasses.increment();
for ( set<string>::const_iterator i=dbs.begin(); i!=dbs.end(); ++i ) {
string db = *i;
try {
doTTLForDB( db );
}
catch ( DBException& e ) {
error() << "error processing ttl for db: " << db << " " << e << endl;
}
}
}
}
Example 6: syncDoInitialSync
void syncDoInitialSync() {
static const int maxFailedAttempts = 10;
OperationContextImpl txn;
createOplog(&txn);
int failedAttempts = 0;
while ( failedAttempts < maxFailedAttempts ) {
try {
_initialSync();
break;
}
catch(DBException& e) {
failedAttempts++;
mongoutils::str::stream msg;
error() << "initial sync exception: " << e.toString() << " " <<
(maxFailedAttempts - failedAttempts) << " attempts remaining";
sleepsecs(5);
}
}
fassert( 16233, failedAttempts < maxFailedAttempts);
}
Example 7: run
bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
cc().curop()->suppressFromCurop();
cc().curop()->setExpectedLatencyMs( 30000 );
BSONElement e = cmdObj.firstElement();
if ( e.type() != jstOID ) {
errmsg = "need oid as first value";
return 0;
}
// get the command issuer's (a mongos) serverID
const OID id = e.__oid();
// the command issuer is blocked awaiting a response
// we want to do return at least at every 5 minutes so sockets don't timeout
BSONObj z;
if ( writeBackManager.getWritebackQueue(id.str())->queue.blockingPop( z, 5 * 60 /* 5 minutes */ ) ) {
LOG(1) << "WriteBackCommand got : " << z << endl;
result.append( "data" , z );
}
else {
result.appendBool( "noop" , true );
}
#ifdef _DEBUG
PseudoRandom r(static_cast<int64_t>(time(0)));
// Sleep a short amount of time usually
int sleepFor = r.nextInt32( 10 );
sleepmillis( sleepFor );
// Sleep a longer amount of time every once and awhile
int sleepLong = r.nextInt32( 50 );
if( sleepLong == 0 ) sleepsecs( 2 );
#endif
return true;
}
Example 8: run
int run() {
_sleep = getParam( "sleep" , _sleep );
auth();
BSONObj prev = getData();
while ( true ) {
sleepsecs( _sleep );
BSONObj now;
try {
now = getData();
}
catch ( std::exception& e ) {
cout << "can't get data: " << e.what() << endl;
continue;
}
if ( now.isEmpty() )
return -2;
try {
printDiff( prev , now );
}
catch ( AssertionException& e ) {
cout << "\nerror: " << e.what() << "\n"
<< now
<< endl;
}
prev = now;
}
return 0;
}
Example 9: statsThread
void statsThread() {
/*cout << "TEMP disabled statsthread" << endl;
if( 1 )
return;*/
Client::initThread("stats");
unsigned long long timeLastPass = 0;
while ( 1 ) {
{
/* todo: do we even need readlock here? if so for what? */
readlock lk("");
Top::completeSnapshot();
q = (q+1)%NStats;
Timing timing;
dbMutex.info().getTimingInfo(timing.start, timing.timeLocked);
unsigned long long now = curTimeMicros64();
if ( timeLastPass ) {
unsigned long long dt = now - timeLastPass;
unsigned long long dlocked = timing.timeLocked - tlast.timeLocked;
{
stringstream ss;
ss << dt / 1000 << '\t';
ss << dlocked / 1000 << '\t';
if ( dt )
ss << (dlocked*100)/dt << '%';
string s = ss.str();
if ( cmdLine.cpu )
log() << "cpu: " << s << endl;
lockStats[q] = s;
ClientCursor::idleTimeReport( (unsigned) ((dt - dlocked)/1000) );
}
}
timeLastPass = now;
tlast = timing;
}
sleepsecs(4);
}
}
Example 10: run
int run() {
_sleep = getParam( "sleep" , _sleep );
if (isMongos()) {
log() << "mongotop only works on instances of mongod." << endl;
return EXIT_FAILURE;
}
NamespaceStats prev = getData();
while ( true ) {
sleepsecs( _sleep );
NamespaceStats now;
try {
now = getData();
}
catch ( std::exception& e ) {
cout << "can't get data: " << e.what() << endl;
continue;
}
if ( now.size() == 0 )
return -2;
try {
printDiff( prev , now );
}
catch ( AssertionException& e ) {
cout << "\nerror: " << e.what() << endl;
}
prev = now;
}
return 0;
}
Example 11: run
virtual void run(){
int minutesRunning = 0;
std::string lastRunningTestName, currentTestName;
{
boost::lock_guard<boost::mutex> lk( globalCurrentTestNameMutex );
lastRunningTestName = globalCurrentTestName;
}
while (true) {
sleepsecs(60);
minutesRunning++;
{
boost::lock_guard<boost::mutex> lk( globalCurrentTestNameMutex );
currentTestName = globalCurrentTestName;
}
if (currentTestName != lastRunningTestName) {
minutesRunning = 0;
lastRunningTestName = currentTestName;
}
if (minutesRunning > 30){
log() << currentTestName << " has been running for more than 30 minutes. aborting." << endl;
::abort();
}
else if (minutesRunning > 1){
warning() << currentTestName << " has been running for more than " << minutesRunning-1 << " minutes." << endl;
// See what is stuck
getGlobalLockManager()->dump();
}
}
}
Example 12: while
void PeriodicTask::Runner::run() {
int sleeptime = 60;
DEV sleeptime = 5; // to catch race conditions
while ( ! inShutdown() ) {
sleepsecs( sleeptime );
scoped_spinlock lk( _lock );
size_t size = _tasks.size();
for ( size_t i=0; i<size; i++ ) {
PeriodicTask * t = _tasks[i];
if ( ! t )
continue;
if ( inShutdown() )
break;
Timer timer;
try {
t->taskDoWork();
}
catch ( std::exception& e ) {
error() << "task: " << t->taskName() << " failed: " << e.what() << endl;
}
catch ( ... ) {
error() << "task: " << t->taskName() << " failed with unknown error" << endl;
}
int ms = timer.millis();
LOG( ms <= 3 ? 3 : 0 ) << "task: " << t->taskName() << " took: " << ms << "ms" << endl;
}
}
}
Example 13: lock
void BackgroundSync::_produce(OperationContext* txn, executor::TaskExecutor* taskExecutor) {
// this oplog reader does not do a handshake because we don't want the server it's syncing
// from to track how far it has synced
{
stdx::unique_lock<stdx::mutex> lock(_mutex);
if (_lastOpTimeFetched.isNull()) {
// then we're initial syncing and we're still waiting for this to be set
lock.unlock();
sleepsecs(1);
// if there is no one to sync from
return;
}
if (_replCoord->isWaitingForApplierToDrain() || _replCoord->getMemberState().primary() ||
inShutdownStrict()) {
return;
}
}
while (MONGO_FAIL_POINT(rsBgSyncProduce)) {
sleepmillis(0);
}
// find a target to sync from the last optime fetched
OpTime lastOpTimeFetched;
{
stdx::unique_lock<stdx::mutex> lock(_mutex);
lastOpTimeFetched = _lastOpTimeFetched;
_syncSourceHost = HostAndPort();
}
OplogReader syncSourceReader;
syncSourceReader.connectToSyncSource(txn, lastOpTimeFetched, _replCoord);
// no server found
if (syncSourceReader.getHost().empty()) {
sleepsecs(1);
// if there is no one to sync from
return;
}
long long lastHashFetched;
{
stdx::lock_guard<stdx::mutex> lock(_mutex);
if (_pause) {
return;
}
lastOpTimeFetched = _lastOpTimeFetched;
lastHashFetched = _lastFetchedHash;
_syncSourceHost = syncSourceReader.getHost();
_replCoord->signalUpstreamUpdater();
}
const Milliseconds oplogSocketTimeout(OplogReader::kSocketTimeout);
// Prefer host in oplog reader to _syncSourceHost because _syncSourceHost may be cleared
// if sync source feedback fails.
const HostAndPort source = syncSourceReader.getHost();
syncSourceReader.resetConnection();
// no more references to oplog reader from here on.
// If this status is not OK after the fetcher returns from wait(),
// proceed to execute rollback
Status remoteOplogStartStatus = Status::OK();
auto fetcherCallback = stdx::bind(&BackgroundSync::_fetcherCallback,
this,
stdx::placeholders::_1,
stdx::placeholders::_3,
stdx::cref(source),
lastOpTimeFetched,
lastHashFetched,
&remoteOplogStartStatus);
auto cmdObj = BSON("find" << nsToCollectionSubstring(rsOplogName) << "filter"
<< BSON("ts" << BSON("$gte" << lastOpTimeFetched.getTimestamp()))
<< "tailable" << true << "oplogReplay" << true << "awaitData" << true
<< "maxTimeMS" << int(fetcherMaxTimeMS.count()));
Fetcher fetcher(taskExecutor,
source,
nsToDatabase(rsOplogName),
cmdObj,
fetcherCallback,
rpc::makeEmptyMetadata());
auto scheduleStatus = fetcher.schedule();
if (!scheduleStatus.isOK()) {
warning() << "unable to schedule fetcher to read remote oplog on " << source << ": "
<< scheduleStatus;
return;
}
fetcher.wait();
// If the background sync is paused after the fetcher is started, we need to
// re-evaluate our sync source and oplog common point.
if (isPaused()) {
return;
}
// Execute rollback if necessary.
// Rollback is a synchronous operation that uses the task executor and may not be
//......... remainder of this example omitted .........
Example 14: runSyncThread
void runSyncThread() {
Client::initThread("rsSync");
AuthorizationSession::get(cc())->grantInternalAuthorization();
ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
// Set initial indexPrefetch setting
const std::string& prefetch = replCoord->getSettings().rsIndexPrefetch;
if (!prefetch.empty()) {
BackgroundSync::IndexPrefetchConfig prefetchConfig = BackgroundSync::PREFETCH_ALL;
if (prefetch == "none")
prefetchConfig = BackgroundSync::PREFETCH_NONE;
else if (prefetch == "_id_only")
prefetchConfig = BackgroundSync::PREFETCH_ID_ONLY;
else if (prefetch == "all")
prefetchConfig = BackgroundSync::PREFETCH_ALL;
else {
warning() << "unrecognized indexPrefetch setting " << prefetch << ", defaulting "
<< "to \"all\"";
}
BackgroundSync::get()->setIndexPrefetchConfig(prefetchConfig);
}
while (!inShutdown()) {
// After a reconfig, we may not be in the replica set anymore, so
// check that we are in the set (and not an arbiter) before
// trying to sync with other replicas.
// TODO(spencer): Use a condition variable to await loading a config
if (replCoord->getMemberState().startup()) {
warning() << "did not receive a valid config yet";
sleepsecs(1);
continue;
}
const MemberState memberState = replCoord->getMemberState();
// An arbiter can never transition to any other state, and doesn't replicate, ever
if (memberState.arbiter()) {
break;
}
// If we are removed then we don't belong to the set anymore
if (memberState.removed()) {
sleepsecs(5);
continue;
}
try {
if (memberState.primary() && !replCoord->isWaitingForApplierToDrain()) {
sleepsecs(1);
continue;
}
bool initialSyncRequested = BackgroundSync::get()->getInitialSyncRequestedFlag();
// Check criteria for doing an initial sync:
// 1. If the oplog is empty, do an initial sync
// 2. If minValid has _initialSyncFlag set, do an initial sync
// 3. If initialSyncRequested is true
if (getGlobalReplicationCoordinator()->getMyLastOptime().isNull() ||
getInitialSyncFlag() || initialSyncRequested) {
syncDoInitialSync();
continue; // start from top again in case sync failed.
}
if (!replCoord->setFollowerMode(MemberState::RS_RECOVERING)) {
continue;
}
/* we have some data. continue tailing. */
SyncTail tail(BackgroundSync::get(), multiSyncApply);
tail.oplogApplication();
} catch (const DBException& e) {
log() << "Received exception while syncing: " << e.toString();
sleepsecs(10);
} catch (const std::exception& e) {
log() << "Received exception while syncing: " << e.what();
sleepsecs(10);
}
}
}
Example 15: lock
void BackgroundSync::produce(OperationContext* txn) {
// this oplog reader does not do a handshake because we don't want the server it's syncing
// from to track how far it has synced
{
boost::unique_lock<boost::mutex> lock(_mutex);
if (_lastOpTimeFetched.isNull()) {
// then we're initial syncing and we're still waiting for this to be set
lock.unlock();
sleepsecs(1);
// if there is no one to sync from
return;
}
// Wait until we've applied the ops we have before we choose a sync target
while (!_appliedBuffer) {
_condvar.wait(lock);
}
}
while (MONGO_FAIL_POINT(rsBgSyncProduce)) {
sleepmillis(0);
}
// find a target to sync from the last optime fetched
OpTime lastOpTimeFetched;
{
boost::unique_lock<boost::mutex> lock(_mutex);
lastOpTimeFetched = _lastOpTimeFetched;
_syncSourceHost = HostAndPort();
}
_syncSourceReader.resetConnection();
_syncSourceReader.connectToSyncSource(txn, lastOpTimeFetched, _replCoord);
{
boost::unique_lock<boost::mutex> lock(_mutex);
// no server found
if (_syncSourceReader.getHost().empty()) {
lock.unlock();
sleepsecs(1);
// if there is no one to sync from
return;
}
lastOpTimeFetched = _lastOpTimeFetched;
_syncSourceHost = _syncSourceReader.getHost();
}
_syncSourceReader.tailingQueryGTE(rsoplog, lastOpTimeFetched);
// if target cut connections between connecting and querying (for
// example, because it stepped down) we might not have a cursor
if (!_syncSourceReader.haveCursor()) {
return;
}
if (_rollbackIfNeeded(txn, _syncSourceReader)) {
stop();
return;
}
while (!inShutdown()) {
if (!_syncSourceReader.moreInCurrentBatch()) {
// Check some things periodically
// (whenever we run out of items in the
// current cursor batch)
int bs = _syncSourceReader.currentBatchMessageSize();
if( bs > 0 && bs < BatchIsSmallish ) {
// on a very low latency network, if we don't wait a little, we'll be
// getting ops to write almost one at a time. this will both be expensive
// for the upstream server as well as potentially defeating our parallel
// application of batches on the secondary.
//
// the inference here is basically if the batch is really small, we are
// "caught up".
//
sleepmillis(SleepToAllowBatchingMillis);
}
// If we are transitioning to primary state, we need to leave
// this loop in order to go into bgsync-pause mode.
if (_replCoord->isWaitingForApplierToDrain() ||
_replCoord->getCurrentMemberState().primary()) {
return;
}
// re-evaluate quality of sync target
if (shouldChangeSyncSource()) {
return;
}
{
//record time for each getmore
TimerHolder batchTimer(&getmoreReplStats);
// This calls receiveMore() on the oplogreader cursor.
// It can wait up to five seconds for more data.
_syncSourceReader.more();
}
networkByteStats.increment(_syncSourceReader.currentBatchMessageSize());
//......... remainder of this example omitted .........