This article collects typical usage examples of the C++ method Milliseconds::count. If you are wondering how Milliseconds::count is used in C++, or what it is good for, the curated examples below may help. You can also explore further usage examples of the containing class, Milliseconds.
Ten code examples of Milliseconds::count are shown below, sorted by popularity by default.
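Before the examples, a minimal self-contained sketch of what count() does. Each project below defines its own Milliseconds type, but in all of them it behaves like a std::chrono duration whose count() returns the raw tick count as an integer; the standard-library alias is used here as a stand-in, which is an assumption, not taken from any of the snippets:

#include <chrono>
#include <iostream>

using Milliseconds = std::chrono::milliseconds;  // stand-in for each project's own alias

int main() {
    Milliseconds timeout{1500};
    // count() returns the raw number of ticks (milliseconds here) as an integer type.
    std::cout << "timeout = " << timeout.count() << "ms\n";  // prints: timeout = 1500ms
}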
Example 1: randtime
Milliseconds randtime(Milliseconds const& min, Milliseconds const& max)
{
    long long diff = max.count() - min.count();
    ASSERT(diff >= 0);
    ASSERT(diff <= (uint32)-1);  // the range must fit in the 32-bit bounds urand() accepts
    return min + Milliseconds(urand(0, diff));
}
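A brief note on the pattern: count() yields a signed long long, so the two assertions validate the range before it is handed to the 32-bit random helper: diff must be non-negative and must fit in (uint32)-1, the maximum 32-bit value. Adding Milliseconds(urand(0, diff)) back onto min then yields a uniformly distributed duration in [min, max]. (urand and ASSERT are the host project's helpers; their exact signatures are assumed here, not shown in the snippet.)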
Example 2: roundTime
Date_t roundTime(Date_t now, Milliseconds period) {
    // Note: auto type deduction is explicitly avoided here to ensure rigid type correctness
    long long clock_duration = now.toMillisSinceEpoch();
    long long now_next_period = clock_duration + period.count();
    long long excess_time(now_next_period % period.count());
    long long next_time = now_next_period - excess_time;
    return Date_t::fromMillisSinceEpoch(next_time);
}
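To make the arithmetic concrete, here is a small self-contained check of the same computation on raw millisecond counts (a sketch that replaces Date_t/Milliseconds with plain integers; the helper name roundTimeMillis is made up for illustration):

#include <cassert>

// Same arithmetic as roundTime above, on raw millisecond counts.
long long roundTimeMillis(long long nowMs, long long periodMs) {
    long long nowNextPeriod = nowMs + periodMs;        // jump one period ahead
    return nowNextPeriod - (nowNextPeriod % periodMs); // drop the excess past the boundary
}

int main() {
    assert(roundTimeMillis(7250, 5000) == 10000);   // rounds up to the next multiple of period
    assert(roundTimeMillis(9999, 5000) == 10000);
    assert(roundTimeMillis(10000, 5000) == 15000);  // an exact boundary advances a full period
}

In other words, the function returns the smallest multiple of period strictly greater than now.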
Example 3: DistLockCatalogImpl
DistLockCatalogImpl::DistLockCatalogImpl(RemoteCommandTargeter* targeter,
                                         ShardRegistry* shardRegistry,
                                         Milliseconds writeConcernTimeout)
    : _client(shardRegistry),
      _targeter(targeter),
      _writeConcern(WriteConcernOptions(WriteConcernOptions::kMajority,
                                        WriteConcernOptions::JOURNAL,
                                        writeConcernTimeout.count())),
      _lockPingNS(LockpingsType::ConfigNS),
      _locksNS(LocksType::ConfigNS) {}
Example 4: DistLockCatalogImpl
DistLockCatalogImpl::DistLockCatalogImpl(RemoteCommandTargeter* targeter,
                                         RemoteCommandRunner* executor,
                                         Milliseconds writeConcernTimeout)
    : _cmdRunner(executor),
      _targeter(targeter),
      _writeConcern(WriteConcernOptions(WriteConcernOptions::kMajority,
                                        WriteConcernOptions::JOURNAL,
                                        writeConcernTimeout.count())),
      _lockPingNS(LockpingsType::ConfigNS),
      _locksNS(LocksType::ConfigNS) {}
Example 5: SharedRecursiveLock
//-----------------------------------------------------------------------
HTTP::HTTPQuery::HTTPQuery(
                           const make_private &,
                           HTTPPtr outer,
                           IHTTPQueryDelegatePtr delegate,
                           bool isPost,
                           const char *userAgent,
                           const char *url,
                           const BYTE *postData,
                           size_t postDataLengthInBytes,
                           const char *postDataMimeType,
                           Milliseconds timeout
                           ) :
    SharedRecursiveLock(outer ? *outer : SharedRecursiveLock::create()),
    MessageQueueAssociator(IHelper::getServiceQueue()),
    mOuter(outer),
    mDelegate(IHTTPQueryDelegateProxy::create(Helper::getServiceQueue(), delegate)),
    mIsPost(isPost),
    mUserAgent(userAgent),
    mURL(url),
    mMimeType(postDataMimeType),
    mTimeout(timeout),
    mStatusCode(HttpStatusCode::None)
{
    ZS_LOG_DEBUG(log("created"))

    if (0 != postDataLengthInBytes) {
        mPostData.CleanNew(postDataLengthInBytes);
        memcpy(mPostData.BytePtr(), postData, postDataLengthInBytes);
    }

    ZS_EVENTING_8(
        x, i, Debug, ServicesHttpQueryCreate, os, Http, Start,
        puid, id, mID,
        bool, isPost, mIsPost,
        string, userAgent, mUserAgent,
        string, url, mURL,
        buffer, postData, postData,
        size, postSize, postDataLengthInBytes,
        string, postDataMimeType, postDataMimeType,
        duration, timeout, timeout.count()
    );
}
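Two details worth noting in this constructor: the SharedRecursiveLock initializer shares the outer HTTP object's lock when outer is non-null and otherwise creates a fresh one, and the Milliseconds timeout is stored as a duration (mTimeout) and only reduced to a raw integer via timeout.count() at the eventing/tracing boundary, where a plain number is required.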
Example 6: acquireConnection
ConnectionPool::ConnectionList::iterator ConnectionPool::acquireConnection(
        const HostAndPort& target,
        Date_t now,
        Milliseconds timeout) {
    boost::unique_lock<boost::mutex> lk(_mutex);

    // Clean up connections on stale/unused hosts
    _cleanUpStaleHosts_inlock(now);

    for (HostConnectionMap::iterator hostConns;
         ((hostConns = _connections.find(target)) != _connections.end());) {
        // Clean up the requested host to remove stale/unused connections
        _cleanUpOlderThan_inlock(now, &hostConns->second);
        if (hostConns->second.empty()) {
            // prevent host from causing unnecessary cleanups
            _lastUsedHosts[hostConns->first] = kNeverTooStale;
            break;
        }

        _inUseConnections.splice(_inUseConnections.begin(),
                                 hostConns->second,
                                 hostConns->second.begin());

        const ConnectionList::iterator candidate = _inUseConnections.begin();
        lk.unlock();
        try {
            if (candidate->conn->isStillConnected()) {
                // setSoTimeout takes a double representing the number of seconds for send and
                // receive timeouts. Thus, we must take count() and divide by
                // 1000.0 to get the number of seconds with a fractional part.
                candidate->conn->setSoTimeout(timeout.count() / 1000.0);
                return candidate;
            }
        }
        catch (...) {
            lk.lock();
            _destroyConnection_inlock(&_inUseConnections, candidate);
            throw;
        }

        lk.lock();
        _destroyConnection_inlock(&_inUseConnections, candidate);
    }

    // No idle connection in the pool; make a new one.
    lk.unlock();

    // std::auto_ptr dates this snippet to pre-C++11 code; the newer variant of this
    // function in Example 7 below uses std::unique_ptr instead.
    std::auto_ptr<DBClientConnection> conn(new DBClientConnection);

    // setSoTimeout takes a double representing the number of seconds for send and receive
    // timeouts. Thus, we must take count() and divide by 1000.0 to get the number
    // of seconds with a fractional part.
    conn->setSoTimeout(timeout.count() / 1000.0);

    std::string errmsg;
    uassert(28640,
            str::stream() << "Failed attempt to connect to "
                          << target.toString() << "; " << errmsg,
            conn->connect(target, errmsg));

    conn->port().tag |= _messagingPortTags;

    if (getGlobalAuthorizationManager()->isAuthEnabled()) {
        uassert(ErrorCodes::AuthenticationFailed,
                "Missing credentials for authenticating as internal user",
                isInternalAuthSet());
        conn->auth(getInternalUserAuthParamsWithFallback());
    }

    lk.lock();
    return _inUseConnections.insert(_inUseConnections.begin(),
                                    ConnectionInfo(conn.release(), now));
}
Example 7: acquireConnection
ConnectionPool::ConnectionList::iterator ConnectionPool::acquireConnection(
    const HostAndPort& target, Date_t now, Milliseconds timeout) {
    stdx::unique_lock<stdx::mutex> lk(_mutex);

    // Clean up connections on stale/unused hosts
    _cleanUpStaleHosts_inlock(now);

    for (HostConnectionMap::iterator hostConns;
         (hostConns = _connections.find(target)) != _connections.end();) {
        // Clean up the requested host to remove stale/unused connections
        _cleanUpOlderThan_inlock(now, &hostConns->second);
        if (hostConns->second.empty()) {
            // prevent host from causing unnecessary cleanups
            _lastUsedHosts[hostConns->first] = kNeverTooStale;
            break;
        }

        _inUseConnections.splice(
            _inUseConnections.begin(), hostConns->second, hostConns->second.begin());

        const ConnectionList::iterator candidate = _inUseConnections.begin();
        lk.unlock();
        try {
            if (candidate->conn->isStillConnected()) {
                // setSoTimeout takes a double representing the number of seconds for send and
                // receive timeouts. Thus, we must take count() and divide by
                // 1000.0 to get the number of seconds with a fractional part.
                candidate->conn->setSoTimeout(timeout.count() / 1000.0);
                return candidate;
            }
        } catch (...) {
            lk.lock();
            _destroyConnection_inlock(&_inUseConnections, candidate);
            throw;
        }

        lk.lock();
        _destroyConnection_inlock(&_inUseConnections, candidate);
    }

    // No idle connection in the pool; make a new one.
    lk.unlock();

    std::unique_ptr<DBClientConnection> conn(new DBClientConnection());

    // setSoTimeout takes a double representing the number of seconds for send and receive
    // timeouts. Thus, we must take count() and divide by 1000.0 to get the number
    // of seconds with a fractional part.
    conn->setSoTimeout(timeout.count() / 1000.0);

    if (_hook) {
        uassertStatusOK(
            conn->connect(target,
                          [this, &target](const executor::RemoteCommandResponse& isMasterReply) {
                              return _hook->validateHost(target, isMasterReply);
                          }));

        auto postConnectRequest = uassertStatusOK(_hook->makeRequest(target));

        // We might not have a postConnectRequest
        if (postConnectRequest != boost::none) {
            auto start = Date_t::now();
            auto reply =
                conn->runCommandWithMetadata(postConnectRequest->dbname,
                                             postConnectRequest->cmdObj.firstElementFieldName(),
                                             postConnectRequest->metadata,
                                             postConnectRequest->cmdObj);

            auto rcr = executor::RemoteCommandResponse(reply->getCommandReply().getOwned(),
                                                       reply->getMetadata().getOwned(),
                                                       Date_t::now() - start);

            uassertStatusOK(_hook->handleReply(target, std::move(rcr)));
        }
    } else {
        uassertStatusOK(conn->connect(target));
    }

    conn->port().tag |= _messagingPortTags;

    if (getGlobalAuthorizationManager()->isAuthEnabled()) {
        uassert(ErrorCodes::AuthenticationFailed,
                "Missing credentials for authenticating as internal user",
                isInternalAuthSet());
        conn->auth(getInternalUserAuthParamsWithFallback());
    }

    lk.lock();
    return _inUseConnections.insert(_inUseConnections.begin(), ConnectionInfo(conn.release(), now));
}
Example 8: _distLockPingThread
void LegacyDistLockPinger::_distLockPingThread(ConnectionString addr,
                                               const string& process,
                                               Milliseconds sleepTime) {
    setThreadName("LockPinger");

    string pingId = pingThreadId(addr, process);

    LOG(0) << "creating distributed lock ping thread for " << addr
           << " and process " << process << " (sleeping for " << sleepTime.count() << "ms)";

    static int loops = 0;
    Date_t lastPingTime = jsTime();
    while (!shouldStopPinging(addr, process)) {
        LOG(3) << "distributed lock pinger '" << pingId << "' about to ping.";

        Date_t pingTime;
        try {
            ScopedDbConnection conn(addr.toString(), 30.0);

            pingTime = jsTime();
            const auto elapsed = pingTime - lastPingTime;
            if (elapsed > 10 * sleepTime) {
                warning() << "Lock pinger for addr: " << addr
                          << ", proc: " << process
                          << " was inactive for " << elapsed;
            }
            lastPingTime = pingTime;

            // Refresh the entry corresponding to this process in the lockpings collection.
            conn->update(LockpingsType::ConfigNS,
                         BSON(LockpingsType::process(process)),
                         BSON("$set" << BSON(LockpingsType::ping(pingTime))),
                         true);

            string err = conn->getLastError();
            if (!err.empty()) {
                warning() << "pinging failed for distributed lock pinger '" << pingId << "'."
                          << causedBy(err);
                conn.done();

                if (!shouldStopPinging(addr, process)) {
                    waitTillNextPingTime(sleepTime);
                }
                continue;
            }

            // Remove really old entries from the lockpings collection if they're not
            // holding a lock. This may happen if an instance of a process was taken down
            // and no new instance came up to replace it for quite a while.
            // NOTE this is NOT the same as the standard take-over mechanism, which forces
            // the lock entry.
            BSONObj fieldsToReturn = BSON(LocksType::state() << 1
                                          << LocksType::process() << 1);
            auto activeLocks =
                conn->query(LocksType::ConfigNS,
                            BSON(LocksType::state() << NE << LocksType::UNLOCKED));

            uassert(16060,
                    str::stream() << "cannot query locks collection on config server "
                                  << conn.getHost(),
                    activeLocks.get());

            std::set<string> pids;
            while (activeLocks->more()) {
                BSONObj lock = activeLocks->nextSafe();
                if (!lock[LocksType::process()].eoo()) {
                    pids.insert(lock[LocksType::process()].str());
                } else {
                    warning() << "found incorrect lock document during lock ping cleanup: "
                              << lock.toString();
                }
            }

            // This can potentially delete ping entries that are actually active (if the clock
            // of another pinger is too skewed). This is still fine as the lock logic only
            // checks if there is a change in the ping document and the document going away
            // is a valid change.
            Date_t fourDays = pingTime - stdx::chrono::hours{4 * 24};
            conn->remove(LockpingsType::ConfigNS,
                         BSON(LockpingsType::process() << NIN << pids
                              << LockpingsType::ping() << LT << fourDays));

            err = conn->getLastError();
            if (!err.empty()) {
                warning() << "ping cleanup for distributed lock pinger '" << pingId
                          << "' failed." << causedBy(err);
                conn.done();

                if (!shouldStopPinging(addr, process)) {
                    waitTillNextPingTime(sleepTime);
                }
                continue;
            }

            LOG(1 - (loops % 10 == 0 ? 1 : 0)) << "cluster " << addr
//......... (the rest of this example is omitted here) .........
Example 9: setTimeout
void MessagingPort::setTimeout(Milliseconds millis) {
    double timeout = double(millis.count()) / 1000;
    _psock->setTimeout(timeout);
}
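Note the explicit double(...) conversion: millis.count() returns an integer, so without the cast the division by 1000 would truncate (e.g. 500 ms would become 0 s instead of 0.5 s).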
Example 10: Formatter
inline void Formatter(FormatData& formatData, const Milliseconds& milliseconds)
{
    Formatter(formatData, milliseconds.count());
    formatData.string.append(U"ms", 2);
}
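Here the tick count is first formatted as a plain integer and then the two-character UTF-32 literal U"ms" is appended, so formatting Milliseconds{250} would produce the text 250ms. (The FormatData type and the U"..." literal suggest this snippet comes from a Siv3D-style formatting layer where FormatData accumulates a UTF-32 string; that attribution is an inference, not stated by the source.)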