This article collects typical usage examples of the C++ method stdx::unique_lock::unlock. If you have been wondering what stdx::unique_lock::unlock does, how to call it, or what idiomatic uses look like, the curated examples below should help; you can also explore further usage examples of the enclosing class, stdx::unique_lock.
Below are 12 code examples of the unique_lock::unlock method, sorted by popularity by default.
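All 12 examples share one idiom: take a stdx::unique_lock, mutate shared state, then call unlock() before invoking user callbacks, so the callbacks never run under the lock and may safely re-enter the locking object. As a warm-up, here is a minimal self-contained sketch of that idiom. It uses plain std:: types on the assumption that MongoDB's stdx namespace aliases the standard-library ones; the Worker class and its push/drain methods are hypothetical names for illustration only.

#include <functional>
#include <mutex>
#include <utility>
#include <vector>

// Hypothetical worker demonstrating the unlock-before-callback idiom.
class Worker {
public:
    void push(std::function<void()> task) {
        std::lock_guard<std::mutex> lk(_mutex);
        _tasks.push_back(std::move(task));
    }

    void drain() {
        std::unique_lock<std::mutex> lk(_mutex);
        // Move the queued work out while the lock is held...
        std::vector<std::function<void()>> tasks;
        tasks.swap(_tasks);
        // ...then drop the lock before invoking callbacks, so a callback
        // that calls push() again cannot deadlock on _mutex.
        lk.unlock();
        for (auto& task : tasks) {
            task();
        }
    }

private:
    std::mutex _mutex;
    std::vector<std::function<void()>> _tasks;
};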
Example 1: cvWaitUntilWithClockSource
static NOINLINE_DECL stdx::cv_status cvWaitUntilWithClockSource(ClockSource* clockSource,
stdx::condition_variable& cv,
stdx::unique_lock<stdx::mutex>& m,
Date_t deadline) {
if (deadline <= clockSource->now()) {
return stdx::cv_status::timeout;
}
struct AlarmInfo {
stdx::mutex controlMutex;
stdx::mutex* waitMutex;
stdx::condition_variable* waitCV;
stdx::cv_status cvWaitResult = stdx::cv_status::no_timeout;
};
auto alarmInfo = std::make_shared<AlarmInfo>();
alarmInfo->waitCV = &cv;
alarmInfo->waitMutex = m.mutex();
invariantOK(clockSource->setAlarm(deadline, [alarmInfo] {
stdx::lock_guard<stdx::mutex> controlLk(alarmInfo->controlMutex);
alarmInfo->cvWaitResult = stdx::cv_status::timeout;
if (!alarmInfo->waitMutex) {
return;
}
stdx::lock_guard<stdx::mutex> waitLk(*alarmInfo->waitMutex);
alarmInfo->waitCV->notify_all();
}));
cv.wait(m);
m.unlock();
stdx::lock_guard<stdx::mutex> controlLk(alarmInfo->controlMutex);
m.lock();
alarmInfo->waitMutex = nullptr;
alarmInfo->waitCV = nullptr;
return alarmInfo->cvWaitResult;
}
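The unlock/lock shuffle after cv.wait(m) is the crux of this example. The alarm callback acquires controlMutex first and the wait mutex second; if the waiter re-took controlMutex while still holding m, the two threads would acquire the mutexes in opposite orders and could deadlock against a concurrently firing alarm. Dropping m first makes both sides agree on controlMutex-first ordering, and nulling waitMutex and waitCV under controlMutex turns any later firing into a harmless no-op.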
Example 2: scheduleIntoPool_inlock
void ThreadPoolTaskExecutor::scheduleIntoPool_inlock(WorkQueue* fromQueue,
const WorkQueue::iterator& begin,
const WorkQueue::iterator& end,
stdx::unique_lock<stdx::mutex> lk) {
dassert(fromQueue != &_poolInProgressQueue);
std::vector<std::shared_ptr<CallbackState>> todo(begin, end);
_poolInProgressQueue.splice(_poolInProgressQueue.end(), *fromQueue, begin, end);
lk.unlock();
if (MONGO_FAIL_POINT(scheduleIntoPoolSpinsUntilThreadPoolShutsDown)) {
scheduleIntoPoolSpinsUntilThreadPoolShutsDown.setMode(FailPoint::off);
while (_pool->schedule([] {}) != ErrorCodes::ShutdownInProgress) {
sleepmillis(100);
}
}
for (const auto& cbState : todo) {
const auto status = _pool->schedule([this, cbState] { runCallback(std::move(cbState)); });
if (status == ErrorCodes::ShutdownInProgress)
break;
fassert(28735, status);
}
_net->signalWorkAvailable();
}
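Note the signature: the _inlock suffix says the caller must already hold the mutex, and taking the stdx::unique_lock by value moves ownership into the function. After the splice, lk.unlock() releases the mutex for good; because the lock was moved rather than passed by reference, the caller cannot accidentally keep treating the mutex as held.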
Example 3: processFailure
// Drop connections and fail all requests
void ConnectionPool::SpecificPool::processFailure(const Status& status,
stdx::unique_lock<stdx::mutex> lk) {
// Bump the generation so we don't reuse any pending or checked out
// connections
_generation++;
// Drop ready connections
_readyPool.clear();
// Migrate processing connections to the dropped pool
for (auto&& x : _processingPool) {
_droppedProcessingPool[x.first] = std::move(x.second);
}
_processingPool.clear();
// Move the requests out so they aren't visible
// in other threads
decltype(_requests) requestsToFail;
{
using std::swap;
swap(requestsToFail, _requests);
}
// Update state to reflect the lack of requests
updateStateInLock();
// Drop the lock and process all of the requests
// with the same failed status
lk.unlock();
while (requestsToFail.size()) {
requestsToFail.top().second(status);
requestsToFail.pop();
}
}
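Swapping _requests into the local requestsToFail does two jobs at once: the pending requests become invisible to other threads before the lock is dropped, and the failure callbacks then run after lk.unlock(), outside the pool mutex, so a callback that re-enters the pool cannot deadlock.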
Example 4: consumeTasks
/**
* Consumes available tasks.
*
* We distinguish between calls to consume on the networking thread and off of
* it. For off thread calls, we try to initiate a consume via setAlarm, while on
* it we invoke directly. This allows us to use the network interface's threads
* as our own pool, which should reduce context switches if our tasks are
* getting scheduled by network interface tasks.
*/
void NetworkInterfaceThreadPool::consumeTasks(stdx::unique_lock<stdx::mutex> lk) {
if (_consumingTasks || _tasks.empty())
return;
if (!(_inShutdown || _net->onNetworkThread())) {
if (!_registeredAlarm) {
_registeredAlarm = true;
lk.unlock();
_net->setAlarm(_net->now(),
[this] {
stdx::unique_lock<stdx::mutex> lk(_mutex);
_registeredAlarm = false;
consumeTasks(std::move(lk));
});
}
return;
}
_consumingTasks = true;
const auto consumingTasksGuard = MakeGuard([&] { _consumingTasks = false; });
decltype(_tasks) tasks;
while (_tasks.size()) {
using std::swap;
swap(tasks, _tasks);
lk.unlock();
const auto lkGuard = MakeGuard([&] { lk.lock(); });
for (auto&& task : tasks) {
try {
task();
} catch (...) {
severe() << "Exception escaped task in network interface thread pool";
std::terminate();
}
}
tasks.clear();
}
if (_joining)
_joiningCondition.notify_one();
}
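Two details are easy to miss here. First, lkGuard re-acquires the lock at the end of each loop iteration, so the while condition re-examines _tasks under the mutex and picks up work queued while the previous batch ran unlocked. Second, the _consumingTasks flag makes concurrent callers bail out at the top, guaranteeing that at most one thread drains the queue at a time.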
Example 5: fulfillRequests
// fulfills as many outstanding requests as possible
void ConnectionPool::SpecificPool::fulfillRequests(stdx::unique_lock<stdx::mutex>& lk) {
// If some other thread (possibly this thread) is fulfilling requests,
// don't keep padding the callstack.
if (_inFulfillRequests)
return;
_inFulfillRequests = true;
auto guard = MakeGuard([&] { _inFulfillRequests = false; });
while (_requests.size()) {
// _readyPool is an LRUCache, so its begin() object is the MRU item.
auto iter = _readyPool.begin();
if (iter == _readyPool.end())
break;
// Grab the connection and cancel its timeout
auto conn = std::move(iter->second);
_readyPool.erase(iter);
conn->cancelTimeout();
if (!conn->isHealthy()) {
log() << "dropping unhealthy pooled connection to " << conn->getHostAndPort();
if (_readyPool.empty()) {
log() << "after drop, pool was empty, going to spawn some connections";
// Spawn some more connections to the bad host if we're all out.
spawnConnections(lk);
}
// Drop the bad connection.
conn.reset();
// Retry.
continue;
}
// Grab the request and callback
auto cb = std::move(_requests.top().second);
_requests.pop();
auto connPtr = conn.get();
// check out the connection
_checkedOutPool[connPtr] = std::move(conn);
updateStateInLock();
// pass it to the user
connPtr->resetToUnknown();
lk.unlock();
cb(ConnectionHandle(connPtr, ConnectionHandleDeleter(_parent)));
lk.lock();
}
}
示例6: _processAlarms
void ClockSourceMock::_processAlarms(stdx::unique_lock<stdx::mutex> lk) {
using std::swap;
invariant(lk.owns_lock());
std::vector<Alarm> readyAlarms;
std::vector<Alarm>::iterator iter;
auto alarmIsNotExpired = [&](const Alarm& alarm) { return alarm.first > _now; };
auto expiredAlarmsBegin = std::partition(_alarms.begin(), _alarms.end(), alarmIsNotExpired);
std::move(expiredAlarmsBegin, _alarms.end(), std::back_inserter(readyAlarms));
_alarms.erase(expiredAlarmsBegin, _alarms.end());
lk.unlock();
for (const auto& alarm : readyAlarms) {
alarm.second();
}
}
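std::partition keeps the unexpired alarms at the front of _alarms and returns an iterator to the first expired one; the expired alarms are moved into readyAlarms and erased while the lock is held, then fired only after lk.unlock(), so an alarm callback can call back into the mock clock source without deadlocking.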
Example 7: _clearAllAlarmsImpl
void AlarmSchedulerPrecise::_clearAllAlarmsImpl(stdx::unique_lock<stdx::mutex>& lk) {
std::vector<Promise<void>> toExpire;
for (AlarmMapIt it = _alarms.begin(); it != _alarms.end();) {
toExpire.push_back(std::move(it->second.promise));
auto handle = it->second.handle.lock();
if (handle) {
handle->setDone();
}
it = _alarms.erase(it);
}
lk.unlock();
for (auto& alarm : toExpire) {
alarm.setError({ErrorCodes::CallbackCanceled, "Alarm scheduler was cleared"});
}
}
Example 8: spawnConnections
// spawn enough connections to satisfy open requests and minpool, while
// honoring maxpool
void ConnectionPool::SpecificPool::spawnConnections(stdx::unique_lock<stdx::mutex>& lk,
const HostAndPort& hostAndPort) {
// We want minConnections <= outstanding requests <= maxConnections
auto target = [&] {
return std::max(
_parent->_options.minConnections,
std::min(_requests.size() + _checkedOutPool.size(), _parent->_options.maxConnections));
};
// While all of our inflight connections are less than our target
while (_readyPool.size() + _processingPool.size() + _checkedOutPool.size() < target()) {
// make a new connection and put it in processing
auto handle = _parent->_factory->makeConnection(hostAndPort, _generation);
auto connPtr = handle.get();
_processingPool[connPtr] = std::move(handle);
++_created;
// Run the setup callback
lk.unlock();
connPtr->setup(_parent->_options.refreshTimeout,
[this](ConnectionInterface* connPtr, Status status) {
connPtr->indicateUsed();
stdx::unique_lock<stdx::mutex> lk(_parent->_mutex);
auto conn = takeFromProcessingPool(connPtr);
if (conn->getGeneration() != _generation) {
// If the host and port was dropped, let the
// connection lapse
} else if (status.isOK()) {
addToReady(lk, std::move(conn));
} else {
// If the setup failed, cascade the failure edge
processFailure(status, std::move(lk));
}
});
// Note that this assumes that the refreshTimeout is sound for the
// setupTimeout
lk.lock();
}
}
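target() clamps current demand (queued requests plus checked-out connections) between minConnections and maxConnections. For example, with hypothetical options minConnections = 2 and maxConnections = 10, a pool holding 5 queued requests and 3 checked-out connections targets max(2, min(5 + 3, 10)) = 8 connections in flight, while a pool with no demand at all still maintains the floor of 2.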
Example 9: fulfillRequests
// fulfills as many outstanding requests as possible
void ConnectionPool::SpecificPool::fulfillRequests(stdx::unique_lock<stdx::mutex>& lk) {
// If some other thread (possibly this thread) is fulfilling requests,
// don't keep padding the callstack.
if (_inFulfillRequests)
return;
_inFulfillRequests = true;
auto guard = MakeGuard([&] { _inFulfillRequests = false; });
while (_requests.size()) {
auto iter = _readyPool.begin();
if (iter == _readyPool.end())
break;
// Grab the connection and cancel its timeout
auto conn = std::move(iter->second);
_readyPool.erase(iter);
conn->cancelTimeout();
// Grab the request and callback
auto cb = std::move(_requests.top().second);
_requests.pop();
auto connPtr = conn.get();
// check out the connection
_checkedOutPool[connPtr] = std::move(conn);
updateStateInLock();
// pass it to the user
lk.unlock();
cb(ConnectionHandle(connPtr, ConnectionHandleDeleter(_parent)));
lk.lock();
}
}
Example 10: spawnConnections
// spawn enough connections to satisfy open requests and minpool, while
// honoring maxpool
void ConnectionPool::SpecificPool::spawnConnections(stdx::unique_lock<stdx::mutex>& lk) {
// If some other thread (possibly this thread) is spawning connections,
// don't keep padding the callstack.
if (_inSpawnConnections)
return;
_inSpawnConnections = true;
auto guard = MakeGuard([&] { _inSpawnConnections = false; });
// We want minConnections <= outstanding requests <= maxConnections
auto target = [&] {
return std::max(
_parent->_options.minConnections,
std::min(_requests.size() + _checkedOutPool.size(), _parent->_options.maxConnections));
};
// While all of our inflight connections are less than our target
while (_readyPool.size() + _processingPool.size() + _checkedOutPool.size() < target()) {
std::unique_ptr<ConnectionPool::ConnectionInterface> handle;
try {
// make a new connection and put it in processing
handle = _parent->_factory->makeConnection(_hostAndPort, _generation);
} catch (std::system_error& e) {
severe() << "Failed to construct a new connection object: " << e.what();
fassertFailed(40336);
}
auto connPtr = handle.get();
_processingPool[connPtr] = std::move(handle);
++_created;
// Run the setup callback
lk.unlock();
connPtr->setup(
_parent->_options.refreshTimeout, [this](ConnectionInterface* connPtr, Status status) {
connPtr->indicateUsed();
stdx::unique_lock<stdx::mutex> lk(_parent->_mutex);
auto conn = takeFromProcessingPool(connPtr);
if (conn->getGeneration() != _generation) {
// If the host and port was dropped, let the
// connection lapse
} else if (status.isOK()) {
addToReady(lk, std::move(conn));
} else if (status.code() == ErrorCodes::NetworkInterfaceExceededTimeLimit) {
// If we've exceeded the time limit, restart the connect, rather than
// failing all operations. We do this because the various callers
// have their own time limit which is unrelated to our internal one.
spawnConnections(lk);
} else {
// If the setup failed, cascade the failure edge
processFailure(status, std::move(lk));
}
});
// Note that this assumes that the refreshTimeout is sound for the
// setupTimeout
lk.lock();
}
}
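This variant hardens Example 8 in three ways: the _inSpawnConnections flag guards against re-entrant calls, a std::system_error thrown by makeConnection is caught and converted into a fatal assertion rather than escaping mid-loop, and a setup that fails with NetworkInterfaceExceededTimeLimit retries via spawnConnections instead of failing every queued request, since callers enforce their own deadlines independently of the pool's internal one.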
Example 11: returnConnection
void ConnectionPool::SpecificPool::returnConnection(ConnectionInterface* connPtr,
stdx::unique_lock<stdx::mutex> lk) {
auto needsRefreshTP = connPtr->getLastUsed() + _parent->_options.refreshRequirement;
auto conn = takeFromPool(_checkedOutPool, connPtr);
updateStateInLock();
// Users are required to call indicateSuccess() or indicateFailure() before allowing
// a connection to be returned. Otherwise, we have entered an unknown state.
invariant(conn->getStatus() != kConnectionStateUnknown);
if (conn->getGeneration() != _generation) {
// If the connection is from an older generation, just return.
return;
}
if (!conn->getStatus().isOK()) {
// TODO: alert via some callback if the host is bad
log() << "Ending connection to host " << _hostAndPort << " due to bad connection status; "
<< openConnections(lk) << " connections to that host remain open";
return;
}
auto now = _parent->_factory->now();
if (needsRefreshTP <= now) {
// If we need to refresh this connection
if (_readyPool.size() + _processingPool.size() + _checkedOutPool.size() >=
_parent->_options.minConnections) {
// If we already have minConnections, just let the connection lapse
log() << "Ending idle connection to host " << _hostAndPort
<< " because the pool meets constraints; " << openConnections(lk)
<< " connections to that host remain open";
return;
}
_processingPool[connPtr] = std::move(conn);
// Unlock in case refresh can occur immediately
lk.unlock();
connPtr->refresh(_parent->_options.refreshTimeout,
[this](ConnectionInterface* connPtr, Status status) {
connPtr->indicateUsed();
stdx::unique_lock<stdx::mutex> lk(_parent->_mutex);
auto conn = takeFromProcessingPool(connPtr);
// If the host and port were dropped, let this lapse
if (conn->getGeneration() != _generation)
return;
// If we're in shutdown, we don't need refreshed connections
if (_state == State::kInShutdown)
return;
// If the connection refreshed successfully, throw it back in the ready
// pool
if (status.isOK()) {
addToReady(lk, std::move(conn));
return;
}
// If we've exceeded the time limit, start a new connect, rather than
// failing all operations. We do this because the various callers have
// their own time limit which is unrelated to our internal one.
if (status.code() == ErrorCodes::NetworkInterfaceExceededTimeLimit) {
log() << "Pending connection to host " << _hostAndPort
<< " did not complete within the connection timeout,"
<< " retrying with a new connection;" << openConnections(lk)
<< " connections to that host remain open";
spawnConnections(lk);
return;
}
// Otherwise pass the failure on through
processFailure(status, std::move(lk));
});
lk.lock();
} else {
// If it's fine as it is, just put it in the ready queue
addToReady(lk, std::move(conn));
}
updateStateInLock();
}
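The lock is dropped before refresh() because the refresh callback may fire inline on this very thread; that is also why the lambda acquires its own stdx::unique_lock on _parent->_mutex instead of capturing lk, and why the callback passes that fresh lock on to addToReady, spawnConnections, or processFailure just as the outer function does.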
Example 12: returnConnection
void ConnectionPool::SpecificPool::returnConnection(ConnectionInterface* connPtr,
stdx::unique_lock<stdx::mutex> lk) {
auto needsRefreshTP = connPtr->getLastUsed() + _parent->_options.refreshRequirement;
auto conn = takeFromPool(_checkedOutPool, connPtr);
updateStateInLock();
// Users are required to call indicateSuccess() or indicateFailure() before allowing
// a connection to be returned. Otherwise, we have entered an unknown state.
invariant(conn->getStatus() != kConnectionStateUnknown);
if (conn->getGeneration() != _generation) {
// If the connection is from an older generation, just return.
return;
}
if (!conn->getStatus().isOK()) {
// TODO: alert via some callback if the host is bad
return;
}
auto now = _parent->_factory->now();
if (needsRefreshTP <= now) {
// If we need to refresh this connection
if (_readyPool.size() + _processingPool.size() + _checkedOutPool.size() >=
_parent->_options.minConnections) {
// If we already have minConnections, just let the connection lapse
return;
}
_processingPool[connPtr] = std::move(conn);
// Unlock in case refresh can occur immediately
lk.unlock();
connPtr->refresh(_parent->_options.refreshTimeout,
[this](ConnectionInterface* connPtr, Status status) {
connPtr->indicateUsed();
stdx::unique_lock<stdx::mutex> lk(_parent->_mutex);
auto conn = takeFromProcessingPool(connPtr);
// If the host and port were dropped, let this lapse
if (conn->getGeneration() != _generation)
return;
// If we're in shutdown, we don't need refreshed connections
if (_state == State::kInShutdown)
return;
// If the connection refreshed successfully, throw it back in the ready
// pool
if (status.isOK()) {
addToReady(lk, std::move(conn));
return;
}
// Otherwise pass the failure on through
processFailure(status, std::move(lk));
});
lk.lock();
} else {
// If it's fine as it is, just put it in the ready queue
addToReady(lk, std::move(conn));
}
updateStateInLock();
}
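Read side by side with Example 11, this is a leaner variant of the same returnConnection: it drops the logging and the NetworkInterfaceExceededTimeLimit retry branch, so a refresh timeout falls through to processFailure and fails every queued request. The unlock-before-refresh skeleton is identical in both.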