This article collects typical usage examples of `scopedLock` in C++. If you are unsure what scopedLock is for, how to use it, or what real-world code that uses it looks like, the curated examples below should help.
The fifteen code examples that follow are drawn from open-source projects and are ordered roughly by popularity.
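
All fifteen examples share one idiom: `scopedLock` is not a single library function but the conventional name for a RAII guard object that locks a mutex in its constructor and unlocks it in its destructor, so the lock is released on every exit path, including exceptions. As a point of reference before the project-specific examples, here is a minimal sketch of the idiom using only the standard library (the names g_mutex, g_counter, and increment are illustrative, not from any example below):

#include <mutex>

std::mutex g_mutex;
int g_counter = 0;

void increment()
{
    // Acquired on construction, released automatically when scopedLock
    // goes out of scope, even if the code below throws.
    std::lock_guard<std::mutex> scopedLock(g_mutex);
    ++g_counter;
}

Qt's QMutexLocker, Boost's scoped_lock, and the various project-specific ScopedLock classes in the examples all follow this same pattern.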
Example 1: LogPrintf
OsmAnd::GPUAPI::AtlasTextureInGPU::~AtlasTextureInGPU()
{
    const int tilesRemaining =
#if OSMAND_DEBUG
        _tiles.size();
#else
        _tilesCounter.load();
#endif
    if (tilesRemaining > 0)
        LogPrintf(LogSeverityLevel::Error,
            "By the time of atlas texture destruction, it still contained %d allocated slots",
            tilesRemaining);
    assert(tilesRemaining == 0);

    // Clear all references to this atlas
    {
        QMutexLocker scopedLock(&pool->_freedSlotsMutex);
        pool->_freedSlots.remove(this);
    }
    {
        QMutexLocker scopedLock(&pool->_unusedSlotsMutex);
        if (pool->_lastNonFullAtlasTexture == this)
        {
            pool->_lastNonFullAtlasTexture = nullptr;
            pool->_lastNonFullAtlasTextureWeak.reset();
        }
    }
}
Example 2: scopedLock
OsmAnd::RasterizerEnvironment_P::~RasterizerEnvironment_P()
{
    {
        QMutexLocker scopedLock(&_shadersBitmapsMutex);
        _shadersBitmaps.clear();
    }
    {
        QMutexLocker scopedLock(&_pathEffectsMutex);
        for (auto& pathEffect : _pathEffects)
            pathEffect->unref();
    }
}
Example 3: scopedLock
ptr_lib::shared_ptr<DataBlock>
FrameBuffer::acquireData(const ndn::Interest& interest, ndn::Name& nalType)
{
    lock_guard<recursive_mutex> scopedLock(syncMutex_);
    ptr_lib::shared_ptr<DataBlock> data;
    std::map<ndn::Name, ptr_lib::shared_ptr<DataBlock> >::reverse_iterator re_iter;
    ndn::Name name;
    // Walk the active slots from the most recent entry and find the first
    // one whose name starts with the interest's name.
    for (re_iter = activeSlots_.rbegin(); re_iter != activeSlots_.rend(); ++re_iter)
    {
        name = re_iter->first;
        if (interest.getName().equals(name.getPrefix(interest.getName().size())))
        {
            nalType = name.getSubName(-2);
            break;
        }
    }
    if (re_iter != activeSlots_.rend()) // found
        data = re_iter->second;
    return data;
}
Example 4: scopedLock
std::unique_ptr<AutoConditionLock> AutoConditionLock::waitAndAcquire(
        const std::shared_ptr<WaitableMutexWrapper>& manager, nsecs_t waitTime) {
    if (manager == nullptr || manager->mMutex == nullptr) {
        // Bad input, return null
        return std::unique_ptr<AutoConditionLock> {nullptr};
    }

    // Acquire scoped lock
    std::unique_ptr<AutoConditionLock> scopedLock(new AutoConditionLock(manager));

    // Figure out what time in the future we should hit the timeout
    nsecs_t failTime = systemTime(SYSTEM_TIME_MONOTONIC) + waitTime;

    // Wait until we timeout, or success
    while (manager->mState) {
        status_t ret = manager->mCondition.waitRelative(*(manager->mMutex), waitTime);
        if (ret != NO_ERROR) {
            // Timed out or whatever, return null
            return std::unique_ptr<AutoConditionLock> {nullptr};
        }
        waitTime = failTime - systemTime(SYSTEM_TIME_MONOTONIC);
    }

    // Set the condition and return
    manager->mState = true;
    return scopedLock;
}
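
Example 4 hand-rolls a timed wait: it computes an absolute deadline, then repeatedly waits on a condition variable, shrinking the remaining waitTime after each wakeup. For comparison, a hedged sketch of the same pattern with the standard library, where wait_until takes the absolute deadline and a predicate directly, so the deadline arithmetic and the explicit loop disappear (Gate, busy, and acquireFor are illustrative names, not from the source):

#include <chrono>
#include <condition_variable>
#include <mutex>

struct Gate {
    std::mutex mutex;
    std::condition_variable condition;
    bool busy = false;  // the analogue of manager->mState
};

// Returns a lock that owns the gate, or a non-owning lock on timeout
// (check the result with lock.owns_lock()).
std::unique_lock<std::mutex> acquireFor(Gate& gate, std::chrono::nanoseconds waitTime)
{
    std::unique_lock<std::mutex> lock(gate.mutex);
    const auto deadline = std::chrono::steady_clock::now() + waitTime;
    // wait_until re-checks the predicate after every wakeup and handles
    // the remaining-time bookkeeping internally.
    if (!gate.condition.wait_until(lock, deadline, [&] { return !gate.busy; }))
        return std::unique_lock<std::mutex>();  // timed out
    gate.busy = true;
    return lock;
}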
Example 5: scopedLock
bool SSDBClient::Connect(const std::string& ip, int port, const std::string& auth)
{
    Base::Mutex::ScopedLock scopedLock(m_oMutex);
    m_bConnected = false;
    m_Client = ssdb::Client::connect(ip.c_str(), port);
    if (!m_Client)
        return false;
    if (!auth.empty())
    {
        const std::vector<std::string>* rsp = m_Client->request("AUTH", auth);
        ssdb::Status status(rsp);
        if (status.ok())
        {
            m_bConnected = true;
        }
        else
        {
            delete m_Client;
            m_Client = NULL;
        }
    }
    else
    {
        m_bConnected = true;
    }
    TSeqArrayResults results;
    keys("*", results);
    return m_bConnected;
}
Example 6: scopedLock
bool
SubscriberDelegate::is_group_coherent() const
{
    org::opensplice::core::ScopedObjectLock scopedLock(*this);
    return this->qos_.delegate().policy<dds::core::policy::Presentation>().delegate().coherent_access()
        && this->qos_.delegate().policy<dds::core::policy::Presentation>().delegate().access_scope()
            == dds::core::policy::PresentationAccessScopeKind::GROUP;
}
Example 7: scopedLock
void
PublisherDelegate::default_datawriter_qos(const dds::pub::qos::DataWriterQos& dwqos)
{
    org::opensplice::core::ScopedObjectLock scopedLock(*this);
    dwqos.delegate().check();
    this->default_dwqos_ = dwqos;
}
Example 8: scopedLock
void ThreadPool::InsertTask(ITask* pTask)
{
    ScopedLock scopedLock(mMutex);
    mTaskList.push_back(pTask);
    mTaskLeftCount++;
    mGetTaskCV.WakeSingle();
}
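
InsertTask is the producer half of a classic producer/consumer queue: push under the lock, then wake one waiting worker. For context, a hedged standard-library sketch of both halves (TaskQueue, push, and pop are illustrative names; the example's own ITask, ScopedLock, and mGetTaskCV belong to its framework and their exact semantics are assumed here):

#include <condition_variable>
#include <deque>
#include <mutex>

struct ITask { virtual ~ITask() = default; virtual void Run() = 0; };

class TaskQueue {
public:
    void push(ITask* pTask) {
        {
            std::lock_guard<std::mutex> scopedLock(mMutex);
            mTasks.push_back(pTask);
        }
        mCondition.notify_one();  // the analogue of mGetTaskCV.WakeSingle()
    }

    ITask* pop() {
        std::unique_lock<std::mutex> scopedLock(mMutex);
        // Sleep until a producer pushes a task; the predicate guards
        // against spurious wakeups.
        mCondition.wait(scopedLock, [&] { return !mTasks.empty(); });
        ITask* pTask = mTasks.front();
        mTasks.pop_front();
        return pTask;
    }

private:
    std::mutex mMutex;
    std::condition_variable mCondition;
    std::deque<ITask*> mTasks;
};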
Example 9: scopedLock
void LockManager::cleanupUnusedLocks() {
    for (unsigned i = 0; i < _numLockBuckets; i++) {
        LockBucket* bucket = &_lockBuckets[i];
        scoped_spinlock scopedLock(bucket->mutex);

        LockHeadMap::iterator it = bucket->data.begin();
        while (it != bucket->data.end()) {
            LockHead* lock = it->second;
            if (lock->grantedModes == 0) {
                invariant(lock->grantedModes == 0);
                invariant(lock->grantedQueue == NULL);
                invariant(lock->conflictModes == 0);
                invariant(lock->conflictQueueBegin == NULL);
                invariant(lock->conflictQueueEnd == NULL);
                invariant(lock->conversionsCount == 0);

                bucket->data.erase(it++);
                delete lock;
            }
            else {
                it++;
            }
        }
    }
}
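
Note the `bucket->data.erase(it++)` in Example 9: the iterator is advanced before erase invalidates it, which is the safe way to delete map entries while iterating under pre-C++11 rules. Since C++11, erase returns the iterator following the erased element, so a sketch of the same loop can be written as (eraseUnused and counts are illustrative names):

#include <map>

void eraseUnused(std::map<int, int>& counts)
{
    for (auto it = counts.begin(); it != counts.end(); ) {
        if (it->second == 0)
            it = counts.erase(it);  // erase returns the next valid iterator
        else
            ++it;
    }
}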
Example 10: invariant
void LockManager::downgrade(LockRequest* request, LockMode newMode) {
    invariant(request->lock);
    invariant(request->status == LockRequest::STATUS_GRANTED);
    invariant(request->recursiveCount > 0);

    // The conflict set of the newMode should be a subset of the conflict set of the old mode.
    // Can't downgrade from S -> IX for example.
    invariant((LockConflictsTable[request->mode] | LockConflictsTable[newMode])
              == LockConflictsTable[request->mode]);

    LockHead* lock = request->lock;
    LockBucket* bucket = _getBucket(lock->resourceId);
    SimpleMutex::scoped_lock scopedLock(bucket->mutex);

    invariant(lock->grantedQueueBegin != NULL);
    invariant(lock->grantedQueueEnd != NULL);
    invariant(lock->grantedModes != 0);

    lock->changeGrantedModeCount(newMode, LockHead::Increment);
    lock->changeGrantedModeCount(request->mode, LockHead::Decrement);
    request->mode = newMode;

    _onLockModeChanged(lock, true);
}
Example 11: scopedLock
void MetadataManager::beginReceive(const ChunkRange& range) {
    stdx::lock_guard<stdx::mutex> scopedLock(_managerLock);

    // Collection is not known to be sharded if the active metadata tracker is null
    invariant(_activeMetadataTracker);

    // If range is contained within pending chunks, this means a previous migration must have
    // failed and we need to clean all overlaps
    RangeVector overlappedChunks;
    getRangeMapOverlap(_receivingChunks, range.getMin(), range.getMax(), &overlappedChunks);

    for (const auto& overlapChunkMin : overlappedChunks) {
        auto itRecv = _receivingChunks.find(overlapChunkMin.first);
        invariant(itRecv != _receivingChunks.end());

        const ChunkRange receivingRange(itRecv->first, itRecv->second);
        _receivingChunks.erase(itRecv);

        // Make sure any potentially partially copied chunks are scheduled to be cleaned up
        _addRangeToClean_inlock(receivingRange);
    }

    // Need to ensure that the background range deleter task won't delete the range we are about to
    // receive
    _removeRangeToClean_inlock(range);
    _receivingChunks.insert(std::make_pair(range.getMin().getOwned(), range.getMax().getOwned()));

    // For compatibility with the current range deleter, update the pending chunks on the collection
    // metadata to include the chunk being received
    ChunkType chunk;
    chunk.setMin(range.getMin());
    chunk.setMax(range.getMax());
    _setActiveMetadata_inlock(_activeMetadataTracker->metadata->clonePlusPending(chunk));
}
Example 12: invariant
bool LockManager::unlock(LockRequest* request) {
    invariant(request->lock);

    // Fast path for decrementing multiple references of the same lock. It is safe to do this
    // without locking, because 1) all calls for the same lock request must be done on the same
    // thread and 2) if there are lock requests hanging off a given LockHead, then this lock
    // will never disappear.
    request->recursiveCount--;
    if ((request->status == LockRequest::STATUS_GRANTED) && (request->recursiveCount > 0)) {
        return false;
    }

    LockHead* lock = request->lock;
    LockBucket* bucket = _getBucket(lock->resourceId);
    scoped_spinlock scopedLock(bucket->mutex);

    invariant(lock->grantedQueue != NULL);
    invariant(lock->grantedModes != 0);

    if (request->status == LockRequest::STATUS_WAITING) {
        // This cancels a pending lock request
        invariant(request->recursiveCount == 0);

        lock->removeFromConflictQueue(request);
        lock->changeConflictModeCount(request->mode, LockHead::Decrement);
    }
    else if (request->status == LockRequest::STATUS_CONVERTING) {
        // This cancels a pending convert request
        invariant(request->recursiveCount > 0);

        // Lock only goes from GRANTED to CONVERTING, so cancelling the conversion request
        // brings it back to the previous granted mode.
        request->status = LockRequest::STATUS_GRANTED;

        lock->conversionsCount--;
        lock->changeGrantedModeCount(request->convertMode, LockHead::Decrement);

        request->convertMode = MODE_NONE;

        _onLockModeChanged(lock, lock->grantedCounts[request->convertMode] == 0);
    }
    else if (request->status == LockRequest::STATUS_GRANTED) {
        // This releases a currently held lock and is the most common path, so it should be
        // as efficient as possible.
        invariant(request->recursiveCount == 0);

        // Remove from the granted list
        lock->removeFromGrantedQueue(request);
        lock->changeGrantedModeCount(request->mode, LockHead::Decrement);

        _onLockModeChanged(lock, lock->grantedCounts[request->mode] == 0);
    }
    else {
        // Invalid request status
        invariant(false);
    }

    return (request->recursiveCount == 0);
}
Example 13: scopedLock
bool ElevationMap::update(const grid_map::Matrix& varianceUpdate, const grid_map::Matrix& horizontalVarianceUpdateX,
                          const grid_map::Matrix& horizontalVarianceUpdateY,
                          const grid_map::Matrix& horizontalVarianceUpdateXY, const ros::Time& time)
{
    boost::recursive_mutex::scoped_lock scopedLock(rawMapMutex_);

    const auto& size = rawMap_.getSize();
    if (!((Index(varianceUpdate.rows(), varianceUpdate.cols()) == size).all()
          && (Index(horizontalVarianceUpdateX.rows(), horizontalVarianceUpdateX.cols()) == size).all()
          && (Index(horizontalVarianceUpdateY.rows(), horizontalVarianceUpdateY.cols()) == size).all()
          && (Index(horizontalVarianceUpdateXY.rows(), horizontalVarianceUpdateXY.cols()) == size).all())) {
        ROS_ERROR("The size of the update matrices does not match.");
        return false;
    }

    rawMap_.get("variance") += varianceUpdate;
    rawMap_.get("horizontal_variance_x") += horizontalVarianceUpdateX;
    rawMap_.get("horizontal_variance_y") += horizontalVarianceUpdateY;
    rawMap_.get("horizontal_variance_xy") += horizontalVarianceUpdateXY;
    clean();
    rawMap_.setTimestamp(time.toNSec());
    return true;
}
Example 14: ROS_WARN
void ElevationMapping::mapUpdateTimerCallback(const ros::TimerEvent&)
{
    ROS_WARN("Elevation map is updated without data from the sensor.");
    boost::recursive_mutex::scoped_lock scopedLock(map_.getRawDataMutex());

    stopMapUpdateTimer();
    Time time = Time::now();

    // Update map from motion prediction.
    if (!updatePrediction(time)) {
        ROS_ERROR("Updating process noise failed.");
        resetMapUpdateTimer();
        return;
    }

    // Publish elevation map.
    map_.publishRawElevationMap();
    if (isContinouslyFusing_ && map_.hasFusedMapSubscribers()) {
        map_.fuseAll(true);
        map_.publishFusedElevationMap();
    }

    resetMapUpdateTimer();
}
Example 15: scopedLock
bool
org::opensplice::core::EntitySet::contains(const dds::core::InstanceHandle& handle)
{
    org::opensplice::core::ScopedMutexLock scopedLock(this->mutex);
    bool contains = false;
    WeakReferenceSet<ObjectDelegate::weak_ref_type>::iterator it;
    for (it = this->entities.begin(); !contains && (it != this->entities.end()); it++) {
        org::opensplice::core::ObjectDelegate::ref_type ref = it->lock();
        if (ref) {
            org::opensplice::core::EntityDelegate::ref_type entity =
                OSPL_CXX11_STD_MODULE::dynamic_pointer_cast<EntityDelegate>(ref);
            /* Check if current entity is the one that is searched for. */
            contains = (entity->instance_handle() == handle);
            if (!contains) {
                /* Check if current entity contains the one searched for. */
                contains = entity->contains_entity(handle);
            }
        }
    }
    return contains;
}
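
Example 15 walks a set of weak references and promotes each with lock() before use; a null result means the entity has already been destroyed and is simply skipped. A hedged sketch of the same pattern with std::weak_ptr (Entity, id, and containsId are illustrative names, not from the source):

#include <memory>
#include <vector>

struct Entity { int id; };

bool containsId(const std::vector<std::weak_ptr<Entity>>& entities, int id)
{
    for (const auto& weak : entities) {
        // lock() yields an empty shared_ptr if the Entity is already gone,
        // so expired entries are never dereferenced.
        if (std::shared_ptr<Entity> entity = weak.lock()) {
            if (entity->id == id)
                return true;
        }
    }
    return false;
}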