This article collects typical usage examples of the C++ method tbb::atomic::fetch_and_store. If you are unsure what atomic::fetch_and_store does, how to call it, or what idiomatic uses look like, the curated code samples below should help. You can also read further about the enclosing class tbb::atomic.
Six code examples of atomic::fetch_and_store are shown below, ordered by popularity by default.
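As a quick orientation before the extracted examples: fetch_and_store(x) atomically stores x and returns the value that was stored before, i.e. an atomic exchange. The following is a minimal sketch of that behavior, assuming the legacy tbb/atomic.h header (tbb::atomic has since been removed from oneTBB); the variable names are made up for illustration and do not come from the examples below.

// Minimal sketch (assumes legacy tbb/atomic.h; names are illustrative).
#include "tbb/atomic.h"
#include <cassert>

int main() {
    tbb::atomic<int> value;
    value = 10;                                // ordinary atomic store
    int previous = value.fetch_and_store(42);  // atomically swap in 42, get the old value back
    assert( previous == 10 && value == 42 );
    return 0;
}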
Example 1: new
/*override*/ tbb::task* execute() {
    if( my_depth>0 ) {
        int child_count = my_child_count;
        // Look up the scheduler bound to the current thread.
        scheduler* my_sched = internal::governor::local_scheduler();
        tbb::task& c = *new( allocate_continuation() ) tbb::empty_task;
        c.set_ref_count( child_count );
        recycle_as_child_of(c);   // this task will run again as one of c's children
        --child_count;
        if( Producer==my_sched ) {
            // produce a task and put it into Exchanger
            tbb::task* t = new( c.allocate_child() ) tbb::empty_task;
            --child_count;
            t = Exchanger.fetch_and_store(t);  // swap the new task in, take back whatever was there
            if( t ) spawn(*t);
        } else {
            // consume: claim any task the producer left in Exchanger
            tbb::task* t = Exchanger.fetch_and_store(NULL);
            if( t ) spawn(*t);
        }
        while( child_count ) {
            tbb::task* t = new( c.allocate_child() ) TaskGenerator(my_child_count, my_depth-1);
            if( my_depth>4 ) enqueue(*t);
            else spawn(*t);
            --child_count;
        }
        --my_depth;
        return this;
    } else {
        // Leaf level: just drain the Exchanger.
        tbb::task* t = Exchanger.fetch_and_store(NULL);
        if( t ) spawn(*t);
        return NULL;
    }
}
Example 2: GetThreadSpecific
/*override*/ tbb::task* execute() {
    if( my_depth>0 ) {
        int child_count = my_child_count;
        scheduler* my_sched = GetThreadSpecific();
        tbb::task& c = *new( tbb::task::allocate_continuation() ) tbb::empty_task;
        c.set_ref_count( child_count );
        recycle_as_child_of(c);
        --child_count;
        if( Producer==my_sched ) {
            // produce a task and put it into Exchanger
            tbb::task* t = new( c.allocate_child() ) tbb::empty_task;
            --child_count;
            t = Exchanger.fetch_and_store(t);
            if( t ) this->spawn(*t);
        } else {
            tbb::task* t = Exchanger.fetch_and_store(NULL);
            if( t ) this->spawn(*t);
        }
        while( child_count ) {
            c.spawn( *new( c.allocate_child() ) TaskGenerator(my_child_count, my_depth-1) );
            --child_count;
        }
        --my_depth;
        return this;
    } else {
        tbb::task* t = Exchanger.fetch_and_store(NULL);
        if( t ) this->spawn(*t);
        return NULL;
    }
}
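Examples 1 and 2 both use a shared tbb::atomic<tbb::task*> named Exchanger as a single-slot mailbox: the producing thread swaps its freshly allocated task in (and spawns whatever was previously stored), while other threads swap NULL in to claim the stored task. The sketch below reproduces that single-slot handoff with plain heap objects and std::thread instead of the legacy tbb::task API; the names Slot, producer and consumer are illustrative only.

// Single-slot handoff sketch (illustrative names; assumes legacy tbb/atomic.h).
#include "tbb/atomic.h"
#include <thread>
#include <cstdio>

static tbb::atomic<int*> Slot;   // static storage, so it starts out zero (empty)

void producer() {
    for( int i=0; i<100; ++i ) {
        int* fresh = new int(i);
        int* old = Slot.fetch_and_store(fresh);  // publish, take back any unclaimed item
        delete old;                              // deleting NULL is harmless
    }
}

void consumer() {
    for( int i=0; i<100; ++i ) {
        int* item = Slot.fetch_and_store((int*)NULL);  // claim the slot, leaving it empty
        if( item ) { std::printf("got %d\n", *item); delete item; }
    }
}

int main() {
    std::thread p(producer), c(consumer);
    p.join(); c.join();
    delete Slot.fetch_and_store((int*)NULL);  // drain whatever is left behind
    return 0;
}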
Example 3: wait
void wait(ogl_device& ogl)
{
    int delay = 0;
    if(!ogl.invoke([this]{return ready();}, high_priority))
    {
        while(!ogl.invoke([this]{return ready();}, normal_priority) && delay < 20)
        {
            delay += 2;
            Sleep(2);
        }
    }

    static tbb::atomic<size_t> count;
    static tbb::atomic<bool>   warned;

    if(delay > 2 && ++count > 50)
    {
        // fetch_and_store(true) returns the previous flag, so only the first caller logs the full warning.
        if(!warned.fetch_and_store(true))
        {
            CASPAR_LOG(warning) << L"[fence] Performance warning. GPU was not ready during requested host read-back. Delayed by at least: " << delay << L" ms. Further warnings are sent to trace log level."
                                << L" You can ignore this warning if you do not notice any problems with output video. This warning is caused by insufficient support or performance of your graphics card for OpenGL based memory transfers."
                                << L" Please try to update your graphics drivers or update your graphics card, see recommendations on (www.casparcg.com)."
                                << L" Further help is available at (www.casparcg.com/forum).";
        }
        else
            CASPAR_LOG(trace) << L"[fence] Performance warning. GPU was not ready during requested host read-back. Delayed by at least: " << delay << L" ms.";
    }
}
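The fetch_and_store(true) call above is a "warn once" latch: every caller sets the flag, but only the caller that observes the previous value false takes the one-time branch. A condensed sketch of that idiom follows, with illustrative names and assuming the legacy tbb/atomic.h header.

// Warn-once latch sketch (illustrative names; assumes legacy tbb/atomic.h).
#include "tbb/atomic.h"
#include <cstdio>

static tbb::atomic<bool> warned;   // static storage => starts out false

void report_slow_path() {
    if( !warned.fetch_and_store(true) )   // previous value was false for exactly one caller
        std::printf("detailed warning, printed once\n");
    else
        std::printf("short follow-up notice\n");
}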
Example 4: execute
tbb::task* execute() {
    tbb::priority_t prev = g_order.fetch_and_store(my_priority);
    if( my_priority != prev ) {
        REMARK("prev:%s --> new:%s\n", PriorityName(prev), PriorityName(my_priority));
        // TODO: improve the test for concurrent workers
        if( !g_order_established ) {
            // initial transition path allowed: low->[normal]->high
            if( my_priority == tbb::priority_high )
                g_order_established = true;
            else ASSERT( my_priority == tbb::priority_normal && prev == tbb::priority_low, NULL );
        } else { // transition path allowed: high->normal->low
            if( prev == tbb::priority_high ) ASSERT( my_priority == tbb::priority_normal, "previous priority is high - bad order" );
            else if( prev == tbb::priority_normal ) ASSERT( my_priority == tbb::priority_low, "previous priority is normal - bad order" );
            else ASSERT( !g_order_established, "transition from low priority but not during initialization" );
        }
    }
    EmulateWork(0);
    return NULL;
}
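Here fetch_and_store doubles as an observer: it publishes the task's own priority into g_order and, in the same atomic step, retrieves the previously published priority so the transition can be validated. A stripped-down sketch of that "swap in the new state, inspect the old one" pattern, with made-up names and assuming the legacy tbb/atomic.h header:

// Transition-check sketch (illustrative names; assumes legacy tbb/atomic.h).
#include "tbb/atomic.h"
#include <cassert>

enum state_t { idle = 0, running, done };
static tbb::atomic<int> g_state;   // static storage => starts at 0 == idle

void enter(state_t next) {
    int prev = g_state.fetch_and_store(next);  // publish next, learn what was there before
    assert( prev <= next && "states may only move forward" );
}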
Example 5: free_handles
void free_handles() {
    // Atomically take the current count and reset it to zero, so each handle is unlinked exactly once.
    const size_t size = my_size.fetch_and_store( 0 );
    for (size_t i=0; i<size; ++i)
        dynamic_unlink( my_handles[i] );
}
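fetch_and_store( 0 ) hands the cleanup code a private copy of the count while simultaneously clearing the shared counter, so concurrent callers cannot free the same handles twice. A generic sketch of this "claim everything pending" move, with illustrative names and assuming the legacy tbb/atomic.h header:

// Claim-all sketch (illustrative names; assumes legacy tbb/atomic.h).
#include "tbb/atomic.h"
#include <cstdio>

static tbb::atomic<size_t> pending;   // number of queued items, incremented elsewhere

void drain() {
    size_t n = pending.fetch_and_store( 0 );  // take the whole batch, leave zero behind
    for( size_t i = 0; i < n; ++i )
        std::printf("processing item %zu\n", i);
}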
Example 6: on_scheduler_leaving
/*override*/
bool on_scheduler_leaving() {
    if( m_leave_ticket == 0 ) return true;
    // Swap in -1; only the caller that sees a positive ticket is allowed to leave.
    return m_leave_ticket.fetch_and_store(-1) > 0;
}