This article collects typical usage examples of the C++ ITT_NOTIFY function. If you are wondering what ITT_NOTIFY does, how it is used, or want to see it in real code, the hand-picked examples below may help.
The following shows 12 code examples of ITT_NOTIFY, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code examples.
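Before the examples, here is the general convention they all follow: ITT_NOTIFY(sync_prepare, addr) is issued before a thread starts waiting on a synchronization object, sync_acquired once the object has been obtained, sync_releasing just before ownership is given up, and sync_cancel if a pending wait is abandoned. The code below is only a minimal illustrative sketch, not TBB source: the toy_lock type is a hypothetical name, and ITT_NOTIFY is assumed to be the TBB-internal macro (a no-op unless ITT instrumentation is enabled) that appears in the real examples that follow.
#include <atomic>
// Illustrative sketch (not taken from the TBB sources): a toy test-and-set lock
// annotated with the same ITT_NOTIFY calls used throughout the examples below.
struct toy_lock {
    std::atomic<bool> flag{false};

    void acquire() {
        ITT_NOTIFY(sync_prepare, this);                    // about to wait on this object
        while (flag.exchange(true, std::memory_order_acquire))
            ;                                              // spin until the flag is clear
        ITT_NOTIFY(sync_acquired, this);                   // ownership obtained
    }

    void release() {
        ITT_NOTIFY(sync_releasing, this);                  // about to give up ownership
        flag.store(false, std::memory_order_release);
    }
};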
Example 1: __TBB_ASSERT
void* concurrent_vector_base::internal_push_back( size_type element_size, size_type& index ) {
    __TBB_ASSERT( sizeof(my_early_size)==sizeof(reference_count), NULL );
    //size_t tmp = __TBB_FetchAndIncrementWacquire(*(tbb::internal::reference_count*)&my_early_size);
    size_t tmp = __TBB_FetchAndIncrementWacquire((tbb::internal::reference_count*)&my_early_size);
    index = tmp;
    segment_index_t k_old = segment_index_of( tmp );
    size_type base = segment_base(k_old);
    helper::extend_segment_if_necessary(*this,k_old);
    segment_t& s = my_segment[k_old];
    void* array = s.array;
    if ( !array ) {
        // FIXME - consider factoring this out and share with internal_grow_by
        if ( base==tmp ) {
            // This thread claimed the first element of the segment, so it allocates the segment.
            __TBB_ASSERT( !s.array, NULL );
            size_t n = segment_size(k_old);
            array = NFS_Allocate( n, element_size, NULL );
            ITT_NOTIFY( sync_releasing, &s.array );
            s.array = array;
        } else {
            // Another thread is allocating the segment; wait until it publishes s.array.
            ITT_NOTIFY(sync_prepare, &s.array);
            spin_wait_while_eq( s.array, (void*)0 );
            ITT_NOTIFY(sync_acquired, &s.array);
            array = s.array;
        }
    }
    size_type j_begin = tmp-base;
    return (void*)((char*)array+element_size*j_begin);
}
Example 2: __TBB_ASSERT
void spin_mutex::scoped_lock::internal_acquire( spin_mutex& m ) {
    __TBB_ASSERT( !my_mutex, "already holding a lock on a spin_mutex" );
    ITT_NOTIFY(sync_prepare, &m);
    my_unlock_value = __TBB_LockByte(m.flag);
    my_mutex = &m;
    ITT_NOTIFY(sync_acquired, &m);
}
Example 3: __TBB_ASSERT
/** Returns true if the upgrade happened without re-acquiring the lock and false otherwise */
bool spin_rw_mutex_v3::internal_upgrade()
{
    state_t s = state;
    __TBB_ASSERT( s & READERS, "invalid state before upgrade: no readers " );
    // check and set writer-pending flag
    // required conditions: either no pending writers, or we are the only reader
    // (with multiple readers and pending writer, another upgrade could have been requested)
    while( (s & READERS)==ONE_READER || !(s & WRITER_PENDING) ) {
        state_t old_s = s;
        if( (s=CAS(state, s | WRITER | WRITER_PENDING, s))==old_s ) {
            internal::atomic_backoff backoff;
            ITT_NOTIFY(sync_prepare, this);
            // the state should be 0...0111, i.e. 1 reader and waiting writer;
            // both new readers and writers are blocked
            while( (state & READERS) != ONE_READER ) // more than 1 reader
                backoff.pause();
            __TBB_ASSERT((state&(WRITER_PENDING|WRITER))==(WRITER_PENDING|WRITER),"invalid state when upgrading to writer");
            __TBB_FetchAndAddW( &state, -(intptr_t)(ONE_READER+WRITER_PENDING));
            ITT_NOTIFY(sync_acquired, this);
            return true; // successfully upgraded
        }
    }
    // slow reacquire
    internal_release_reader();
    return internal_acquire_writer(); // always returns false
}
Example 4: call_itt_notify_v5
void call_itt_notify_v5(int t, void *ptr) {
    switch (t) {
    case 0: ITT_NOTIFY(sync_prepare, ptr); break;
    case 1: ITT_NOTIFY(sync_cancel, ptr); break;
    case 2: ITT_NOTIFY(sync_acquired, ptr); break;
    case 3: ITT_NOTIFY(sync_releasing, ptr); break;
    }
}
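A minimal usage sketch of this dispatcher follows (illustrative only; the my_sync variable and the wrapping function are hypothetical stand-ins, not part of the original example): the integer argument selects the notification kind seen in the switch above.
// Hypothetical usage of call_itt_notify_v5: the first argument maps to the
// notification kind (0 = sync_prepare, 1 = sync_cancel, 2 = sync_acquired, 3 = sync_releasing).
void annotate_handoff_example() {
    static int my_sync;                  // any address that identifies the sync object
    call_itt_notify_v5(0, &my_sync);     // about to wait on my_sync
    call_itt_notify_v5(2, &my_sync);     // my_sync acquired
    call_itt_notify_v5(3, &my_sync);     // releasing my_sync
}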
Example 5: __TBB_ASSERT
void spin_mutex::scoped_lock::internal_release() {
    __TBB_ASSERT( my_mutex, "release on spin_mutex::scoped_lock that is not holding a lock" );
    ITT_NOTIFY(sync_releasing, my_mutex);
    __TBB_UnlockByte(my_mutex->flag);
    my_mutex = NULL;
}
Example 6: ITT_NOTIFY
//! Acquire write lock on the given mutex.
bool spin_rw_mutex_v3::internal_acquire_writer()
{
    ITT_NOTIFY(sync_prepare, this);
    for( internal::atomic_backoff backoff;;backoff.pause() ){
        state_t s = const_cast<volatile state_t&>(state); // ensure reloading
        if( !(s & BUSY) ) { // no readers, no writers
            if( CAS(state, WRITER, s)==s )
                break; // successfully stored writer flag
            backoff.reset(); // we could be very close to complete op.
        } else if( !(s & WRITER_PENDING) ) { // no pending writers
            __TBB_AtomicOR(&state, WRITER_PENDING);
        }
    }
    ITT_NOTIFY(sync_acquired, this);
    return false;
}
Example 7: ITT_NOTIFY
void reader_writer_lock::start_read(scoped_lock_read *I) {
    ITT_NOTIFY(sync_prepare, this);
    I->next = reader_head.fetch_and_store(I);
    if (!I->next) { // first arriving reader in my group; set RFLAG, test writer flags
        // unblock and/or update statuses of non-blocking readers
        if (!(fetch_and_or(rdr_count_and_flags, RFLAG) & (WFLAG1+WFLAG2))) { // no writers
            unblock_readers();
        }
    }
    __TBB_ASSERT(I->status == waiting || I->status == active, "Lock requests should be waiting or active before blocking.");
    spin_wait_while_eq(I->status, waiting); // block
    if (I->next) {
        __TBB_ASSERT(I->next->status == waiting, NULL);
        rdr_count_and_flags += RC_INCR;
        I->next->status = active; // wake successor
    }
    ITT_NOTIFY(sync_acquired, this);
}
Example 8: __TBB_ASSERT
void reader_writer_lock::end_write(scoped_lock *I) {
    __TBB_ASSERT(I==writer_head, "Internal error: can't unlock a thread that is not holding the lock.");
    my_current_writer = tbb_thread::id();
    ITT_NOTIFY(sync_releasing, this);
    if (I->next) { // potentially more writers
        writer_head = I->next;
        writer_head->status = active;
    }
    else { // No more writers; clear writer flag, test reader interest flag
        __TBB_ASSERT(writer_head, NULL);
        if (fetch_and_and(rdr_count_and_flags, ~(WFLAG1+WFLAG2)) & RFLAG) {
            unblock_readers();
        }
        writer_head.fetch_and_store(NULL);
        if (I != writer_tail.compare_and_swap(NULL, I)) { // an incoming writer is in the process of being added
            spin_wait_while_eq(I->next, (scoped_lock *)NULL); // wait for new writer to be added
            __TBB_ASSERT(I->next, "There should be a node following the last writer.");
            set_next_writer(I->next);
        }
    }
}
Example 9: __TBB_ASSERT
//! Release read lock on the given mutex
void spin_rw_mutex_v3::internal_release_reader()
{
    __TBB_ASSERT( state & READERS, "invalid state of a read lock: no readers" );
    ITT_NOTIFY(sync_releasing, this); // release reader
    __TBB_FetchAndAddWrelease( &state, -(intptr_t)ONE_READER );
}
Example 10: itt_load_pointer_with_acquire_v3
void* itt_load_pointer_with_acquire_v3( const void* src ) {
    void* result = __TBB_load_with_acquire(*static_cast<void*const*>(src));
    ITT_NOTIFY(sync_acquired, const_cast<void*>(src));
    return result;
}
Example 11: itt_store_pointer_with_release_v3
void itt_store_pointer_with_release_v3( void* dst, void* src ) {
    ITT_NOTIFY(sync_releasing, dst);
    __TBB_store_with_release(*static_cast<void**>(dst), src);
}
Example 12: __TBB_ASSERT
void arena::process( generic_scheduler& s ) {
    __TBB_ASSERT( is_alive(my_guard), NULL );
    __TBB_ASSERT( governor::is_set(&s), NULL );
    __TBB_ASSERT( !s.innermost_running_task, NULL );
    __TBB_ASSERT( my_num_slots != 1, NULL );
    // Start search for an empty slot from the one we occupied the last time
    unsigned index = s.arena_index < my_num_slots ? s.arena_index : s.random.get() % (my_num_slots - 1) + 1,
             end = index;
    __TBB_ASSERT( index != 0, "A worker cannot occupy slot 0" );
    __TBB_ASSERT( index < my_num_slots, NULL );
    // Find a vacant slot
    for ( ;; ) {
        if ( !slot[index].my_scheduler && __TBB_CompareAndSwapW( &slot[index].my_scheduler, (intptr_t)&s, 0 ) == 0 )
            break;
        if ( ++index == my_num_slots )
            index = 1;
        if ( index == end ) {
            // Likely this arena is already saturated
            if ( --my_num_threads_active == 0 )
                close_arena();
            return;
        }
    }
    ITT_NOTIFY(sync_acquired, &slot[index]);
    s.my_arena = this;
    s.arena_index = index;
    s.attach_mailbox( affinity_id(index+1) );
    slot[index].hint_for_push = index ^ unsigned(&s-(generic_scheduler*)NULL)>>16; // randomizer seed
    slot[index].hint_for_pop = index; // initial value for round-robin
    unsigned new_limit = index + 1;
    unsigned old_limit = my_limit;
    while ( new_limit > old_limit ) {
        if ( my_limit.compare_and_swap(new_limit, old_limit) == old_limit )
            break;
        old_limit = my_limit;
    }
    for ( ;; ) {
        // Try to steal a task.
        // Passing reference count is technically unnecessary in this context,
        // but omitting it here would add checks inside the function.
        __TBB_ASSERT( is_alive(my_guard), NULL );
        task* t = s.receive_or_steal_task( s.dummy_task->prefix().ref_count, /*return_if_no_work=*/true );
        if (t) {
            // A side effect of receive_or_steal_task is that innermost_running_task can be set.
            // But for the outermost dispatch loop of a worker it has to be NULL.
            s.innermost_running_task = NULL;
            s.local_wait_for_all(*s.dummy_task, t);
        }
        ++my_num_threads_leaving;
        __TBB_ASSERT( slot[index].head == slot[index].tail, "Worker cannot leave arena while its task pool is not empty" );
        __TBB_ASSERT( slot[index].task_pool == EmptyTaskPool, "Empty task pool is not marked appropriately" );
        // Revalidate quitting condition
        // This check prevents relinquishing more than necessary workers because
        // of the non-atomicity of the decision making procedure
        if ( num_workers_active() >= my_num_workers_allotted || !my_num_workers_requested )
            break;
        --my_num_threads_leaving;
        __TBB_ASSERT( !slot[0].my_scheduler || my_num_threads_active > 0, "Who requested more workers after the last one left the dispatch loop and the master's gone?" );
    }
#if __TBB_STATISTICS
    ++s.my_counters.arena_roundtrips;
    *slot[index].my_counters += s.my_counters;
    s.my_counters.reset();
#endif /* __TBB_STATISTICS */
    __TBB_store_with_release( slot[index].my_scheduler, (generic_scheduler*)NULL );
    s.inbox.detach();
    __TBB_ASSERT( s.inbox.is_idle_state(true), NULL );
    __TBB_ASSERT( !s.innermost_running_task, NULL );
    __TBB_ASSERT( is_alive(my_guard), NULL );
    // Decrementing my_num_threads_active first prevents extra workers from leaving
    // this arena prematurely, but can result in some workers returning back just
    // to repeat the escape attempt. If instead my_num_threads_leaving is decremented
    // first, the result is the opposite - premature leaving is allowed and gratuitous
    // return is prevented. Since such a race has any likelihood only when multiple
    // workers are in the stealing loop, and consequently there is a lack of parallel
    // work in this arena, we'd rather let them go out and try to get employment in
    // other arenas (before returning into this one again).
    --my_num_threads_leaving;
    if ( !--my_num_threads_active )
        close_arena();
}