This article collects typical usage examples of the C++ std::atomic::exchange method. If you have been wondering how atomic::exchange is used in practice, the curated examples below may help. You can also explore further usage examples of the enclosing class, std::atomic.
The following presents 15 code examples of the atomic::exchange method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
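Before diving in, here is a minimal sketch of what std::atomic::exchange does: it atomically replaces the stored value with a new one and returns the previous value, which is what makes it a natural building block for test-and-set locks, one-shot flags, and read-and-reset counters like the ones in the examples below.

#include <atomic>
#include <cassert>

int main() {
    std::atomic<int> value{ 42 };
    // exchange() stores the new value and returns the old one in a single atomic step
    int old = value.exchange(7);
    assert(old == 42);
    assert(value.load() == 7);
}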
Example 1: lock
void lock() noexcept {
std::int32_t collisions = 0, tests = 0, expected = 0;
// after max. spins or collisions suspend via futex
while ( BOOST_FIBERS_SPIN_MAX_TESTS > tests && BOOST_FIBERS_SPIN_MAX_COLLISIONS > collisions) {
// avoid using multiple pause instructions for a delay of a specific cycle count
// the delay of cpu_relax() (pause on Intel) depends on the processor family
// the cycle count cannot be guaranteed from one system to the next
// -> check the shared variable 'value_' in between each cpu_relax() to prevent
// unnecessarily long delays on some systems
// test shared variable 'value_'
// first access to 'value_' -> cache miss
// successive accesses to 'value_' -> cache hit
// if 'value_' was released by other fiber
// cached 'value_' is invalidated -> cache miss
if ( 0 != ( expected = value_.load( std::memory_order_relaxed) ) ) {
++tests;
#if !defined(BOOST_FIBERS_SPIN_SINGLE_CORE)
// give CPU a hint that this thread is in a "spin-wait" loop
// delays the next instruction's execution for a finite period of time (depends on processor family)
// the CPU is not under demand, parts of the pipeline are no longer being used
// -> reduces the power consumed by the CPU
// -> prevent pipeline stalls
cpu_relax();
#else
// std::this_thread::yield() allows this_thread to give up the remaining part of its time slice,
// but only to another thread on the same processor
// instead of constant checking, a thread only checks if no other useful work is pending
std::this_thread::yield();
#endif
// note: the failure ordering may not be memory_order_release (the standard forbids it); relaxed is correct here
} else if ( ! value_.compare_exchange_strong( expected, 1, std::memory_order_acquire, std::memory_order_relaxed) ) {
// spinlock now contended
// utilize 'Binary Exponential Backoff' algorithm
// linear_congruential_engine is a random number engine based on Linear congruential generator (LCG)
static thread_local std::minstd_rand generator;
// the distribution is rebuilt each time so its upper bound grows with 'collisions';
// a static distribution would freeze the backoff range at the first call
std::uniform_int_distribution< std::int32_t > distribution{ 0, static_cast< std::int32_t >( 1) << collisions };
const std::int32_t z = distribution( generator);
++collisions;
for ( std::int32_t i = 0; i < z; ++i) {
// -> reduces the power consumed by the CPU
// -> prevent pipeline stalls
cpu_relax();
}
} else {
// success, lock acquired
return;
}
}
// failure, lock not acquired
// pause via futex
if ( 2 != expected) {
expected = value_.exchange( 2, std::memory_order_acquire);
}
while ( 0 != expected) {
futex_wait( & value_, 2);
expected = value_.exchange( 2, std::memory_order_acquire);
}
}
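Only the lock() side is quoted above. For context, a matching unlock() for this three-state futex lock (0 = unlocked, 1 = locked, 2 = locked with waiters) might look like the sketch below; this is a reconstruction under that state convention, with futex_wake() assumed to be the wake-one counterpart of the futex_wait() used above.

void unlock() noexcept {
    // exchange to 0 releases the lock; an old value of 2 means at least one
    // fiber is suspended on the futex and must be woken
    if ( 2 == value_.exchange( 0, std::memory_order_release) ) {
        // wake one waiter; it re-enters the loop above and exchanges 2 back in
        futex_wake( & value_, 1);
    }
}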
Example 2: print_stats
void print_stats(deadline_timer& stats_timer, error_code const& ec)
{
if (ec) return;
time_point now = steady_clock::now();
float interval = duration_cast<milliseconds>(now - stats_start).count() / 1000.f;
if (interval <= 0.f) interval = 0.001f;
stats_start = now;
printf(
#ifdef DEBUG_STATS
"node-buf: [%s %s %s %s]"
#endif
" in: %.1f"
" invalid_enc: %.1f"
" invalid_src: %.1f"
" id_failure: %.1f"
" out_ping: %.1f"
" short_tid_pong: %.1f"
" invalid_pong: %.1f"
" added: %.1f"
" backup: %.1f\n"
#ifdef DEBUG_STATS
, suffix(nodebuf_size[0].load()).c_str()
, suffix(nodebuf_size[1].load()).c_str()
, suffix(nodebuf_size[2].load()).c_str()
, suffix(nodebuf_size[3].load()).c_str()
#endif
, incoming_queries.exchange(0) / interval
, invalid_encoding.exchange(0) / interval
, invalid_src_address.exchange(0) / interval
, failed_nodeid_queries.exchange(0) / interval
, outgoing_pings.exchange(0) / interval
, short_tid_pongs.exchange(0) / interval
, invalid_pongs.exchange(0) / interval
, added_nodes.exchange(0) / interval
, backup_nodes_returned.exchange(0) / interval
);
#ifdef CLIENTS_STAT
std::lock_guard<std::mutex> l(client_mutex);
std::vector<std::pair<int, uint16_t>> ordered;
for (auto i : client_histogram) {
ordered.emplace_back(i.second, i.first);
}
std::sort(ordered.begin(), ordered.end());
for (auto i : ordered) {
printf("[%c%c: %d] ", (i.second >> 8) & 0xff, i.second & 0xff, i.first);
}
printf("\n");
client_histogram.clear();
#endif
fflush(stdout);
stats_timer.expires_from_now(boost::posix_time::seconds(print_stats_interval));
stats_timer.async_wait(std::bind(&print_stats, std::ref(stats_timer), _1));
}
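The repeated counter.exchange(0) calls implement a read-and-reset rate counter: each stats tick atomically takes the accumulated count and zeroes it, so increments arriving between the read and the reset are never lost. A minimal standalone sketch of the idiom (names here are illustrative, not from the source):

#include <atomic>
#include <cstdint>

std::atomic<std::uint64_t> query_count{ 0 };

// hot path: called once per event
void on_query() { query_count.fetch_add(1, std::memory_order_relaxed); }

// stats path: called once per interval
double queries_per_second(double interval_seconds) {
    // exchange(0) reads the count and resets it in one atomic step
    return query_count.exchange(0, std::memory_order_relaxed) / interval_seconds;
}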
Example 3: wait_fast
/// Atomically consume the internal counter.
/// @return CHANGED   when the value differs from *old_value, or when someone
///                   updated it during the wait_fast() call;
///         TIMEDOUT  when the thread should wait for another signal;
///         SIGNALED  when the counter was consumed unchanged.
wakeup_result wait_fast(int* old_value = NULL) {
int val = value();
if (old_value && *old_value != val) {
*old_value = val;
#ifdef PERF_STATS
++m_wait_fast_count;
#endif
return wakeup_result::CHANGED;
}
int res = m_count.exchange(0, std::memory_order_acq_rel);
if (res == 0)
return wakeup_result::TIMEDOUT;
#ifdef PERF_STATS
++m_wait_fast_count;
#endif
return res == val ? wakeup_result::SIGNALED : wakeup_result::CHANGED;
}
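The producer side that pairs with wait_fast() is not shown in the example. A minimal sketch of what it might look like, assuming a futex-style slow path; signal() and futex_wake() are hypothetical names, not quoted from the source:

void signal() {
    // bump the counter so a fast-path waiter sees a non-zero value
    m_count.fetch_add(1, std::memory_order_acq_rel);
    // wake one thread that fell through to the slow (futex) path
    futex_wake(&m_count, 1);
}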
Example 4: try_pop
// returns nullptr on failure
pointer try_pop() {
pointer result = nullptr;
while (m_consumer_lock.exchange(true)) {
std::this_thread::yield();
}
// only one consumer allowed
node* first = m_first;
node* next = m_first->next;
if (next) {
// queue is not empty
result = next->value; // take it out of the node
next->value = nullptr;
// swing first forward
m_first = next;
// release exclusivity
m_consumer_lock = false;
// delete old dummy node
delete first;
return result;
}
else {
// release exclusivity
m_consumer_lock = false;
return nullptr;
}
}
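This consumer and the producer in Example 13 (push_impl, below) belong to the same two-lock queue with a dummy head node. A sketch of the members both sides assume; the class name is hypothetical, for illustration only:

#include <atomic>

template <class T>
class concurrent_queue { // illustrative name only
public:
    using pointer = T*;
private:
    struct node {
        pointer value = nullptr;
        node* next = nullptr;
    };
    node* m_first;                              // dummy node; owned by the consumer
    std::atomic<bool> m_consumer_lock{ false }; // spin lock for consumers
    node* m_last;                               // tail; owned by the producer
    std::atomic<bool> m_producer_lock{ false }; // spin lock for producers
    // try_pop() and push_impl() as shown in Examples 4 and 13
};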
Example 5: try_reclaim
void try_reclaim(node* old_head)
{
if (threads_in_pop == 1) {
// claim list of to-be-deleted nodes
node* nodes_to_delete = to_be_deleted.exchange(nullptr);
// are you the only thread in pop()?
if (!--threads_in_pop) {
// no other thread can be accessing this list of pending nodes.
// There may be new pending nodes, but you're not bothered
// about them for now, as long as it's safe to reclaim your
// list.
delete_nodes(nodes_to_delete);
}
else if (nodes_to_delete) {
// not safe to reclaim the nodes, so if there are any,
// you must chain them back onto the list of nodes
// pending deletion.
// This can happen if there are multiple threads accessing the
// data structure concurrently. Other threads might have
// called pop() in between the first test of threads_in_pop and
// the "claiming" of the list, potentially adding new nodes to
// the list that are still being accessed by one or more of
// those other threads.
chain_pending_nodes(nodes_to_delete);
}
delete old_head;
}
else {
// not safe to delete any nodes, add the node to the pending list
chain_pending_node(old_head);
--threads_in_pop;
}
}
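This is the lock-free reclamation scheme from C++ Concurrency in Action; the helpers it calls are not shown. A sketch consistent with the calls above, with the bodies reconstructed as an assumption:

static void delete_nodes(node* nodes) {
    // walk the claimed private list and free every node
    while (nodes) {
        node* next = nodes->next;
        delete nodes;
        nodes = next;
    }
}

void chain_pending_nodes(node* first, node* last) {
    // splice [first..last] back onto the shared list with a CAS loop
    last->next = to_be_deleted.load();
    while (!to_be_deleted.compare_exchange_weak(last->next, first))
        ; // on failure last->next holds the fresh head; retry
}

void chain_pending_nodes(node* nodes) {
    node* last = nodes;
    while (node* const next = last->next) // find the end of the chain
        last = next;
    chain_pending_nodes(nodes, last);
}

void chain_pending_node(node* n) {
    chain_pending_nodes(n, n); // a single-node chain
}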
Example 6: reportJitMaturity
/*
* If the jit maturity counter is enabled, update it with the current amount of
* emitted code.
*/
void reportJitMaturity(const CodeCache& code) {
static auto jitMaturityCounter = ServiceData::createCounter("jit.maturity");
// Optimized translations are faster than profiling translations, which are
// faster than the interpreter. But when optimized translations are
// generated, some profiling translations will become dead. We assume the
// incremental value of an optimized translation over the corresponding
// profiling translations is comparable to the incremental value of a
// profiling translation of similar size; thus we don't have to apply
// different weights to code in different regions.
auto const codeSize =
code.hot().used() + code.main().used() + code.prof().used();
if (jitMaturityCounter) {
// EvalJitMatureSize is supposed to be set to approximately 20% of the
// code that will give us full performance, so recover the "fully mature"
// size with some math.
auto const fullSize = RuntimeOption::EvalJitMatureSize * 5;
auto const after = codeSize >= fullSize ? 100
: (codeSize * 100 / fullSize);
auto const before = jitMaturityCounter->getValue();
if (after > before) jitMaturityCounter->setValue(after);
}
if (!s_loggedJitMature.load(std::memory_order_relaxed) &&
StructuredLog::enabled() &&
codeSize >= RuntimeOption::EvalJitMatureSize &&
!s_loggedJitMature.exchange(true, std::memory_order_relaxed)) {
StructuredLogEntry cols;
cols.setInt("jit_mature_sec", time(nullptr) - HttpServer::StartTime);
StructuredLog::log("hhvm_warmup", cols);
}
}
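The s_loggedJitMature dance at the end is a common "log exactly once" idiom: a cheap relaxed load filters the hot path, and exchange(true) guarantees a single winner even if several threads pass the load simultaneously. A minimal standalone sketch of the same idiom, assuming nothing from HHVM:

#include <atomic>
#include <cstdio>

std::atomic<bool> logged{ false };

void maybe_log_once() {
    // fast path: once logged, the relaxed load short-circuits
    if (!logged.load(std::memory_order_relaxed) &&
        !logged.exchange(true, std::memory_order_relaxed)) {
        // exactly one caller ever reaches this line
        std::puts("first time through");
    }
}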
Example 7: delete_nodes_with_no_hazards
void delete_nodes_with_no_hazards()
{
// first claims the entire list of nodes to be reclaimed;
// ensures that this is the only thread trying to reclaim
// this particular set of nodes; other threads are now free
// to add further nodes to the list or even try to reclaim
// them without impacting the operation of this thread.
data_to_reclaim* current = nodes_to_reclaim.exchange(nullptr);
while (current) {
data_to_reclaim* const next = current->next;
// check each node in turn to see if there are any outstanding
// hazard pointers.
if (!outstanding_hazard_pointers_for(current->data)) {
// if there aren't, delete the entry
delete current;
}
else {
// otherwise, just add the item back on the list for
// reclaiming later
add_to_reclaim_list(current);
}
current=next;
}
}
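outstanding_hazard_pointers_for() belongs to the same hazard-pointer scheme (again from C++ Concurrency in Action). A sketch of it under the book's assumption of a fixed global table of hazard-pointer slots:

#include <atomic>
#include <thread>

constexpr unsigned max_hazard_pointers = 100;

struct hazard_pointer {
    std::atomic<std::thread::id> id;
    std::atomic<void*> pointer;
};

hazard_pointer hazard_pointers[max_hazard_pointers];

bool outstanding_hazard_pointers_for(void* p) {
    // a node is still in use if any thread's hazard slot points at it
    for (unsigned i = 0; i < max_hazard_pointers; ++i) {
        if (hazard_pointers[i].pointer.load() == p)
            return true;
    }
    return false;
}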
Example 8: States_FreezeCurrentSlot
void States_FreezeCurrentSlot()
{
// FIXME : Use of the IsSavingOrLoading flag is mostly a hack until we implement a
// complete thread to manage queuing savestate tasks, and zipping states to disk. --air
if (!SysHasValidState())
{
Console.WriteLn("Save state: Aborting (VM is not active).");
return;
}
if (wxGetApp().HasPendingSaves() || IsSavingOrLoading.exchange(true))
{
Console.WriteLn("Load or save action is already pending.");
return;
}
Sstates_updateLoadBackupMenuItem(true);
GSchangeSaveState(StatesC, SaveStateBase::GetFilename(StatesC).ToUTF8());
StateCopy_SaveToSlot(StatesC);
// Hack: Update the saveslot saying it's filled *right now* because it's still writing the file and we don't have a timestamp.
saveslot_cache[StatesC].empty = false;
saveslot_cache[StatesC].updated = wxDateTime::Now();
saveslot_cache[StatesC].crc = ElfCRC;
GetSysExecutorThread().PostIdleEvent(SysExecEvent_ClearSavingLoadingFlag());
}
Example 9: UpdateCallback
static void UpdateCallback(u64 userdata, int cycles_late) {
SharedMem* mem = reinterpret_cast<SharedMem*>(shared_memory->GetPointer());
if (is_device_reload_pending.exchange(false))
LoadInputDevices();
PadState state;
state.zl.Assign(zl_button->GetStatus());
state.zr.Assign(zr_button->GetStatus());
// Get current c-stick position and update c-stick direction
float c_stick_x_f, c_stick_y_f;
std::tie(c_stick_x_f, c_stick_y_f) = c_stick->GetStatus();
constexpr int MAX_CSTICK_RADIUS = 0x9C; // Max value for a c-stick radius
const s16 c_stick_x = static_cast<s16>(c_stick_x_f * MAX_CSTICK_RADIUS);
const s16 c_stick_y = static_cast<s16>(c_stick_y_f * MAX_CSTICK_RADIUS);
if (!raw_c_stick) {
const HID::DirectionState direction = HID::GetStickDirectionState(c_stick_x, c_stick_y);
state.c_stick_up.Assign(direction.up);
state.c_stick_down.Assign(direction.down);
state.c_stick_left.Assign(direction.left);
state.c_stick_right.Assign(direction.right);
}
// TODO (wwylele): implement raw C-stick data for raw_c_stick = true
const u32 last_entry_index = mem->index;
mem->index = next_pad_index;
next_pad_index = (next_pad_index + 1) % mem->entries.size();
// Get the previous Pad state
PadState old_state{mem->entries[last_entry_index].current_state};
// Compute bitmask with 1s for bits different from the old state
PadState changed = {state.hex ^ old_state.hex};
// Get the current Pad entry
PadDataEntry& pad_entry = mem->entries[mem->index];
// Update entry properties
pad_entry.current_state.hex = state.hex;
pad_entry.delta_additions.hex = changed.hex & state.hex;
pad_entry.delta_removals.hex = changed.hex & old_state.hex;
pad_entry.c_stick_x = c_stick_x;
pad_entry.c_stick_y = c_stick_y;
// If we just updated index 0, provide a new timestamp
if (mem->index == 0) {
mem->index_reset_ticks_previous = mem->index_reset_ticks;
mem->index_reset_ticks = CoreTiming::GetTicks();
}
update_event->Signal();
// Reschedule recurrent event
CoreTiming::ScheduleEvent(msToCycles(update_period) - cycles_late, update_callback_id);
}
Example 10: lock
void lock() noexcept {
std::size_t collisions = 0 ;
for (;;) {
// avoid using multiple pause instructions for a delay of a specific cycle count
// the delay of cpu_relax() (pause on Intel) depends on the processor family
// the cycle count cannot be guaranteed from one system to the next
// -> check the shared variable 'state_' in between each cpu_relax() to prevent
// unnecessarily long delays on some systems
std::size_t tests = 0;
// test shared variable 'state_'
// first access to 'state_' -> cache miss
// successive accesses to 'state_' -> cache hit
// if 'state_' was released by other fiber
// cached 'state_' is invalidated -> cache miss
while ( spinlock_status::locked == state_.load( std::memory_order_relaxed) ) {
#if !defined(BOOST_FIBERS_SPIN_SINGLE_CORE)
if ( BOOST_FIBERS_SPIN_MAX_TESTS > tests) {
++tests;
// give CPU a hint that this thread is in a "spin-wait" loop
// delays the next instruction's execution for a finite period of time (depends on processor family)
// the CPU is not under demand, parts of the pipeline are no longer being used
// -> reduces the power consumed by the CPU
// -> prevent pipeline stalls
cpu_relax();
} else {
// std::this_thread::sleep_for( 0us) has a fairly long instruction path length,
// combined with an expensive ring3-to-ring0 transition costing about 1000 cycles;
// it lets this_thread give up the remaining part of its time slice
// if and only if a thread of equal or greater priority is ready to run
static constexpr std::chrono::microseconds us0{ 0 };
std::this_thread::sleep_for( us0);
}
#else
std::this_thread::yield();
#endif
}
// test-and-set shared variable 'state_'
// every time 'state_' is exchanged it is signaled over the bus, even if the test fails
if ( spinlock_status::locked == state_.exchange( spinlock_status::locked, std::memory_order_acquire) ) {
// spinlock now contended
// utilize 'Binary Exponential Backoff' algorithm
// linear_congruential_engine is a random number engine based on Linear congruential generator (LCG)
static thread_local std::minstd_rand generator;
// rebuilt each iteration so the upper bound grows with 'collisions';
// a static distribution would freeze the backoff range at the first call
std::uniform_int_distribution< std::size_t > distribution{ 0, static_cast< std::size_t >( 1) << collisions };
const std::size_t z = distribution( generator);
++collisions;
for ( std::size_t i = 0; i < z; ++i) {
// -> reduces the power consumed by the CPU
// -> prevent pipeline stalls
cpu_relax();
}
} else {
// success, thread has acquired the lock
break;
}
}
}
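Only lock() is quoted here as well. The matching unlock() for this TTAS spinlock is a one-line release; a sketch, assuming spinlock_status::unlocked is the released state:

void unlock() noexcept {
    // the release store publishes the critical section's writes to the next owner
    state_.store( spinlock_status::unlocked, std::memory_order_release);
}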
Example 11: bench_thread
void bench_thread(B* b) {
while (!b->should_stop()) {
lock();
assert(tester.exchange(1) == 0);
tester.store(0);
unlock();
b->inc();
}
}
Example 12: exchange_consume
[[gnu::always_inline]]
inline T exchange_consume(std::atomic<T> &x, U &&desired)
{
// Wrong for DEC Alpha 21264, but OK for other and contemporary processors:
// on those, relaxed and consume are equivalent, provided the compiler does not
// break the dependency chain (which holds for a pointer dereference).
T res = x.exchange(std::forward<U>(desired), std::memory_order_relaxed);
asm volatile("" ::: "memory");
return res;
}
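A hedged usage sketch: the point of consume-like ordering is that reads data-dependent on the returned pointer are ordered after the exchange without a full acquire fence. Payload and slot below are hypothetical names for illustration:

struct Payload { int data; };
std::atomic<Payload*> slot{ nullptr };

int consume_latest() {
    // take ownership of whatever was last published
    Payload* p = exchange_consume(slot, nullptr);
    // the dereference is data-dependent on p, so it observes the
    // producer's writes to *p on the architectures discussed above
    return p ? p->data : -1;
}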
Example 13: push_impl
void push_impl(node* tmp) {
// acquire exclusivity
while (m_producer_lock.exchange(true)) {
std::this_thread::yield();
}
// publish & swing last forward
m_last->next = tmp;
m_last = tmp;
// release exclusivity
m_producer_lock = false;
}
Example 14: while
void TextureSystem::Details::CollectGarbage()
{
Texture::Details* garbage = g_textureGarbage.exchange(nullptr, std::memory_order_relaxed);
while (garbage != nullptr)
{
Texture::Details* next = garbage->m_next;
g_texturePool.Destroy(garbage);
garbage = next;
}
}
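The drain pattern above (exchange the list head with nullptr, then walk the now-private list) pairs with a lock-free push on the producer side. A sketch of what that push might look like; DeferDestroy is a hypothetical name, reconstructed rather than quoted:

void DeferDestroy(Texture::Details* t)
{
    // splice t onto the garbage list head with a CAS loop
    t->m_next = g_textureGarbage.load(std::memory_order_relaxed);
    while (!g_textureGarbage.compare_exchange_weak(
        t->m_next, t,
        std::memory_order_release, std::memory_order_relaxed))
    {
        // on failure t->m_next holds the fresh head; retry
    }
}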
Example 15: start
void start ()
{
assert (m_stopped == true);
assert (m_stop_called == false);
if (m_stopped.exchange (false) == true)
{
m_stop_complete.reset ();
addReference ();
}
}