本文整理汇总了C++中std::atomic::load方法的典型用法代码示例。如果您正苦于以下问题:C++ atomic::load方法的具体用法?C++ atomic::load怎么用?C++ atomic::load使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类std::atomic
的用法示例。
在下文中一共展示了atomic::load方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: consumer
void consumer()
{
    std::cout << "Consumer start.\n";
    // Spin until the producer publishes the string pointer. The acquire load
    // pairs with the producer's release store, so once we see a non-null
    // pointer we are also guaranteed to see the producer's write to 'data'.
    std::string* msg = nullptr;
    for (;;) {
        msg = ptr.load(std::memory_order_acquire);
        if (msg != nullptr)
            break;
    }
    assert(*msg == "Hello");
    assert(data == 42);
    std::cout << "Consumer end.\n";
}
示例2: lookup
// Look up the instance-bit index assigned to 'name'; returns 0 when the
// class name has no assigned bit.
unsigned lookup(const StringData* name) {
    // Callers must either run after initialization has completed, or be the
    // initializing thread itself (which may perform lookups while it is
    // still populating the map).
    assert(g_initFlag.load(std::memory_order_acquire) ||
           pthread_equal(s_initThread.load(std::memory_order_acquire),
                         pthread_self()));
    auto const bit = folly::get_ptr(s_instanceBitsMap, name);
    if (!bit) return 0;
    // Bit 0 is reserved; valid assignments are [1, kNumInstanceBits).
    assert(*bit >= 1 && *bit < kNumInstanceBits);
    return *bit;
}
示例3: getMask
// Compute the byte offset and single-bit mask for 'name''s instance bit.
// Returns false (outputs untouched) when the name has no assigned bit.
bool getMask(const StringData* name, int& offset, uint8_t& mask) {
    assert(g_initFlag.load(std::memory_order_acquire));
    const unsigned bit = lookup(name);
    if (bit == 0) return false;
    // Each byte of the instance-bits field holds CHAR_BIT flags; find the
    // byte containing 'bit' and the bit's position inside that byte.
    const size_t bitsPerByte = sizeof(mask) * CHAR_BIT;
    offset = Class::instanceBitsOff() + (bit / bitsPerByte) * sizeof(mask);
    mask = static_cast<uint8_t>(1u << (bit % bitsPerByte));
    return true;
}
示例4: DrawTexture
// Upload the software-rendered frame (RGBA8, width*height) as a GL texture
// and draw it as a full-screen quad. Also services a pending screenshot
// request before drawing.
void SWRenderer::DrawTexture(u8 *texture, int width, int height)
{
// FIXME: This should add black bars when the game has set the VI to render less than the full xfb.
// Save screenshot if one was requested (flag and filename are shared with
// another thread, hence the mutex around the filename).
if (s_bScreenshot.load())
{
std::lock_guard<std::mutex> lk(s_criticalScreenshot);
// width * 4 = row stride in bytes for RGBA8 data.
TextureToPng(texture, width * 4, s_sScreenshotName, width, height, false);
// Reset settings so the next request starts clean.
s_sScreenshotName.clear();
s_bScreenshot.store(false);
}
GLsizei glWidth = (GLsizei)GLInterface->GetBackBufferWidth();
GLsizei glHeight = (GLsizei)GLInterface->GetBackBufferHeight();
// Update GLViewPort to cover the whole back buffer.
glViewport(0, 0, glWidth, glHeight);
glScissor(0, 0, glWidth, glHeight);
// Re-upload the frame every call; nearest filtering keeps pixels sharp.
glBindTexture(GL_TEXTURE_2D, s_RenderTarget);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, (GLsizei)width, (GLsizei)height, 0, GL_RGBA, GL_UNSIGNED_BYTE, texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glUseProgram(program);
// NOTE(review): the tex coords below are vertically flipped relative to the
// vertex positions (v=1 pairs with y=-1), presumably to compensate for the
// rasterizer writing rows top-down — confirm against the buffer layout.
static const GLfloat verts[4][2] = {
{ -1, -1}, // Left top
{ -1, 1}, // left bottom
{ 1, 1}, // right bottom
{ 1, -1} // right top
};
static const GLfloat texverts[4][2] = {
{0, 1},
{0, 0},
{1, 0},
{1, 1}
};
// Draw the quad, then unbind everything we touched.
glVertexAttribPointer(attr_pos, 2, GL_FLOAT, GL_FALSE, 0, verts);
glVertexAttribPointer(attr_tex, 2, GL_FLOAT, GL_FALSE, 0, texverts);
glEnableVertexAttribArray(attr_pos);
glEnableVertexAttribArray(attr_tex);
glUniform1i(uni_tex, 0);
glActiveTexture(GL_TEXTURE0);
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
glDisableVertexAttribArray(attr_pos);
glDisableVertexAttribArray(attr_tex);
glBindTexture(GL_TEXTURE_2D, 0);
}
示例5: close
// Close the thread pool: waits for all queued-up jobs to finish.
// Additional jobs will not be allowed to be posted to the queue for scheduling.
void close() {
    // exchange() atomically tests and clears the flag, so only one caller
    // can observe the active->inactive transition. The previous
    // load()-then-store() pair had a check-then-act race: two threads
    // calling close() concurrently could both pass the load() check and
    // both attempt to join the same workers (joining an already-joined
    // std::thread throws std::system_error).
    if (isActive.exchange(false)) {
        // Wake every worker so each can observe the cleared flag and exit.
        condition.notify_all();
        for (std::thread& t : mWorkers) {
            t.join();
        }
    }
}
示例6: try_value_pop_
// Attempt to claim one slot from the bounded MPMC ring buffer (Vyukov-style
// queue). On success, 's' points at the claimed slot and 'idx' is the ticket
// used to claim it; the CALLER must read the value and then advance the
// slot's cycle to hand the cell back to producers. Returns 'empty' without
// blocking when no filled slot is available.
channel_op_status try_value_pop_( slot *& s, std::size_t & idx) {
idx = consumer_idx_.load( std::memory_order_relaxed);
for (;;) {
// Map the ticket onto a ring slot (the mask implies capacity_ is a
// power of two).
s = & slots_[idx & (capacity_ - 1)];
// Acquire-load pairs with the producer's release store of the cycle,
// making the stored value visible before we read it.
std::size_t cycle = s->cycle.load( std::memory_order_acquire);
// diff == 0: slot holds a value for exactly this ticket.
// diff <  0: producers have not filled this slot yet -> empty.
// diff >  0: another consumer already took this ticket -> reload & retry.
std::intptr_t diff{ static_cast< std::intptr_t >( cycle) - static_cast< std::intptr_t >( idx + 1) };
if ( 0 == diff) {
// Claim the ticket; on CAS failure 'idx' is refreshed automatically
// and the loop re-evaluates with the new ticket.
if ( consumer_idx_.compare_exchange_weak( idx, idx + 1, std::memory_order_relaxed) ) {
break;
}
} else if ( 0 > diff) {
return channel_op_status::empty;
} else {
idx = consumer_idx_.load( std::memory_order_relaxed);
}
}
// incrementing the slot cycle must be deferred till the value has been consumed
// slot cycle tells producers that the cell can be re-used (store new value)
return channel_op_status::success;
}
示例7: main
int main()
{
x=false;
y=false;
z=0;
std::thread a(write_x_then_y);
std::thread b(read_y_then_x);
a.join();
b.join();
assert(z.load()!=0);
}
示例8: lock
// Acquire the spinlock. Phase 1 is a bounded test-and-test-and-set spin with
// binary exponential backoff; if that fails, phase 2 suspends on a futex.
// Lock word protocol: 0 = free, 1 = locked, 2 = locked with waiters.
void lock() noexcept {
    std::int32_t collisions = 0, tests = 0, expected = 0;
    // after max. spins or collisions suspend via futex
    while ( BOOST_FIBERS_SPIN_MAX_TESTS > tests && BOOST_FIBERS_SPIN_MAX_COLLISIONS > collisions) {
        // avoid using multiple pause instructions for a delay of a specific cycle count
        // the delay of cpu_relax() (pause on Intel) depends on the processor family
        // the cycle count cannot be guaranteed from one system to the next
        // -> check the shared variable 'value_' in between each cpu_relax() to prevent
        //    unnecessarily long delays on some systems
        // test shared variable 'value_':
        // first access to 'value_' -> cache miss
        // successive access to 'value_' -> cache hit
        // if 'value_' was released by another fiber, the cached copy of
        // 'value_' is invalidated -> cache miss
        if ( 0 != ( expected = value_.load( std::memory_order_relaxed) ) ) {
            ++tests;
#if !defined(BOOST_FIBERS_SPIN_SINGLE_CORE)
            // give CPU a hint that this thread is in a "spin-wait" loop
            // delays the next instruction's execution for a finite period of time (depends on processor family)
            // the CPU is not under demand, parts of the pipeline are no longer being used
            // -> reduces the power consumed by the CPU
            cpu_relax();
#else
            // std::this_thread::yield() allows this_thread to give up the remaining part of its time slice,
            // but only to another thread on the same processor
            // instead of constant checking, a thread only checks if no other useful work is pending
            std::this_thread::yield();
#endif
        // BUGFIX: the failure ordering was memory_order_release, which is
        // invalid for the failure argument of compare_exchange — a failed CAS
        // performs only a load, and the standard forbids release/acq_rel
        // there. memory_order_relaxed suffices: on failure 'expected' is only
        // fed back into this spin loop.
        } else if ( ! value_.compare_exchange_strong( expected, 1, std::memory_order_acquire, std::memory_order_relaxed) ) {
            // spinlock now contended
            // utilize 'Binary Exponential Backoff' algorithm
            // linear_congruential_engine is a random number engine based on Linear congruential generator (LCG)
            static thread_local std::minstd_rand generator;
            const std::int32_t z = std::uniform_int_distribution< std::int32_t >{
                0, static_cast< std::int32_t >( 1) << collisions }( generator);
            ++collisions;
            for ( std::int32_t i = 0; i < z; ++i) {
                cpu_relax();
            }
        } else {
            // success, lock acquired
            return;
        }
    }
    // failure, lock not acquired
    // pause via futex: mark the lock word as contended (2) so the releasing
    // side knows a waiter must be woken (presumably handled in unlock(),
    // which is not visible here).
    if ( 2 != expected) {
        expected = value_.exchange( 2, std::memory_order_acquire);
    }
    while ( 0 != expected) {
        futex_wait( & value_, 2);
        expected = value_.exchange( 2, std::memory_order_acquire);
    }
}
示例9: reader
// Endlessly print the contents of the RCU-protected array pointed to by the
// global 'data'.
void reader() {
while (true) {
// Hold an RCU read-side critical section for the duration of each pass,
// so a concurrent updater cannot reclaim the array while we read it.
folly::rcu_reader r;
std::ostringstream oss;
IntArray* cur_ptr = data.load();
// Format all elements into one buffer first so the printf below emits
// the whole line atomically with respect to other printers.
for (int i = 0; i < ELE_NUM; ++i) {
oss << cur_ptr->data[i] << " ";
}
printf("%s\n", oss.str().c_str());
}
}
示例10:
/**
 * @brief Default Destructor
 *
 * Drops this handle's reference. The final owner additionally releases the
 * write lock (if still held) and frees the shared bookkeeping objects.
 */
~SharedWriteLock()
{
    // Moved-from / empty handle: nothing to release.
    if (reference == nullptr)
        return;
    // Decrement the shared count; only the last owner cleans up.
    if (reference->operator--() > 0)
        return;
    if (locked->load())
        rw_mutex->writeUnlock();
    delete reference;
    delete locked;
};
示例11: acquire
// Take a reference to the current region via a lock-free CAS loop
// (rl:: = Relacy race-detector wrappers around the std atomics API).
pc_region* acquire()
{
pc_sys_anchor cmp (head.load(rl::memory_order_relaxed));
pc_sys_anchor xchg;
do
{
// Bump the packed reference count by 2 while keeping the same region;
// NOTE(review): the step of 2 suggests the low bit of refcnt is
// reserved for a flag — confirm against pc_sys_anchor's definition.
xchg.refcnt = cmp.refcnt + 2;
xchg.region = cmp.region;
}
// On CAS failure 'cmp' is refreshed with the current anchor and the
// increment is recomputed; acquire order pairs with the releasing side.
while (false == head.compare_exchange_weak(cmp, xchg, rl::memory_order_acquire));
return cmp.region;
}
示例12: get_jvm_env
// Return a JNIEnv for the calling thread, attaching the thread to the JVM
// if necessary. Throws std::runtime_error when attaching fails.
JNIEnv* get_jvm_env()
{
    abort_if_no_jvm();
    JNIEnv* attached_env = nullptr;
    const auto rc = JVM.load()->AttachCurrentThread(&attached_env, nullptr);
    if (rc == JNI_OK)
    {
        return attached_env;
    }
    throw std::runtime_error("Could not attach to JVM");
}
示例13: push
// Lock-free stack push (Treiber style), written for the Relacy race detector
// (rl:: memory orders, VAR() instrumentation wrapper).
// NOTE(review): 'index' is unused here — presumably the thread index supplied
// by the test harness; confirm against the harness signature.
void push(unsigned index, int data)
{
node* n = new node ();
n->VAR(data_) = data;
node* next = head_.load(std::memory_order_relaxed);
for (;;)
{
// Link the new node in front of the current head, then try to publish
// it; release order makes data_ visible to whoever pops the node.
n->next_.store(next, rl::memory_order_relaxed);
// On CAS failure 'next' is refreshed with the new head and we retry.
if (head_.compare_exchange_weak(next, n, rl::memory_order_release))
break;
}
}
示例14: call_void
// HPX test helper: counts invocations and asserts that it is never executed
// by two threads at the same time.
void call_void()
{
++count_call_void;
// make sure this function is not concurrently invoked: the fetch_add must
// take the active count from 0 to 1, otherwise another call is in flight
HPX_TEST_EQ(count_active_call_void.fetch_add(1) + 1, 1);
// suspend mid-call to widen the window in which a concurrent invocation
// would be detected
hpx::this_thread::suspend(std::chrono::microseconds(100));
--count_active_call_void;
HPX_TEST_EQ(count_active_call_void.load(), 0);
}
示例15: RunGpuOnCpu
// Execute pending GPU FIFO work on the CPU for up to 'ticks' CPU cycles
// (scaled by the sync-GPU overclock setting, plus any banked ticks in
// s_sync_ticks). Returns -1 when the GPU went idle (drop the event handler),
// otherwise the number of cycles to wait before the next slot.
static int RunGpuOnCpu(int ticks)
{
SCPFifoStruct& fifo = CommandProcessor::fifo;
bool reset_simd_state = false;
// Budget: configured tick allowance plus whatever the GPU thread banked.
int available_ticks = int(ticks * SConfig::GetInstance().fSyncGpuOverclock) + s_sync_ticks.load();
while (fifo.bFF_GPReadEnable && fifo.CPReadWriteDistance && !AtBreakpoint() &&
available_ticks >= 0)
{
if (s_use_deterministic_gpu_thread)
{
// Deterministic mode: copy the data and let the GPU thread decode it.
ReadDataFromFifoOnCPU(fifo.CPReadPointer);
s_gpu_mainloop.Wakeup();
}
else
{
// Decode on this thread. The opcode decoder needs default SIMD/FPU
// rounding state, so save the emulated CPU's state once and restore
// it after the loop.
if (!reset_simd_state)
{
FPURoundMode::SaveSIMDState();
FPURoundMode::LoadDefaultSIMDState();
reset_simd_state = true;
}
ReadDataFromFifo(fifo.CPReadPointer);
u32 cycles = 0;
s_video_buffer_read_ptr = OpcodeDecoder::Run(
DataReader(s_video_buffer_read_ptr, s_video_buffer_write_ptr), &cycles, false);
available_ticks -= cycles;
}
// Advance the read pointer in 32-byte blocks, wrapping at CPEnd.
if (fifo.CPReadPointer == fifo.CPEnd)
fifo.CPReadPointer = fifo.CPBase;
else
fifo.CPReadPointer += 32;
fifo.CPReadWriteDistance -= 32;
}
CommandProcessor::SetCPStatusFromGPU();
if (reset_simd_state)
{
FPURoundMode::LoadSIMDState();
}
// Discard all available ticks as there is nothing to do any more.
s_sync_ticks.store(std::min(available_ticks, 0));
// If the GPU is idle, drop the handler.
if (available_ticks >= 0)
return -1;
// Always wait at least for GPU_TIME_SLOT_SIZE cycles.
return -available_ticks + GPU_TIME_SLOT_SIZE;
}