This article collects typical usage examples of the C++ method std::atomic::compare_exchange_weak. If you are unsure how atomic::compare_exchange_weak is used in practice, the curated samples below may help; they also illustrate the enclosing class std::atomic.
The following presents 15 code examples of atomic::compare_exchange_weak, sorted by popularity by default.
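Before the examples, a quick refresher: compare_exchange_weak(expected, desired) atomically stores desired only if the current value equals expected; on failure it copies the observed value back into expected, and it may also fail spuriously even when the values match, which is why it is almost always called in a retry loop. A minimal sketch of that pattern (the counter and fetch_multiply names below are illustrative, not taken from any of the examples):

#include <atomic>

std::atomic<int> counter{0};

// Multiplies counter by `factor` atomically using a CAS retry loop and
// returns the value observed before the update.
int fetch_multiply(int factor) {
    int expected = counter.load();
    // On failure, compare_exchange_weak refreshes `expected` with the
    // current value; spurious failures simply cause another iteration.
    while (!counter.compare_exchange_weak(expected, expected * factor)) {
    }
    return expected;
}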
Example 1: enqueue_node
bool enqueue_node(typename super::mailbox_element* node,
                  int next_state = ready) {
    CPPA_REQUIRE(node->marked == false);
    if (this->m_mailbox._push_back(node)) {
        for (;;) {
            int state = m_state.load();
            switch (state) {
                case blocked: {
                    if (m_state.compare_exchange_weak(state, next_state)) {
                        CPPA_REQUIRE(this->m_scheduler != nullptr);
                        if (next_state == ready) {
                            this->m_scheduler->enqueue(this);
                        }
                        return true;
                    }
                    break;
                }
                case about_to_block: {
                    if (m_state.compare_exchange_weak(state, ready)) {
                        return false;
                    }
                    break;
                }
                default: return false;
            }
        }
    }
    return false;
}
Example 2: fetch_new_data
// atomically sets m_stack back and enqueues all elements to the cache
bool fetch_new_data(pointer end_ptr) {
    CPPA_REQUIRE(m_head == nullptr);
    CPPA_REQUIRE(!end_ptr || end_ptr == stack_empty_dummy());
    pointer e = m_stack.load();
    // must not be called on a closed queue
    CPPA_REQUIRE(e != nullptr);
    // it's enough to check this once, since only the owner is allowed
    // to close the queue and only the owner is allowed to call this
    // member function
    while (e != end_ptr) {
        if (m_stack.compare_exchange_weak(e, end_ptr)) {
            if (is_dummy(e)) {
                // only use-case for this is closing a queue
                CPPA_REQUIRE(end_ptr == nullptr);
                return false;
            }
            while (e) {
                CPPA_REQUIRE(!is_dummy(e));
                auto next = e->next;
                e->next = m_head;
                m_head = e;
                e = next;
            }
            return true;
        }
        // next iteration
    }
    return false;
}
Example 3: chain_pending_nodes
void chain_pending_nodes(node* first, node* last)
{
    last->next = to_be_deleted;
    while (!to_be_deleted.compare_exchange_weak(
        last->next, first));
}
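This snippet splices the chain [first, last] onto the head of a pending-deletion list with a CAS retry loop; on failure, last->next is refreshed with the current list head, so the chain stays linked correctly. The surrounding declarations are not part of the example; they presumably look something like the following sketch (an assumption based on the usual pending-deletion-list pattern, not original code):

#include <atomic>

struct node {
    // ... payload and other members ...
    node* next = nullptr;
};

// head of the list of nodes awaiting deletion
std::atomic<node*> to_be_deleted{nullptr};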
Example 4: instantiate
my_type const& instantiate() {
    static std::aligned_storage<
        sizeof(my_type), alignof(my_type)
    >::type storage;
    static std::atomic<int> flag;
    while (flag < 2) {
        // all threads spin until the object is properly initialized
        int expected = 0;
        if (flag.compare_exchange_weak(expected, 1)) {
            // only one thread succeeds at the compare_exchange.
            try {
                ::new (&storage) my_type;
            } catch(...) {
                // Initialization failed. Let another thread try.
                flag = 0;
                throw;
            }
            // Success!
            std::atexit([] {
                reinterpret_cast<my_type&>(storage).~my_type();
            });
            flag = 2;
        }
    }
    return reinterpret_cast<my_type&>(storage);
}
Example 5: fetch_new_data
// atomically sets stack_ back and enqueues all elements to the cache
bool fetch_new_data(pointer end_ptr) {
    CAF_ASSERT(end_ptr == nullptr || end_ptr == stack_empty_dummy());
    pointer e = stack_.load();
    // must not be called on a closed queue
    CAF_ASSERT(e != nullptr);
    // fetching data while blocked is an error
    CAF_ASSERT(e != reader_blocked_dummy());
    // it's enough to check this once, since only the owner is allowed
    // to close the queue and only the owner is allowed to call this
    // member function
    while (e != end_ptr) {
        if (stack_.compare_exchange_weak(e, end_ptr)) {
            // fetching data while blocked is an error
            CAF_ASSERT(e != reader_blocked_dummy());
            if (is_dummy(e)) {
                // only use-case for this is closing a queue
                CAF_ASSERT(end_ptr == nullptr);
                return false;
            }
            while (e) {
                CAF_ASSERT(! is_dummy(e));
                auto next = e->next;
                e->next = head_;
                head_ = e;
                e = next;
            }
            return true;
        }
        // next iteration
    }
    return false;
}
Example 6: push
// Concurrent writes to the same cache element can result in invalid cache
// elements, causing a pointer address not to be found in the cache even
// though it should be, i.e. false cache misses. While this can cause a
// slow-down, the cost of keeping the cache thread-local or atomic is
// much higher (yes, this was measured).
void push(const void* P) {
    unsigned acquiredVal = mostRecent;
    while (!mostRecent.compare_exchange_weak(acquiredVal, (acquiredVal+1)%lines.size())) {
        acquiredVal = mostRecent;
    }
    lines[acquiredVal] = P;
}
Example 7: pop
std::shared_ptr<T> pop()
{
    node* old_head = head.load();
    while (old_head &&
           !head.compare_exchange_weak(old_head, old_head->next));
    return old_head ? old_head->data : std::shared_ptr<T>();
}
Example 8: pop
int pop(unsigned index)
{
    node* n = 0;
    for (;;)
    {
        // acquire SMR (safe memory reclamation) protection for the current head
        n = smr_acquire(index, head_);
        if (0 == n)
            break;
        node* next = n->next_.load(rl::memory_order_relaxed);
        if (head_.compare_exchange_weak(n, next, rl::memory_order_acquire))
            break;
        // CAS failed: drop the protection and retry with the new head
        smr_release(index);
    }
    smr_release(index);
    if (n)
    {
        int data = n->VAR(data_);
        // defer freeing n until it is safe to reclaim
        smr_defer(index, n);
        return data;
    }
    else
    {
        return 0;
    }
}
Example 9: push
void push(T const& data)
{
    node* const new_node = new node(data);
    new_node->next = head.load();
    while (!head.compare_exchange_weak(new_node->next, new_node));
}
Example 10: clearTag
void clearTag(std::atomic<Node::Ptr> &loc, std::memory_order mo) {
    // We want to just xadd(-1) the thing, but C++ doesn't let us
    // because of the level of obstruction^Wabstraction that
    // tagged_ptr adds.
    //
    // Or maybe what we want to do is to align Node on 256 boundaries
    // so that we can do a one byte write to clear the locked flag.
    // That is *especially* not a thing in the C++ memory model.
#if CLEAR_RMW
    // This is probably undefined
    auto &intloc = reinterpret_cast<std::atomic<uintptr_t> &>(loc);
    intloc.fetch_and(~Node::Ptr::kTagBits, mo);
#elif CLEAR_BYTE_WRITE
    // This is certainly undefined, and only works on little endian.
    // C++ really does not have any story for mixed-size atomics,
    // and mixed-size atomics are pretty funky in practice.
    // Linux does do this on some platforms, though.
    auto &byteloc = reinterpret_cast<std::atomic<uint8_t> &>(loc);
    byteloc.store(0, mo);
#else
    Node::Ptr state(nullptr, 1);
    while (!loc.compare_exchange_weak(state, Node::Ptr(state, 0),
                                      mo, std::memory_order_relaxed)) {
    }
#endif
}
Example 11: push
void push(const T& data)
{
    node *const new_node = new node(data);
    new_node->next = head.load();
    // loop to guarantee that new_node->next is correct
    while (!head.compare_exchange_weak(new_node->next, new_node));
}
Example 12: Wait
void CondVar::Wait()
{
    unsigned dwWaitingForSignal = m_dwWaitingForSignal.load(std::memory_order_seq_cst);
    m_dwWaitingForSignal.store(dwWaitingForSignal + 1, std::memory_order_seq_cst);
    RL_ASSERT(m_lMutex.load(std::memory_order_seq_cst) < 0);
    int lMutex = m_lMutex.load(std::memory_order_seq_cst);
    for (;;)
    {
        unsigned dwWaitingToOwn = lMutex & 0x7FFFFFFFu;
        RL_ASSERT(dwWaitingToOwn >= dwWaitingForSignal);
        if (dwWaitingToOwn == dwWaitingForSignal)
        {
            if (m_lMutex.compare_exchange_weak(lMutex, dwWaitingToOwn + 1, std::memory_order_seq_cst))
                break;
        }
        else
        {
            SetEvent(m_xhEvtEnter);
            break;
        }
    }
    WaitForSingleObject(m_xhSemRelease, INFINITE);
    WaitForSingleObject(m_xhEvtEnter, INFINITE);
    RL_ASSERT(m_lMutex.load(std::memory_order_seq_cst) < 0);
}
Example 13: push
void push(T* what) {
    T* e = m_stack.load();
    for (;;) {
        what->next = e;
        if (!e) {
            lock_type lock(m_mtx);
            if (m_stack.compare_exchange_weak(e, what)) {
                m_cv.notify_one();
                return;
            }
        }
        // on failure, compare_exchange_weak writes the current
        // value of m_stack back into e
        else if (m_stack.compare_exchange_weak(e, what)) return;
    }
}
Example 14: append
void append (int val) {     // append an element to the list
    Node* oldHead = list_head;
    Node* newNode = new Node {val, oldHead};
    // what follows is equivalent to: list_head = newNode, but in a thread-safe way:
    while (!list_head.compare_exchange_weak(oldHead, newNode))
        newNode->next = oldHead;
}
Example 15: registerConsumer
// Register trace consumer
void trace::registerConsumer(trace::TraceConsumer *Consumer) {
    TraceConsumerListNode *Node = new TraceConsumerListNode {Consumer, nullptr};
    do {
        Node->Next = consumers.load(std::memory_order_relaxed);
    } while (!consumers.compare_exchange_weak(Node->Next, Node,
                                              std::memory_order_release,
                                              std::memory_order_relaxed));
}