

C++ atomic::fetch_add Method Code Examples

This article collects typical usage examples of the std::atomic::fetch_add method in C++. If you are wondering what atomic::fetch_add does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, std::atomic.


The following presents 15 code examples of the atomic::fetch_add method, taken from open-source projects and sorted by popularity by default.
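Before the project excerpts, here is a minimal, self-contained sketch of the method itself (the counter name, thread count, and iteration count are illustrative and not taken from any of the examples below). fetch_add atomically adds its argument to the stored value and returns the value held immediately before the addition; an optional std::memory_order argument, std::memory_order_seq_cst by default, controls the ordering.

#include <atomic>
#include <iostream>
#include <thread>
#include <vector>

int main() {
    std::atomic<int> counter{0};                      // illustrative shared counter

    std::vector<std::thread> workers;
    for (int t = 0; t < 4; ++t) {
        workers.emplace_back([&counter] {
            for (int i = 0; i < 1000; ++i) {
                // fetch_add returns the value held *before* the addition.
                int previous = counter.fetch_add(1, std::memory_order_relaxed);
                (void)previous;                       // several examples below use this return value
            }
        });
    }
    for (auto &w : workers)
        w.join();

    std::cout << counter.load() << '\n';              // always prints 4000: the increments never race
}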

Example 1: watch

    bool watch(void *p, ArrayElement *a){
        // Announce this thread's interest in the element by bumping the reference count.
        rc_count.fetch_add(1);

        // If the element no longer holds p, undo the increment and report failure.
        if (a->load() != p){
            rc_count.fetch_add(-1);
            return false;
        }
        return true;
    };
Developer: ucf-cs, Project: Tervel, Lines: 10, Source: wfv_classes.hpp

Example 2: test

void test(std::atomic<int>& n)   // std::atomic is not copyable, so take it by reference
{
    ___________________________________________();   // marker calls, declared elsewhere in the snippet
    ___________________________________________();

    ++n;                                              // atomic increment, sequentially consistent by default
    ___________________________________________();

    n.fetch_add(1, std::memory_order_relaxed);        // relaxed read-modify-write
    ___________________________________________();

    n.fetch_add(1, std::memory_order_seq_cst);        // explicitly sequentially consistent read-modify-write

    ___________________________________________();
    ___________________________________________();
}
Developer: CCJY, Project: coliru, Lines: 16, Source: main.cpp

Example 3: just_spinning_barrier

bool just_spinning_barrier(int tid, int gn)
{
    unsigned int step = step_.load();
    if (nwait_.fetch_add(1) == gn - 1) {
        // Last of the gn threads to arrive: reset the waiter count and advance
        // the step, releasing every thread spinning below.
        nwait_.store(0);
        step_.fetch_add(1);
        return true;
    } else {
        // Spin until the last arriving thread advances the step.
        while (step_.load() == step) {
            std::this_thread::yield();
        }
        return false;
    }
}
Developer: chandanpalai, Project: FpgaMicrotubule, Lines: 16, Source: mt_cpu_mt.cpp
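The barrier above relies on two atomics, step_ and nwait_, that are defined elsewhere in the project. A rough sketch of how it might be driven follows; the declarations, worker body, and thread count are assumptions for illustration, not taken from the original mt_cpu_mt.cpp.

#include <atomic>
#include <thread>
#include <vector>

// Assumed shared state; the original file defines its own equivalents.
static std::atomic<unsigned int> step_{0};
static std::atomic<unsigned int> nwait_{0};

// just_spinning_barrier from Example 3 is assumed to be defined above this point.

void worker(int tid, int gn) {
    for (int iter = 0; iter < 3; ++iter) {
        // ... per-iteration work would go here ...
        just_spinning_barrier(tid, gn);   // all gn threads meet before the next iteration
    }
}

int main() {
    const int gn = 4;                     // illustrative thread count
    std::vector<std::thread> threads;
    for (int tid = 0; tid < gn; ++tid)
        threads.emplace_back(worker, tid, gn);
    for (auto &t : threads)
        t.join();
}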

Example 4: Schedule

	void Schedule(const char * name, task_fn fn, uint32_t threadid)
	{
		BASIS_ASSERT(Scheduler != nullptr);
		
		if (threadid >=0 && threadid < ThreadCount)
		{
			scheduler_data * s = SchedulerList + threadid;
			s->privateTasks.push_back<task_entry>({ fn, basis::stralloc(name) });
			s->privateTaskCount.fetch_add(1, std::memory_order_relaxed);

			if (s != Scheduler)
			{
				SignalScheduler(s);
			}
		}
		else
		{
			BASIS_ASSERT(threadid == TACO_INVALID_THREAD_ID);
			
			Scheduler->sharedTasks.push_back<task_entry>({ fn, basis::stralloc(name) });
			// fetch_add returns the previous count, so add 1 to get the new total.
			uint32_t count = GlobalSharedTaskCount.fetch_add(1, std::memory_order_relaxed) + 1;
			if (count > 1 || !Scheduler->isActive)
			{
				AskForHelp(count);
			}
		}
	}
Developer: UIKit0, Project: taco, Lines: 27, Source: scheduler.cpp

Example 5: AllocateBytes

void* fhBaseRenderList::AllocateBytes(uint32 bytes) {
	// Bump allocator: fetch_add returns the offset before the addition,
	// which is where this allocation starts.
	uint32 offset = allocated.fetch_add(bytes);
	assert(offset + bytes < renderlistMaxSize);
	assert(renderlistMemory);

	return &static_cast<char*>(renderlistMemory)[offset];
}
Developer: galek, Project: fhDOOM, Lines: 7, Source: RenderList.cpp

Example 6: threadFunc

    void threadFunc(int threadNum)
    {
        std::random_device rd;
        std::mt19937 randomEngine(rd());
        int writes = 0;
        volatile int accumulator = 0;   // Prevent compiler from eliminating this variable

        for (int i = 0; i < m_iterationCount; i++)
        {
            // Choose randomly whether to read or write.
            if (std::uniform_int_distribution<>(0, 30)(randomEngine) == 0)
            {
                WriteLockGuard<NonRecursiveRWLock> guard(m_rwLock);
                m_sharedInt++;
                writes++;
            }
            else
            {
                ReadLockGuard<NonRecursiveRWLock> guard(m_rwLock);
                accumulator += m_sharedInt;
            }
        }

        m_totalWrites.fetch_add(writes, std::memory_order_relaxed);
    }
Developer: Hincoin, Project: cpp11-on-multicore, Lines: 25, Source: simplerwlocktester.cpp

Example 7: allocate_slot

	/**
	*	@brief	Allocates a new slot for a texture and returns a shared pointer to the new slot.
	*			Slot lifetime is tied to the pointer's lifetime.
	*
	*	@param	tex		Texture to insert
	*/
	auto allocate_slot(texture_t &&tex,
					   image_layout layout = default_layout) {
		// Find a location for the slot: if there are tombstones, replace one of them with the new element, if possible.
		optional<std::uint32_t> location;
		{
			std::unique_lock<std::mutex> l(tombstones_mutex);

			if (tombstones.size()) {
				location = std::prev(tombstones.end())->start;
				tombstones.pop_back();
			}
		}

		// If no tombstones, use a location past the vector's end.
		if (!location)
			location = count.fetch_add(1);

		// Create slot and pipeline image, we can do that without a lock
		value_type val = lib::allocate_shared<slot_t>(slot_t::token(),
													  std::move(tex),
													  *this,
													  location.get());
		auto img = image_t(val->tex, layout);
		
		// Update changes data
		{
			std::unique_lock<std::mutex> l(general_mutex);
			add_change(location.get(),
					   std::move(img));
		}

		return val;
	}
Developer: ssteinberg, Project: ste, Lines: 39, Source: image_vector.hpp

Example 8: GetSocket

Socket* SocketPool::GetSocket() {
  for ( ; ; ) {
    Socket* sock = nullptr;
    {
      std::unique_lock<std::mutex> lock(mtx_);
      if (pool_.empty()) {
        break;
      }

      sock = pool_.back();
      pool_.pop_back();
    }

    pool_count_.fetch_sub(1, std::memory_order_relaxed);

    if (sock && sock->IsValid()) {
      return sock;
    }

    delete sock;
  }

  if (pool_count_.load(std::memory_order_relaxed) < pool_size_) {
    pool_count_.fetch_add(1, std::memory_order_relaxed);
    return new Socket(remote_side_.ip, remote_side_.port);
  }

  return nullptr;
}
Developer: spch2008, Project: codelab, Lines: 29, Source: socket_pool.cpp

Example 9: DNNModelThreadForward

DWORD DNNModelThreadForward(ThreadLayerState *tl)
{
    SetTrainingThreadAffinity(tl->_threadNum);

#ifdef PREALLOCATE_THREAD_BUFFERS
//	tl->PrintSparsity();
#else
    float **inputActivation = new float *[tl->_numLayers];
    float **outputActivation = new float *[tl->_numLayers];
    for (int i = tl->_startLayer; i < tl->_numLayers; i++)
    {
        inputActivation[i] = new float[tl->_LayerState[i]._InputSize];
        outputActivation[i] = new float[tl->_LayerState[i]._OutputSize];
    }
#endif

    while (true)
    {
        // Atomically claim the next sample; fetch_add returns the previous position.
        INT64 sampleId = g_CurrentSamplePos.fetch_add(1);
        if (sampleId >= G_SAMPLE_COUNT) break;
        int numLayers = ((sampleId % G_WORKER_COUNT) == 0) ? tl->_numLayers : tl->_numLayers-1;
        for (int l = tl->_startLayer; l < numLayers; l++) {
            
#ifdef PREPARE_COMPUTE_DATA
            std::vector<std::vector<float>>& layerActivations = g_Activations[l];
            int activationId = sampleId % layerActivations.size();
            std::vector<float>& activationVector = layerActivations[activationId];
            Sparsify(layerActivations[activationId], G_FORWARD_SPARSITY);
            const float* inpACT = &activationVector[0];
#elif PREALLOCATE_THREAD_BUFFERS
            float *inpACT = tl->_inputActivation[l];
#else
            float *inpACT = inputActivation[l];
            Sparsify(inpACT, tl->_LayerState[l]._InputSize, G_FORWARD_SPARSITY, G_ACTIVATION_CACHELINE_SPARSITY);
#endif
            float *outACT = tl->_outputActivation[l];
            Layer *layer = (tl->_LayerState + l);

            DECLARE_TIMER(timer);
            START_TIMER(timer);
            g_DNNKernels._feedForward(layer, inpACT, outACT);           
            STOP_TIMER(timer);
            tl->_FLOPTime[l] += ELAPSED_USEC_TIME(timer);

            tl->_SampleCount[l]++;
        }
    }

#ifndef PREALLOCATE_THREAD_BUFFERS
    for (int i = tl->_startLayer; i < tl->_numLayers; i++)
    {
        delete [] inputActivation[i];
        delete [] outputActivation[i];
    }
        
    delete []inputActivation;
    delete []outputActivation;
#endif 
    return 0;
}
Developer: tjruwase, Project: sc-dnn, Lines: 60, Source: Main.cpp

Example 10: ALOGV

 virtual ~JavaBBinder()
 {
     ALOGV("Destroying JavaBBinder %p\n", this);
     gNumLocalRefsDeleted.fetch_add(1, memory_order_relaxed);
     JNIEnv* env = javavm_to_jnienv(mVM);
     env->DeleteGlobalRef(mObject);
 }
Developer: MIPS, Project: frameworks-base, Lines: 7, Source: android_util_Binder.cpp

Example 11: JavaBBinder

 JavaBBinder(JNIEnv* env, jobject /* Java Binder */ object)
     : mVM(jnienv_to_javavm(env)), mObject(env->NewGlobalRef(object))
 {
     ALOGV("Creating JavaBBinder %p\n", this);
     gNumLocalRefsCreated.fetch_add(1, std::memory_order_relaxed);
     gcIfManyNewRefs(env);
 }
Developer: MIPS, Project: frameworks-base, Lines: 7, Source: android_util_Binder.cpp

Example 12: WaitForGpuThread

/* This function checks the emulated CPU-GPU distance and may wake up the GPU,
 * or block the CPU if required. It should be called by the CPU thread regularly.
 * @param ticks   The elapsed emulated CPU time.
 * @return        A good time to call WaitForGpuThread() next.
 */
static int WaitForGpuThread(int ticks)
{
  const SConfig& param = SConfig::GetInstance();

  int old = s_sync_ticks.fetch_add(ticks);
  int now = old + ticks;

  // GPU is idle, so stop polling.
  if (old >= 0 && s_gpu_mainloop.IsDone())
    return -1;

  // Wakeup GPU
  if (old < param.iSyncGpuMinDistance && now >= param.iSyncGpuMinDistance)
    RunGpu();

  // If the GPU is still sleeping, wait for a longer time
  if (now < param.iSyncGpuMinDistance)
    return GPU_TIME_SLOT_SIZE + param.iSyncGpuMinDistance - now;

  // Wait for GPU
  if (now >= param.iSyncGpuMaxDistance)
    s_sync_wakeup_event.Wait();

  return GPU_TIME_SLOT_SIZE;
}
Developer: stenzek, Project: dolphin, Lines: 30, Source: Fifo.cpp

Example 13: lk

std::shared_ptr<HiresTexture>
HiresTexture::Search(const std::string& basename,
                     std::function<u8*(size_t)> request_buffer_delegate)
{
  if (g_ActiveConfig.bCacheHiresTextures)
  {
    std::unique_lock<std::mutex> lk(s_textureCacheMutex);

    auto iter = s_textureCache.find(basename);
    if (iter != s_textureCache.end())
    {
      HiresTexture* current = iter->second.get();
      u8* dst = request_buffer_delegate(current->m_cached_data_size);
      memcpy(dst, current->m_cached_data.get(), current->m_cached_data_size);
      return iter->second;
    }
    lk.unlock();
    if (size_sum.load() < max_mem)
    {
      std::shared_ptr<HiresTexture> ptr(
          Load(basename, [](size_t requested_size) { return new u8[requested_size]; }, true));
      lk.lock();
      if (ptr)
      {
        s_textureCache[basename] = ptr;
        HiresTexture* current = ptr.get();
        size_sum.fetch_add(current->m_cached_data_size);
        u8* dst = request_buffer_delegate(current->m_cached_data_size);
        memcpy(dst, current->m_cached_data.get(), current->m_cached_data_size);
      }
      return ptr;
    }
  }
  return std::shared_ptr<HiresTexture>(Load(basename, request_buffer_delegate, false));
}
Developer: Tinob, Project: Ishiiruka, Lines: 35, Source: HiresTextures.cpp

Example 14: operator()

 void operator()(BlockingQueue<int> &queue) {
   for (int i = 0; i < size; ++i) {
     int value = product_item.fetch_add(1);
     if (!queue.Push(value)) {
       std::cout << "failed to push item " << value << " into queue\n";
     }
   }
 }
Developer: haitaoy, Project: thread_pool_service, Lines: 8, Source: blocking_queue_test.cpp

Example 15: MemoryCheck

 MemoryCheck(const MemoryCheck& x)
 {
     // We have to do this to make sure that destructor calls are paired
     //
     // Really, copy constructor should be deletable, but CCheckQueue breaks
     // if it is deleted because of internal push_back.
     fake_allocated_memory.fetch_add(b, std::memory_order_relaxed);
 };
Developer: iobond, Project: aib, Lines: 8, Source: checkqueue_tests.cpp


Note: The std::atomic::fetch_add examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code fragments are taken from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce without permission.