This article collects typical usage examples of the C++ std::atomic_int::load method. If you have been wondering how atomic_int::load is used in practice, the curated code samples below should help; from there you can also explore the other members of the std::atomic_int class.
The 12 code examples of atomic_int::load shown below are sorted by popularity by default.
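Before the examples, here is a minimal, self-contained sketch of the basic pattern they all build on: worker threads modify a std::atomic_int, and load() reads its current value without a data race. This sketch is not drawn from any of the samples below; the names counter and kThreads are made up for illustration.

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

// NOTE: `counter` and `kThreads` are illustrative names for this sketch only.
std::atomic_int counter{0};

int main()
{
    const int kThreads = 4;
    std::vector<std::thread> threads;
    for (int i = 0; i < kThreads; ++i) {
        threads.emplace_back([] {
            for (int j = 0; j < 1000; ++j)
                counter.fetch_add(1, std::memory_order_relaxed);
        });
    }
    for (auto& t : threads)
        t.join();

    // load() atomically reads the current value; after the joins it is 4000.
    std::printf("counter = %d\n", counter.load());
    return 0;
}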
Example 1: update
inline bool update()
{
    if (windowId.load() == s_currentNativeWindowId.load())
        return false;

    s_currentNativeWindowMutex.lock();
    window = s_currentNativeWindow;
    windowId.store(s_currentNativeWindowId.load());
    s_currentNativeWindowMutex.unlock();
    return true;
}
Example 2: print
void print() {
    if (size == 0)
        printf("<empty>\n");
    for (int i = 0; i < size; i++)
        printf("%d ", arr[(head.load() + i) % capacity]);
    printf("\n");
}
Example 3: func
TEST(JobSystem, JobSystemParallelChildren) {
    v = 0;
    JobSystem js;
    js.adopt();

    struct User {
        std::atomic_int calls = {0};
        void func(JobSystem&, JobSystem::Job*) {
            v++;
            calls++;
        }
    } j;

    JobSystem::Job* root = js.createJob<User, &User::func>(nullptr, &j);
    for (int i = 0; i < 256; i++) {
        JobSystem::Job* job = js.createJob<User, &User::func>(root, &j);
        js.run(job);
    }
    js.runAndWait(root);

    // the root job runs once, plus its 256 children
    EXPECT_EQ(257, v.load());
    EXPECT_EQ(257, j.calls);

    js.emancipate();
}
Example 4: sleep
void spring_time::sleep(bool forceThreadSleep)
{
    if (forceThreadSleep) {
        spring::this_thread::sleep_for(chrono::nanoseconds(toNanoSecsi()));
        return;
    }

    // for very short time intervals use a yielding loop (yield is ~5x more accurate than sleep(); see the UnitTest)
    if (toMicroSecsi() < (avgThreadSleepTimeMicroSecs + avgThreadYieldTimeMicroSecs * 5)) {
        const spring_time s = gettime();
        while ((gettime() - s) < *this)
            thread_yield();
        return;
    }

    // expected wakeup time
    const spring_time t0 = gettime() + *this;
    spring::this_thread::sleep_for(chrono::nanoseconds(toNanoSecsi()));
    const spring_time t1 = gettime();
    const spring_time dt = t1 - t0;

    if (t1 >= t0) {
        // not strictly thread-safe, but losing one write in a million is acceptable here
        const int avg = avgThreadSleepTimeMicroSecs.load();
        const int newAvg = mix<float>(avg, dt.toMicroSecsf(), 0.1f);
        avgThreadSleepTimeMicroSecs.store(newAvg);
    }
}
Example 5: worker_thread
void worker_thread(int i, class Barrier& barrier1, class Barrier& barrier2)
{
    {
        // std::lock_guard<std::mutex> lk(m);
        std::lock_guard<std::mutex> lk_guard(m);
        std::cout << "Worker thread " << i << " has arrived." << std::endl;
    }

    barrier1.wait();

    {
        std::lock_guard<std::mutex> lk_guard(m);
        std::cout << "counts changing2, processed_all: " << processed_all
                  << ", count < max: " << (arrived_count < allowed_max) << std::endl;
        arrived_count++;
        processed_count--;
        std::cout << "counts changing3, processed: " << processed_count
                  << ", arrived: " << arrived_count << std::endl;
    }

    {
        std::lock_guard<std::mutex> lk_guard(m);
        if (arrived_count.load() >= allowed_max) {
            std::cout << "Enough arrived, about to open floodgate #1" << std::endl;
            arrived_all = true;
            processed_all = false;
            // for (int i = 0; i < allowed_max; i++) {
            //     cv1.notify_one();
            // }
        }
        std::cout << "about to reach 2nd barrier: " << arrived_count << std::endl;
    }

    // std::unique_lock<std::mutex> lk1(barrier1);
    barrier2.wait();
    // cv1.wait(lk1, []{ return (processed_count < allowed_max) && arrived_all; });

    {
        std::lock_guard<std::mutex> lk_guard(m);
        processed_count++;
        arrived_count--;
    }

    // critical section would go here... then increment processed_count
    // report after the critical section
    {
        std::lock_guard<std::mutex> lk(m);
        std::cout << "Worker thread " << i << " data processing completed" << std::endl;
        if (processed_count == allowed_max) {
            std::cout << "Group finished, about to open floodgate #2" << std::endl;
            processed_all = true;
            arrived_all = false;
            // for (int i = 0; i < allowed_max; i++) {
            //     cv2.notify_one();
            // }
        }
    }
}
Example 6: stepLeft
void stepLeft() {
    std::unique_lock<std::mutex> lock(mutex);
    for (int i = 0; i < 10; ++i) {
        std::cout << "left" << std::endl;
        isWaiting.fetch_add(1);
        if (isWaiting.load() % 2 != 0) {
            condVar.notify_one();
        } else {
            condVar.wait(lock);
        }
    }
}
Example 7: testit
void testit(std::atomic_int& count)
{
    std::default_random_engine generator;
    std::uniform_int_distribution<int> distribution(1, 10);
    auto sleep_time = std::bind(distribution, generator);

    std::this_thread::sleep_for(std::chrono::microseconds(sleep_time()));
    ++count;
    if (count.load() == 5) {
        g_condition.notify_one();
    }
}
Example 8: add
bool add(int val) {
    enqLock.lock();
    if ((size + 1) > capacity) {
        if (addremdbg) printf("af %d\n", val);
        enqLock.unlock();
        return false;
    }
    if (addremdbg) printf("ap %d\n", val);
    arr[tail] = val;
    tail = (tail.load() + 1) % capacity;
    size++;
    enqLock.unlock();
    return true;
}
Example 9: thread_yield
static void thread_yield()
{
    const spring_time t0 = spring_time::gettime();
    this_thread::yield();
    const spring_time t1 = spring_time::gettime();
    const spring_time dt = t1 - t0;

    if (t1 >= t0) {
        // not strictly thread-safe, but losing one write in a million is acceptable here
        const int avg = avgThreadYieldTimeMicroSecs.load();
        const int newAvg = mix<float>(avg, dt.toMicroSecsf(), 0.1f);
        avgThreadYieldTimeMicroSecs.store(newAvg);
    }
}
Example 10: main
int main() {
    using namespace shmdata;
    {
        // direct-access writer with one reader
        Writer w("/tmp/check-stress",
                 sizeof(Frame),
                 "application/x-check-shmdata",
                 &logger);
        assert(w);

        // init
        {
            Frame frame;
            assert(w.copy_to_shm(&frame, sizeof(Frame)));
        }

        Reader r("/tmp/check-stress",
                 [](void *data, size_t size) {
                     // auto frame = static_cast<Frame *>(data);
                     // std::cout << "(0) new data for client " << frame->count
                     //           << " (size " << size << ")" << std::endl;
                 },
                 nullptr,
                 nullptr,
                 &logger);
        std::cout << "one reader" << std::endl;
        assert(r);

        auto reader_handle = std::async(std::launch::async, reader);
        while (1 != done.load()) {
            // the following locks the shared memory for writing
            auto access = w.get_one_write_access();
            assert(access);
            access->notify_clients(sizeof(Frame));
            auto frame = static_cast<Frame *>(access->get_mem());
            frame->count++;
        }
        assert(reader_handle.get());
    }

    std::this_thread::sleep_for(std::chrono::milliseconds(1000));
    std::cout << "fin" << std::endl;
    return 0;
}
Example 11: main
int main(int argc, char** argv)
{
    if (argc != 8)
    {
        printf("%s sAddr1 sAddr2 sAddr3 iChannelNum iWorkerNumPerChannel iCountPerWorker iEntityNum\n",
               argv[0]);
        exit(-1);
    }

    std::vector<std::string> vecIPList;
    for (int i = 1; i <= 3; ++i)
        vecIPList.push_back(argv[i]);

    int iChannelNum = atoi(argv[4]);
    int iWorkerNumPerChannel = atoi(argv[5]);
    int iCountPerWorker = atoi(argv[6]);
    uint64_t iEntityNum = atoi(argv[7]);

    clsUUIDGenerator::GetInstance()->Init();

    for (int i = 0; i < iChannelNum; ++i)
    {
        std::shared_ptr<clsClient> poClient(new clsClient(&vecIPList));
        for (int j = 0; j < iWorkerNumPerChannel; ++j)
        {
            clsRWStressTester* poTester = new clsRWStressTester(poClient, iCountPerWorker, iEntityNum);
            poTester->Start();
        }
    }

    while (s_iStopCnt.load() == 0)
    {
        sleep(1);
        s_oStat.Print();
    }

    printf("Fail %lu\n", s_iFailCnt.load());
    return 0;
}
Example 12: router
static void *doit(int id, Config &conf)
{
    FCGX_Request request;
    if (FCGX_InitRequest(&request, socketId.load(), 0) != 0)
    {
        // failed to initialize the request structure
        printf("Can not init request\n");
        return NULL;
    }

    Router router(&request, &conf);
    router.addHandler("OPTIONS", "/users/login", &OptUsersLogin);
    router.addHandler("GET", "/users/login", &UsersInfo);
    router.addHandler("POST", "/users/login", &PostUsersLogin);
    router.addHandler("OPTIONS", "/users/add", &OptUsersAdd);
    router.addHandler("POST", "/users/add", &PostUsersAdd);
    router.addHandler("OPTIONS", ".*", &OptDirs);
    router.addHandler("OPTIONS", "/dirs/(?<id>\\d+)", &OptDirs);
    router.addHandler("POST", "/dirs", &PostCreateDir);
    router.addHandler("GET", "/dirs/(\\d+)", &GetDir);
    router.addHandler("DELETE", "/dirs/(\\d+)", &DelDir);
    router.addHandler("POST", "/files/(\\d+)/(.+)", &PutFile);
    router.addHandler("GET", "/files/(\\d+)", &GetFile);
    router.addHandler("DELETE", "/files/(\\d+)", &DelFile);

    for (;;)
    {
        static pthread_mutex_t accept_mutex = PTHREAD_MUTEX_INITIALIZER;
        pthread_mutex_lock(&accept_mutex);
        int rc = FCGX_Accept_r(&request);
        pthread_mutex_unlock(&accept_mutex);
        if (rc < 0)
        {
            // failed to accept a new request
            printf("Can not accept new request\n");
            break;
        }

        std::streambuf *cin_streambuf  = std::cin.rdbuf();
        std::streambuf *cout_streambuf = std::cout.rdbuf();
        std::streambuf *cerr_streambuf = std::cerr.rdbuf();
        fcgi_streambuf cin_fcgi_streambuf(request.in);
        fcgi_streambuf cout_fcgi_streambuf(request.out);
        fcgi_streambuf cerr_fcgi_streambuf(request.err);
        std::cin.rdbuf(&cin_fcgi_streambuf);
        std::cout.rdbuf(&cout_fcgi_streambuf);
        std::cerr.rdbuf(&cerr_fcgi_streambuf);

        try
        {
            router.Run();
        }
        catch (Error &e)
        {
            router.SetStatus(e.http_code());
            router.AddHeader("Content-Type", "application/json; charset=utf-8");
            router.AddContent(e.what());
            router.AcceptContent();
        }
        catch (std::exception &e)
        {
            std::cerr << e.what();
            router.SetStatus(Httpstatus::InternalServerError);
            router.AddHeader("Content-Type", "text/plain; charset=utf-8");
            router.AddContent(e.what());
            router.AcceptContent();
        }

        FCGX_Finish_r(&request);
        // final steps: record statistics, log errors, etc.
        router.Cleanup();
        std::cin.rdbuf(cin_streambuf);
        std::cout.rdbuf(cout_streambuf);
        std::cerr.rdbuf(cerr_streambuf);
    }
    return NULL;
}