This article collects typical usage examples of the C++ sched_getcpu function: what sched_getcpu does, how to call it, and how it is used in real code. The 15 code examples below were selected and are sorted by popularity.
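Before the examples, a minimal self-contained sketch of the call itself (written for this article, not taken from the examples below): on Linux with glibc, sched_getcpu() is declared in <sched.h> when _GNU_SOURCE is defined; it returns the number of the CPU the calling thread is currently executing on, or -1 with errno set on failure.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
    /* sched_getcpu() reports the CPU this thread is running on right now.
     * The value is only a snapshot: the scheduler may migrate the thread
     * at any moment unless its affinity has been restricted. */
    int cpu = sched_getcpu();
    if (cpu < 0) {
        perror("sched_getcpu");
        return 1;
    }
    printf("running on CPU %d\n", cpu);
    return 0;
}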
Example 1: llamadaHilo
A worker thread that reads its share of packets from a socket, timestamps the first packet received, and logs which CPU it is running on.
void llamadaHilo(int socket_fd){
    char buf[BUF_SIZE];
    int lectura;
    if (mostrarInfo) printf("Socket Operativo: %d, \t CPU: %d\n", socket_fd, sched_getcpu());
    int i;
    int paquetesParaAtender = MAX_PACKS / NTHREADS;
    // FTRACE marker
    if (enabledTrace) write(marker_fd, "MITRACE UDP: Nuevo Thread\n", 26);
    for (i = 0; i < paquetesParaAtender; i++) {
        if (enabledTrace) write(marker_fd, "MITRACE UDP: Comienza el read del socket\n", 41);
        //lectura = recv(socket_fd, buf, BUF_SIZE, 0);
        lectura = read(socket_fd, buf, BUF_SIZE);
        if (lectura <= 0) {
            fprintf(stderr, "Error en el read del socket (%d)\n", lectura);
            exit(1);
        }
        if (first_pack == 0) {
            pthread_mutex_lock(&lock);
            // double-checked under the mutex so only one thread records the start time
            if (first_pack == 0) {
                if (mostrarInfo) printf("got first pack\n");
                first_pack = 1;
                // record the start time
                gettimeofday(&dateInicio, NULL);
            }
            pthread_mutex_unlock(&lock);
        }
    }
    if (mostrarInfo) printf("Fin Socket Operativo: %d, \t CPU: %d\n", socket_fd, sched_getcpu());
}
Example 2: viterbi_stream_thread_loop
A worker loop that busy-waits on a shared sync counter and prints the CPU it runs on when built with _GNU_SOURCE.
void* viterbi_stream_thread_loop(void* argst)
{
    int i, execcount = 0;
    pthr_info_t* args = (pthr_info_t*) argst;
    DATA_STREAM *dstream = args->dstream;
#ifdef _GNU_SOURCE
    printf("THR %d running on cpu %d\n", args->thrid, sched_getcpu());
#endif
    for (i = 0; 1; i++)
    {
        // busy-wait until the control thread advances synccontrol to this iteration
        execcount++;
        while (dstream->synccontrol != execcount) sched_yield();
        // while (syncflags[args->thrid] == 0) sched_yield(); syncflags[args->thrid] = 0;
        // sem_wait(&semsynch[args->thrid]);
        tprintf("THR %d entering\n", args->thrid);
        viterbi_stream_word_partitioned(dstream, NULL, args->thrid);
    }
    // note: the loop above never exits, so everything below is unreachable
#ifdef _GNU_SOURCE
    printf("THR %d running on cpu %d\n", args->thrid, sched_getcpu());
#endif
    return (void*) 0;
}
Example 3: set_cpu_affinity
Pins the calling process to a single CPU with sched_setaffinity, printing the current CPU before and after.
void set_cpu_affinity(int cpu) {
    int ret;
    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    pprintf("current CPU %d\n", sched_getcpu());
    ret = sched_setaffinity(0, sizeof(cpuset), &cpuset);
    if (ret == -1)
        err("sched_setaffinity");
    pprintf("current CPU %d\n", sched_getcpu());
}
Example 4: thread_func_1
A thread that migrates itself from CPU 0 to CPU 1 via pthread_setaffinity_np, printing the CPU after each computation.
void *thread_func_1(void *ptr){
    cpu_set_t mask;
    int rc;
    CPU_ZERO(&mask);
    CPU_SET(0, &mask);
    // pthread_setaffinity_np returns 0 on success or an error number; it does
    // not set errno, so check for nonzero and report the return value directly
    if ((rc = pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask)) != 0)
        fprintf(stderr, "pthread_setaffinity_np: %s\n", strerror(rc));
    printf("Computed %f on CPU %d\n", waste_time(10000), sched_getcpu());
    CPU_ZERO(&mask);
    CPU_SET(1, &mask);
    if ((rc = pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask)) != 0)
        fprintf(stderr, "pthread_setaffinity_np: %s\n", strerror(rc));
    printf("Computed %f on CPU %d\n", waste_time(10000), sched_getcpu());
    return NULL;
}
Example 5: ring_allocation_logic::get_res_key_by_logic
Derives a ring-allocation key per interface, socket, thread, or current core.
resource_allocation_key ring_allocation_logic::get_res_key_by_logic()
{
    resource_allocation_key key = DEFAULT_RING_KEY;
    switch (m_ring_allocation_logic) {
    case RING_LOGIC_PER_INTERFACE:
        key = 0;
        break;
    case RING_LOGIC_PER_SOCKET:
        key = m_fd;
        break;
    case RING_LOGIC_PER_THREAD:
        key = pthread_self();
        break;
    case RING_LOGIC_PER_CORE:
    case RING_LOGIC_PER_CORE_ATTACH_THREADS:
        key = sched_getcpu();
        break;
    BULLSEYE_EXCLUDE_BLOCK_START
    default:
        // not supposed to get here
        ral_logdbg("non-valid ring logic = %d", m_ring_allocation_logic);
        break;
    BULLSEYE_EXCLUDE_BLOCK_END
    }
    return key;
}
Example 6: main
Spawns four std::thread workers, pins each to its own CPU, and has each periodically report the CPU it runs on.
int main(int argc, const char* argv[]) {
    constexpr unsigned num_threads = 4;
    std::mutex iomutex;
    std::vector<std::thread> threads(num_threads);
    for (unsigned i = 0; i < num_threads; i++) {
        threads[i] = std::thread([&iomutex, i] {
            std::this_thread::sleep_for(std::chrono::milliseconds(20));
            while (1) {
                {
                    std::lock_guard<std::mutex> iolock(iomutex);
                    std::cout << "Thread #" << i << ": on CPU" << sched_getcpu() << "\n";
                }
                std::this_thread::sleep_for(std::chrono::milliseconds(900));
            }
        });
        cpu_set_t cpuset;
        CPU_ZERO(&cpuset);
        CPU_SET(i, &cpuset);
        int rc = pthread_setaffinity_np(threads[i].native_handle(), sizeof(cpu_set_t), &cpuset);
        if (rc != 0) {
            std::cerr << "Error calling pthread_setaffinity_np: " << rc << "\n";
        }
    }
    for (auto &t : threads) {
        t.join();
    }
    return 0;
}
Example 7: test
Seeds a per-thread random generator with the current CPU number and runs a randomized add/remove/lookup benchmark on a set.
void *test(void *data)
{
    unsigned int mySeed = seed + sched_getcpu();
    long myOps = operations / nb_threads;
    long val = -1;
    int op;
    while (myOps > 0) {
        op = rand_r(&mySeed) % 100;
        if (op < update) {
            if (val == -1) {
                /* Add random value */
                val = (rand_r(&mySeed) % range) + 1;
                if (set_add(val) == 0) {
                    val = -1;
                }
            } else {
                /* Remove the value added earlier */
                set_remove(val);
                val = -1;
            }
        } else {
            /* Look for random value */
            long tmp = (rand_r(&mySeed) % range) + 1;
            set_contains(tmp);
        }
        myOps--;
    }
    return NULL;
}
Example 8: HiccupsInfo::runthread
Pins a measurement thread to its configured CPU and verifies via sched_getcpu that the pinning took effect.
void* HiccupsInfo::runthread(void* vp) {
    ThrdStart* ts = (ThrdStart*)vp;
    int id = ts->thrdid_;
    int cpu;
    runoncpu(conf.cpus_[id]);
    if ((cpu = sched_getcpu()) < 0) {
        perror("sched_getcpu");
        exit(1);
    }
    if (cpu != conf.cpus_[id]) {
        fprintf(stderr, "Thread %d: tid: %d, running on wrong cpu: %d, expected: %d\n",
                id, gettid(), cpu, conf.cpus_[id]);
        exit(1);
    }
    unsigned long sz = HPGSZ(sizeof(HiccupsInfo));
    fprintf(stdout, "thread#: %3d tid: %d size %lu cpu[%d]: %d\n",
            id, gettid(), sz, id, conf.cpus_[id]);
    HiccupsInfo::hudatap_[id] = ts->hudata_ = (HiccupsInfo*)memzalloc(sz);
    ts->hudata_->cpuid_ = conf.cpus_[id];
    ts->hudata_->id_ = id;
    ts->hudata_->bins_ = (HUbin*)memzalloc(conf.bins_ * sizeof(bins_[0]));
    ts->rv_ = ts->hudata_->run();
    if (id)
        pthread_exit(0);
    return 0;
}
Example 9: Profiler_Imp::Start
Records the processor number for a profile sample, with platform-specific fallbacks where sched_getcpu is unavailable.
//----------------------------------------------------------------------------------
//
//----------------------------------------------------------------------------------
void Profiler_Imp::Start(int id)
{
    Profile::Ptr profile = nullptr;
    for (auto& x : m_profiles)
    {
        if (x->GetID() == id)
        {
            profile = x;
        }
    }
    if (profile == nullptr)
    {
        profile = make_shared<Profile>(id);
        m_profiles.push_back(profile);
    }
    profile->GetCurrent()->SetStartTime(asd::GetTime());
#if _WIN32
    profile->GetCurrent()->SetProcessorNumber(GetCurrentProcessorNumber());
#elif defined(__APPLE__)
    // sched_getcpu does not seem to be available here, so this is a
    // substitute. Replace it with something better if one exists.
    profile->GetCurrent()->SetProcessorNumber(
        std::hash<std::thread::id>()(std::this_thread::get_id()));
#else
    profile->GetCurrent()->SetProcessorNumber(sched_getcpu());
#endif
}
Example 10: sononblock
Under mTCP, uses the current CPU to select the per-core mTCP context before switching a socket to non-blocking mode.
static apr_status_t sononblock(int sd)
{
#ifndef BEOS
#ifdef HAVE_MTCP
    int cpu = sched_getcpu();
    if (mtcp_setsock_nonblock(g_mctx[cpu], sd) < 0)
        return errno;
#else
    int fd_flags;

    fd_flags = fcntl(sd, F_GETFL, 0);
#if defined(O_NONBLOCK)
    fd_flags |= O_NONBLOCK;
#elif defined(O_NDELAY)
    fd_flags |= O_NDELAY;
#elif defined(FNDELAY)
    fd_flags |= FNDELAY;
#else
#error Please teach APR how to make sockets non-blocking on your platform.
#endif
    if (fcntl(sd, F_SETFL, fd_flags) == -1) {
        return errno;
    }
#endif
#else
    int on = 1;
    if (setsockopt(sd, SOL_SOCKET, SO_NONBLOCK, &on, sizeof(int)) < 0)
        return errno;
#endif /* BEOS */
    return APR_SUCCESS;
}
Example 11: uv_inject
Records the current CPU before walking page descriptors to inject a memory error at a chosen physical address.
void uv_inject(page_desc_t *pd,
               page_desc_t *pdbegin,
               page_desc_t *pdend,
               unsigned long pages,
               unsigned long addr,
               unsigned long addrend,
               unsigned int pagesize,
               unsigned long mattr,
               unsigned long nodeid,
               unsigned long paddr,
               char *pte_str,
               unsigned long nodeid_start,
               unsigned long mattr_start,
               unsigned long addr_start,
               int mce_opt)
{
    int count = 0;

    eid.cpu = sched_getcpu();
    for (pd = pdbegin, pdend = pd + pages; pd < pdend && addr < addrend; pd++, addr += pagesize) {
        if (pd->flags & PD_HOLE) {
            pagesize = pd->pte;
            mattr = 0;
            nodeid = -1;
        } else {
            nodeid = get_pnodeid(*pd);
            paddr = get_paddr(*pd);
            if (nodeid == INVALID_NODE)
                nodeid = 0;
            mattr = get_memory_attr(*pd);
            pagesize = get_pagesize(*pd);
            if (mattr && paddr) {
                if ((pd_total / 2) == count) {
                    sprintf(pte_str, " 0x%016lx ", pd->pte);
                    printf("\t[%012lx] -> 0x%012lx on %s %3s %s%s\n",
                           addr, paddr, idstr(), nodestr(nodeid),
                           pte_str, get_memory_attr_str(nodeid, mattr));
                    /* Setting value at memory location for recovery
                     * before injecting.
                     */
                    memset((void *)addr, 'A', pagesize);
                    injecteddata = (char *)addr;
                    printf("Data:%x\n", *injecteddata);
                    eid.addr = paddr;
                    eid.cpu = nodeid;
                    break; /* only allow once for now */
                }
            }
        }
        count++;
    }
    if (delay) {
        printf("Enter char to inject..");
        getchar();
    }
    if (!manual) {
        inject_uc(eid.addr, 0 /*int notrigger*/);
    }
}
Example 12: randroutine
Fills a worker's matrix tiles with random values and reports the core the work finished on.
static void randroutine(const void *const arg)
{
    const idargument *const ia = (idargument *)arg;
    const workset *const ws = ia->tp->extra;
    const runconfig *const rc = ia->tp->rc;
    const unsigned id = ia->id;
    const unsigned sz = rc->size;
    const unsigned l = sz;
    const unsigned m = sz;
    const unsigned n = sz;
    const unsigned tr = tilerows;
    const unsigned tc = tilecols;
    const joblayout al = definejob(rc, id, l, m, tr, tc);
    const joblayout bl = definejob(rc, id, m, n, tc, tr);
    eltype *const a = ws->a + al.baseoffset / sizeof(eltype);
    eltype *const b = ws->b + bl.baseoffset / sizeof(eltype);

    matfill(id, al.absolutebaserow, a, al.baserow, al.nrows, m, tc, elrand);
    matfill(id * 5, bl.absolutebaserow, b, bl.baserow, bl.nrows, n, tr, elrand);
    printf("rand %03u with %u rows is done on core %d\n", id, al.nrows,
           sched_getcpu());
}
Example 13: multroutine
Multiplies a worker's tile block and reports the core the work finished on.
static void multroutine(const void *const arg)
{
    const idargument *const ia = (idargument *)arg;
    const workset *const ws = ia->tp->extra;
    const runconfig *const rc = ia->tp->rc;
    const unsigned id = ia->id;
    const unsigned sz = rc->size;
    const unsigned l = sz;
    const unsigned m = sz;
    const unsigned n = sz;
    const unsigned tr = tilerows;
    const unsigned tc = tilecols;
    const joblayout al = definejob(rc, id, l, m, tr, tc);
    const joblayout rl = definejob(rc, id, l, n, tr, tr);
    const eltype *const a = ws->a + al.baseoffset / sizeof(eltype);
    eltype *const r = ws->r + rl.baseoffset / sizeof(eltype);

    matmul(a, ws->b, al.baserow, al.nrows, m, n, r);
    printf("mult %03u with %u rows is done on core %d\n", id, al.nrows,
           sched_getcpu());
}
Example 14: ring_allocation_logic::calc_res_key_by_logic
A variant of Example 5 that also supports per-IP and per-user-ID keys.
/**
 *
 * @return the key that is part of a unique id in rings map
 */
uint64_t ring_allocation_logic::calc_res_key_by_logic()
{
    uint64_t res_key = 0;
    switch (m_res_key.get_ring_alloc_logic()) {
    case RING_LOGIC_PER_INTERFACE:
        res_key = 0;
        if (safe_mce_sys().tcp_ctl_thread > CTL_THREAD_DISABLE)
            res_key = 1;
        break;
    case RING_LOGIC_PER_IP:
        res_key = m_source.m_ip;
        break;
    case RING_LOGIC_PER_SOCKET:
        res_key = m_source.m_fd;
        break;
    case RING_LOGIC_PER_USER_ID:
        res_key = m_res_key.get_user_id_key();
        break;
    case RING_LOGIC_PER_THREAD:
        res_key = pthread_self();
        break;
    case RING_LOGIC_PER_CORE:
    case RING_LOGIC_PER_CORE_ATTACH_THREADS:
        res_key = sched_getcpu();
        break;
    BULLSEYE_EXCLUDE_BLOCK_START
    default:
        // not supposed to get here
        ral_logdbg("non-valid ring logic = %d", m_res_key.get_ring_alloc_logic());
        break;
    BULLSEYE_EXCLUDE_BLOCK_END
    }
    return res_key;
}
Example 15: cpu_manager::reserve_cpu_for_thread
Chooses the least-loaded CPU from a thread's affinity mask and pins the thread to it.
int cpu_manager::reserve_cpu_for_thread(pthread_t tid, int suggested_cpu /* = NO_CPU */)
{
    lock();
    int cpu = g_n_thread_cpu_core;
    if (cpu != NO_CPU) { // already reserved
        unlock();
        return cpu;
    }

    cpu_set_t cpu_set;
    CPU_ZERO(&cpu_set);
    int ret = pthread_getaffinity_np(tid, sizeof(cpu_set_t), &cpu_set);
    if (ret) {
        unlock();
        __log_err("pthread_getaffinity_np failed for tid=%lu, ret=%d (errno=%d %m)", tid, ret, errno);
        return -1;
    }

    int avail_cpus = CPU_COUNT(&cpu_set);
    if (avail_cpus == 0) {
        unlock();
        __log_err("no cpu available for tid=%lu", tid);
        return -1;
    }

    if (avail_cpus == 1) { // already attached
        for (cpu = 0; cpu < MAX_CPU && !CPU_ISSET(cpu, &cpu_set); cpu++) {}
    } else { // need to choose one cpu to attach to
        int min_cpu_count = -1;
        for (int i = 0, j = 0; i < MAX_CPU && j < avail_cpus; i++) {
            if (!CPU_ISSET(i, &cpu_set)) continue;
            j++;
            if (min_cpu_count < 0 || m_cpu_thread_count[i] < min_cpu_count) {
                min_cpu_count = m_cpu_thread_count[i];
                cpu = i;
            }
        }
        if (suggested_cpu >= 0
            && CPU_ISSET(suggested_cpu, &cpu_set)
            && m_cpu_thread_count[suggested_cpu] <= min_cpu_count + 1) {
            cpu = suggested_cpu;
        }
        CPU_ZERO(&cpu_set);
        CPU_SET(cpu, &cpu_set);
        __log_dbg("attach tid=%lu running on cpu=%d to cpu=%d", tid, sched_getcpu(), cpu);
        ret = pthread_setaffinity_np(tid, sizeof(cpu_set_t), &cpu_set);
        if (ret) {
            unlock();
            __log_err("pthread_setaffinity_np failed for tid=%lu to cpu=%d, ret=%d (errno=%d %m)", tid, cpu, ret, errno);
            return -1;
        }
    }

    g_n_thread_cpu_core = cpu;
    if (cpu > NO_CPU && cpu < MAX_CPU)
        m_cpu_thread_count[cpu]++;
    unlock();
    return cpu;
}