本文整理汇总了C++中pthread_setaffinity_np函数的典型用法代码示例。如果您正苦于以下问题:C++ pthread_setaffinity_np函数的具体用法?C++ pthread_setaffinity_np怎么用?C++ pthread_setaffinity_np使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了pthread_setaffinity_np函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: __sync_fetch_and_add
// Called when an IO thread starts: pin it to one CPU inside the configured
// [io_thread_start_cpu, io_thread_end_cpu] range, assigning CPUs round-robin
// across all IO threads. Does nothing when the range is unset/invalid.
void ObMergeServer::on_ioth_start()
{
  const int64_t start_cpu = ms_config_.io_thread_start_cpu;
  const int64_t end_cpu = ms_config_.io_thread_end_cpu;
  if (0 <= start_cpu && start_cpu <= end_cpu)
  {
    // Shared round-robin counter; each starting thread atomically claims a slot.
    static volatile int64_t thread_counter = 0;
    const int64_t span = end_cpu - start_cpu + 1;
    const int64_t target_cpu =
        start_cpu + __sync_fetch_and_add(&thread_counter, 1) % span;
    cpu_set_t mask;
    CPU_ZERO(&mask);
    CPU_SET(target_cpu, &mask);
    const int err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &mask);
    TBSYS_LOG(INFO, "io thread setaffinity tid=%ld ret=%d cpu=%ld start=%ld end=%ld",
              GETTID(), err, target_cpu, start_cpu, end_cpu);
  }
}
示例2: set_cpu
/*
 * Pin the calling thread to the next CPU core, round-robin over all online
 * cores.  The static `core` counter is shared by all threads and protected
 * by cpu_affinity_mutex.
 */
static void set_cpu(void)
{
	pthread_mutex_lock(&cpu_affinity_mutex);
	static int core = 0;
	int num_cores = sysconf(_SC_NPROCESSORS_ONLN);
	cpu_set_t cpuset;
	CPU_ZERO(&cpuset);
	CPU_SET(core, &cpuset);
	if (pthread_setaffinity_np(pthread_self(),
			sizeof(cpu_set_t), &cpuset) != 0) {
		log_error("zmap", "can't set thread CPU affinity");
	}
	/* pthread_t is an opaque type; the original passed it to "%u", which is
	 * undefined behavior.  Cast to unsigned long and print with "%lu". */
	log_trace("zmap", "set thread %lu affinity to core %d",
			(unsigned long)pthread_self(), core);
	core = (core + 1) % num_cores;
	pthread_mutex_unlock(&cpu_affinity_mutex);
}
示例3: vm_loop
/*
 * vm_loop - per-vCPU execution loop (bhyve-style).
 *
 * Optionally pins the calling thread to the CPU set configured for this
 * vcpu in vcpumap[], checks the vcpu is in the VM's active set, seeds the
 * guest RIP with startrip, then runs the vcpu until vm_run() fails,
 * dispatching every exit through the handler[] table.  Unknown exit codes
 * terminate the process; VMEXIT_ABORT calls abort().
 */
static void
vm_loop(struct vmctx *ctx, int vcpu, uint64_t startrip)
{
int error, rc, prevcpu;
enum vm_exitcode exitcode;
cpuset_t active_cpus;
/* Pin this thread to the user-supplied CPU set, if one was given. */
if (vcpumap[vcpu] != NULL) {
error = pthread_setaffinity_np(pthread_self(),
sizeof(cpuset_t), vcpumap[vcpu]);
assert(error == 0);
}
/* The vcpu must already have been activated by the caller. */
error = vm_active_cpus(ctx, &active_cpus);
assert(CPU_ISSET(vcpu, &active_cpus));
/* Set the guest's initial instruction pointer. */
error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RIP, startrip);
assert(error == 0);
while (1) {
error = vm_run(ctx, vcpu, &vmexit[vcpu]);
if (error != 0)
break;
/* NOTE(review): prevcpu is assigned but never read in this function —
 * possibly a leftover; confirm before removing. */
prevcpu = vcpu;
exitcode = vmexit[vcpu].exitcode;
if (exitcode >= VM_EXITCODE_MAX || handler[exitcode] == NULL) {
fprintf(stderr, "vm_loop: unexpected exitcode 0x%x\n",
exitcode);
exit(1);
}
/* The handler may change which vcpu this thread services (via &vcpu). */
rc = (*handler[exitcode])(ctx, &vmexit[vcpu], &vcpu);
switch (rc) {
case VMEXIT_CONTINUE:
break;
case VMEXIT_ABORT:
abort();
default:
exit(1);
}
}
/* Only reached when vm_run() itself fails. */
fprintf(stderr, "vm_run error %d, errno %d\n", error, errno);
}
示例4: vm_loop
/*
 * vm_loop - per-vCPU execution loop (older bhyve variant: the guest RIP is
 * passed to vm_run() each iteration instead of being written to a register
 * up front).
 *
 * If pincpu >= 0, the thread is pinned to CPU (pincpu + vcpu).  The loop
 * runs the vcpu until vm_run() fails, dispatching each exit through the
 * handler[] table and advancing/restarting `rip` according to the verdict.
 */
static void
vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip)
{
cpuset_t mask;
int error, rc, prevcpu;
enum vm_exitcode exitcode;
/* Pin each vcpu thread to its own host CPU, offset from pincpu. */
if (pincpu >= 0) {
CPU_ZERO(&mask);
CPU_SET(pincpu + vcpu, &mask);
error = pthread_setaffinity_np(pthread_self(),
sizeof(mask), &mask);
assert(error == 0);
}
while (1) {
error = vm_run(ctx, vcpu, rip, &vmexit[vcpu]);
if (error != 0)
break;
/* NOTE(review): prevcpu is assigned but never read here — confirm
 * before removing. */
prevcpu = vcpu;
exitcode = vmexit[vcpu].exitcode;
if (exitcode >= VM_EXITCODE_MAX || handler[exitcode] == NULL) {
fprintf(stderr, "vm_loop: unexpected exitcode 0x%x\n",
exitcode);
exit(1);
}
/* The handler may switch this thread to a different vcpu via &vcpu. */
rc = (*handler[exitcode])(ctx, &vmexit[vcpu], &vcpu);
switch (rc) {
case VMEXIT_CONTINUE:
/* Resume after the instruction that caused the exit. */
rip = vmexit[vcpu].rip + vmexit[vcpu].inst_length;
break;
case VMEXIT_RESTART:
/* Re-execute the same instruction. */
rip = vmexit[vcpu].rip;
break;
case VMEXIT_RESET:
exit(0);
default:
exit(1);
}
}
/* Only reached when vm_run() itself fails. */
fprintf(stderr, "vm_run error %d, errno %d\n", error, errno);
}
示例5: setaffinity
/*
 * Pin thread `me` to CPU `i`.  An index of -1 means "leave affinity alone".
 * Returns 0 on success (or no-op), 1 on failure.
 */
static int setaffinity(pthread_t me, int i)
{
	cpu_set_t mask;

	if (i == -1)
		return 0;

	CPU_ZERO(&mask);
	CPU_SET(i, &mask);
	if (pthread_setaffinity_np(me, sizeof(cpu_set_t), &mask) == 0)
		return 0;

	D("Unable to set affinity: %s", strerror(errno));
	return 1;
}
示例6: perror
/**
 * Configure scheduling policy/priority and CPU affinity for daThread.
 *
 * @param daThread        thread whose native pthread handle is configured
 * @param threadPriority  priority to apply when a scheduler is requested
 * @param CPUsToBind      CPU ids to bind to; an entry of -1 clears the mask
 * @param scheduler       >0 selects SCHED_FIFO(1)/SCHED_RR(2); <=0 skips
 *
 * No-op on macOS (pthread affinity API is Linux-specific).
 * Throws NA62Error if the affinity mask cannot be applied.
 */
void AExecutable::SetThreadAffinity(boost::thread* daThread, int threadPriority, std::vector<short> CPUsToBind, int scheduler) {
#ifndef __APPLE__
	int policy;
	pthread_t threadID = (pthread_t) (daThread->native_handle());

	if (scheduler > 0) {
		sched_param param;
		// Fix: the original text had "&param" mis-encoded as the HTML
		// pilcrow artifact "¶m", which does not compile.
		if (pthread_getschedparam(threadID, &policy, &param) != 0) {
			perror("pthread_getschedparam");
			exit(EXIT_FAILURE);
		}
		/**
		 * Set scheduling algorithm
		 * Possible values: SCHED_FIFO(1), SCHED_RR(2), SCHED_OTHER(0)
		 */
		policy = scheduler;
		// Use the portable field name instead of glibc's internal
		// __sched_priority alias.
		param.sched_priority = threadPriority;
		if (pthread_setschedparam(threadID, policy, &param) != 0) {
			perror("pthread_setschedparam");
			exit(EXIT_FAILURE);
		}
	}

	if (CPUsToBind.size() > 0) {
		/**
		 * Bind the thread to CPUs from CPUsToBind; a -1 entry resets the
		 * mask to empty and stops collecting CPUs.
		 */
		cpu_set_t mask;
		CPU_ZERO(&mask);
		for (unsigned int i = 0; i < CPUsToBind.size(); i++) {
			if (CPUsToBind[i] == -1) {
				CPU_ZERO(&mask);
				break;
			}
			CPU_SET(CPUsToBind[i], &mask);
		}
		// pthread_setaffinity_np() returns 0 or a positive errno value,
		// never a negative one — the original "< 0" check could never fire.
		if (pthread_setaffinity_np(threadID, sizeof(mask), &mask) != 0) {
			throw NA62Error("Unable to bind threads to specific CPUs!");
		}
	}
#endif
}
示例7: fvl_ssd_write_t
/*
 * fvl_ssd_write_t - SSD writer thread: pins itself to priv->cpu, then
 * forever drains 4-block chunks from the shared queue and streams them to
 * files named /mnt/test<index>, rotating to a new file every 16 chunks
 * (index advances by 4 per rotation).  Returns early only if CPU pinning
 * fails.
 */
static void fvl_ssd_write_t(void *arg)
{
    fvl_ssd_arg_t *priv = arg;
    fvl_queue_t *fqueue = priv->fqueue;
    int rvl;
    int fd;
    uint32_t index = priv->index;
    uint8_t count = 0;
    char path1[20];
    cpu_set_t cpuset;

    /* Pin this writer thread to its dedicated CPU. */
    CPU_ZERO(&cpuset);
    CPU_SET(priv->cpu, &cpuset);
    rvl = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    if (rvl) {
        printf("(%d)fail:pthread_setaffinity_np()\n", priv->cpu);
        return;
    }

    /* snprintf + "%u" instead of sprintf + "%d": path1 is only 20 bytes and
     * index is unsigned (uint32_t); the original could overflow the buffer
     * for large index values. */
    snprintf(path1, sizeof(path1), "/mnt/test%u", index);
    fd = fvl_ssd_open(path1);

    int dequeue_num = -1;
    void *buf = NULL;
    /* uint32_t test_data=0+index*0x1000000;
    uint32_t times=0,error_count=0;
    */
    while (1)
    {
        dequeue_num = fvl_dequeue(fqueue, 4);
        if (dequeue_num != -1)
        {
            buf = fqueue->buf + dequeue_num * FVL_SRIO_DMA_BLKBYTES;
            fvl_ssd_write(fd, buf, 4 * FVL_SRIO_DMA_BLKBYTES);
            fvl_dequeue_complete(fqueue, 4);
            count++;
            if (count == 16)
            {
                /* Rotate to the next output file. */
                fvl_ssd_close(fd);
                index = index + 4;
                snprintf(path1, sizeof(path1), "/mnt/test%u", index);
                fd = fvl_ssd_open(path1);
                count = 0;
            }
        }
    }
}
示例8: thread_channel_recv
/*
 * thread_channel_recv - SRIO channel receiver thread.  arg points to the
 * channel fd.  Pins itself to CPU (fd + 11), then loops forever reading up
 * to 0x100000 bytes per call, validating the payload, and printing a
 * throughput summary every 5 seconds.
 */
void thread_channel_recv(void *arg)
{
    struct timeval tm_start, tm_end;
    fvl_read_rvl_t rlen;
    uint8_t i = 0;
    int rvl = 0;
    uint64_t total_count = 0;
    gettimeofday(&tm_start, NULL);
    int fd = 0;
    int *j = (int *)arg;
    fd = *j;
    printf("j:%d\n", *j);

    /* Channel threads are pinned starting at CPU 11. */
    int cpu = *j + 11;
    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    rvl = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    if (rvl) {
        printf("(%d)fail:pthread_setaffinity_np()\n", cpu);
        return;
    }

    while (1)
    {
        rlen.len = 0x100000;
        rvl = fvl_srio_read(fd, &rlen);
        if (rlen.len != 0)
        {
            test_data(i, rlen.buf_virt, 10485, 0);
            i++;
            gettimeofday(&tm_end, NULL);
            total_count++;
            double diff = (tm_end.tv_sec - tm_start.tv_sec)
                        + (tm_end.tv_usec - tm_start.tv_usec) / 1000000.0;
            if (diff > 5)
            {
                double da_lu = total_count / diff;
                /* "%llu" + cast: total_count is uint64_t; the original
                 * "%lld" with an unsigned 64-bit argument is undefined
                 * behavior. */
                printf("receive fd: %d length(byte): %-15u time(s): %-15f avg MB/s: %-15f total_count:%llu \n",
                       fd, rlen.len, diff, da_lu, (unsigned long long)total_count);
                fflush(stdout);
                total_count = 0;
                gettimeofday(&tm_start, NULL);
            }
        }
        /* NOTE(review): rlen.num may be unset when fvl_srio_read() returns
         * no data — confirm the API always fills it before feedback. */
        fvl_srio_read_feedback(fd, rlen.num);
    }
}
示例9: CPU_ZERO
/*
 * t_send - UDP flood sender thread.  arg describes the socket, destination
 * address, payload length and CPU to pin to.  Sends as fast as possible and
 * prints a throughput summary every 5 seconds.  Exits the process on error.
 */
void *t_send(void *arg)
{
    int z = 0;
    /* Payload is deliberately left uninitialized (see the commented-out
     * memset below) — the content does not matter for a throughput test,
     * but note it leaks stack bytes onto the wire. */
    char buf[40000];
    struct task_type *send = arg;
    int sockfd = send->fd;
    struct sockaddr_in adr_srvr = send->adr;
    int size = send->len;
    cpu_set_t cpuset;
    struct timeval tm_start, tm_end;

    CPU_ZERO(&cpuset);
    CPU_SET(send->cpu, &cpuset);
    /* pthread_setaffinity_np() returns 0 on success or a positive errno
     * value on failure, so test for non-zero. */
    if ((z = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset)) != 0)
    {
        printf("cpu error!\n");
        exit(1);
    }

    gettimeofday(&tm_start, NULL);
    uint64_t total_count = 0;
    while (1)
    {
        // memset(buf,1,size);
        z = sendto(sockfd, buf, size, 0, (struct sockaddr *)&adr_srvr, sizeof(adr_srvr));
        if (z < 0)
        {
            printf("send error!\n");
            exit(1);
        }
        total_count++;
        gettimeofday(&tm_end, NULL);
        double diff = (tm_end.tv_sec - tm_start.tv_sec)
                    + ((tm_end.tv_usec - tm_start.tv_usec) / 1000000.0);
        if (diff > 5)
        {
            double du_la = ((total_count * size) / diff) / 1024 / 1024;
            /* Format fixes: size is int -> "%d"; total_count is uint64_t ->
             * "%llu" with a cast (the original "%u"/"%lld" pairings were
             * undefined behavior). */
            printf("thread: %d length(byte):%-15d time(s):%-15f avg MB/s %-15f total_count:%llu\n",
                   send->cpu, size, diff, du_la, (unsigned long long)total_count);
            total_count = 0;
            gettimeofday(&tm_start, NULL);
        }
    }
    pthread_exit(NULL);
}
示例10: CPU_ZERO
bool SkThread::setProcessorAffinity(unsigned int processor) {
SkThread_PThreadData* pthreadData = static_cast<SkThread_PThreadData*>(fData);
if (!pthreadData->fValidPThread) {
return false;
}
cpu_set_t parentCpuset;
if (0 != pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t), &parentCpuset)) {
return false;
}
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
CPU_SET(nth_set_cpu(processor, &parentCpuset), &cpuset);
return 0 == pthread_setaffinity_np(pthreadData->fPThread,
sizeof(cpu_set_t),
&cpuset);
}
示例11: write_affinity
/* Apply the affinity described by p: build a mask from every non-negative
 * CPU id in p->cpu[] and install it on the calling thread.  If no valid CPU
 * is present the mask is left untouched.  Exits via errexit() on failure. */
void write_affinity(cpus_t *p) {
  cpu_set_t mask;
  int have_cpu = 0;

  CPU_ZERO(&mask);
  for (int k = 0; k < p->sz; k++) {
    int c = p->cpu[k];
    if (c < 0) continue;          /* negative entries mean "unset" */
    CPU_SET(c, &mask);
    have_cpu = 1;
  }

  if (!have_cpu) return;          /* never install an empty mask */

  int r = pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask);
  if (r != 0) {
    errexit("pthread_setaffinity_np", r);
  }
}
示例12: bindthread2core
/* Bind thread_id to CPU core core_id.
 * Returns 0 on success (or when affinity is unsupported on this platform),
 * -1 on failure. */
int bindthread2core(pthread_t thread_id, u_int core_id) {
#ifdef HAVE_PTHREAD_SETAFFINITY_NP
  cpu_set_t mask;
  int rc;

  CPU_ZERO(&mask);
  CPU_SET(core_id, &mask);

  rc = pthread_setaffinity_np(thread_id, sizeof(cpu_set_t), &mask);
  if (rc != 0) {
    /* rc is the errno-style code returned by the call. */
    fprintf(stderr, "Error while binding to core %u: errno=%i\n", core_id, rc);
    return(-1);
  }
  return(0);
#else
  fprintf(stderr, "WARNING: your system lacks of pthread_setaffinity_np() (not core binding)\n");
  return(0);
#endif
}
示例13: bar
// Pin the current thread to CPU 2, then busy-loop printing its id until the
// global signal_received flag is set.  The parameter x is unused.
void bar(int x)
{
    pthread_t self = pthread_self();

    cpu_set_t mask;
    CPU_ZERO(&mask);
    CPU_SET(2, &mask);
    pthread_setaffinity_np(self, sizeof(mask), &mask);

    timespec time_val{};
    //time_val.tv_nsec = sleep_ns; // 10 micro sec

    // do stuff...
    for (int n = 0; !signal_received; ++n)
    {
        std::cout << " bar: " << self << std::endl;
        //nanosleep( &time_val, NULL);
    }
}
示例14: bind_cpu
/*
 * Bind thread->pthread to the CPU given by thread->cpu_id, using
 * dynamically sized CPU sets so systems with > CPU_SETSIZE CPUs work.
 *
 * Returns 0 on success or when no binding is needed (thread == NULL means
 * the emulator is disabled; affinity already correct is also a success),
 * 1 on failure.
 *
 * Fixes vs. original: the CPU_ALLOC'd sets were leaked on every path, and
 * CPU_EQUAL (fixed-size) was used on dynamically allocated sets instead of
 * CPU_EQUAL_S.
 */
static int bind_cpu(thread_t *thread) {
    size_t setsize;
    cpu_set_t *cur_cpuset = NULL;
    cpu_set_t *new_cpuset = NULL;
    int rc = 1;
    int ncpus = max_number_of_cpus();

    if (thread == NULL) {
        // if thread is NULL it means the emulator is disabled, return without setting CPU affinity
        return 0;
    }
    if (ncpus == 0) {
        return 1;
    }

    setsize = CPU_ALLOC_SIZE(ncpus);
    cur_cpuset = CPU_ALLOC(ncpus);
    new_cpuset = CPU_ALLOC(ncpus);
    if (cur_cpuset == NULL || new_cpuset == NULL) {
        goto out;   /* allocation failure; rc stays 1 */
    }
    CPU_ZERO_S(setsize, cur_cpuset);
    CPU_ZERO_S(setsize, new_cpuset);
    CPU_SET_S(thread->cpu_id, setsize, new_cpuset);

    if (pthread_getaffinity_np(thread->pthread, setsize, cur_cpuset) != 0) {
        DBG_LOG(ERROR, "Cannot get thread tid [%d] affinity, pthread: 0x%lx on processor %d\n",
                thread->tid, thread->pthread, thread->cpu_id);
        goto out;
    }

    /* Already bound exactly as requested — nothing to do. */
    if (CPU_EQUAL_S(setsize, cur_cpuset, new_cpuset)) {
        rc = 0;
        goto out;
    }

    DBG_LOG(INFO, "Binding thread tid [%d] pthread: 0x%lx on processor %d\n", thread->tid, thread->pthread, thread->cpu_id);
    if (pthread_setaffinity_np(thread->pthread, setsize, new_cpuset) != 0) {
        DBG_LOG(ERROR, "Cannot bind thread tid [%d] pthread: 0x%lx on processor %d\n", thread->tid, thread->pthread, thread->cpu_id);
        goto out;
    }
    rc = 0;

out:
    /* CPU_FREE is free()-based, so NULL is safe. */
    CPU_FREE(cur_cpuset);
    CPU_FREE(new_cpuset);
    return rc;
}
示例15: main
/*
 * Demonstrate pthread_setaffinity_np / pthread_getaffinity_np: build a mask
 * of all online CPUs, remove CPUs 1-5, apply it to the current thread, then
 * read the mask back and print it.
 */
int
main(int argc, char *argv[])
{
    int s, j, nprocs;
    cpu_set_t cpuset;
    pthread_t thread = pthread_self();

    nprocs = sysconf(_SC_NPROCESSORS_ONLN);

    /* Start from every online CPU, then exclude CPUs 1 through 5. */
    CPU_ZERO(&cpuset);
    for (j = 0; j < nprocs; j++)
        CPU_SET(j, &cpuset);
    for (j = 1; j <= 5; j++)
        CPU_CLR(j, &cpuset);

    /* Show the membership of the mask we are about to install. */
    for (j = 0; j < nprocs; j++)
        fprintf(stdout, "CPU: %d, status: %d\n", j, CPU_ISSET(j, &cpuset));

    s = pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
    if (s != 0)
        handle_error_en(s, "pthread_setaffinity_np");

    /* Read back the affinity mask actually applied to the thread. */
    s = pthread_getaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
    if (s != 0)
        handle_error_en(s, "pthread_getaffinity_np");

    printf("Set returned by pthread_getaffinity_np() contained:\n");
    for (j = 0; j < CPU_SETSIZE; j++)
        if (CPU_ISSET(j, &cpuset))
            printf(" CPU %d\n", j);

    exit(EXIT_SUCCESS);
}