

C++ CPU_ZERO Function Code Examples

This article collects typical usage examples of the C++ CPU_ZERO function. If you have been wondering what exactly CPU_ZERO does, how to call it, or what it looks like in real code, the curated examples below should help.


Fifteen CPU_ZERO code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
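
Before diving into the examples, here is a minimal, self-contained sketch of the pattern nearly all of them share: clear a cpu_set_t with CPU_ZERO, add one or more CPUs with CPU_SET, and apply the mask with an affinity call. This sketch is illustrative only (it is not drawn from any of the projects below), and the choice of CPU 0 is an arbitrary assumption.

#ifndef _GNU_SOURCE
#define _GNU_SOURCE   // CPU_ZERO/CPU_SET are GNU extensions
#endif
#include <sched.h>
#include <cstdio>

int main()
{
    cpu_set_t set;
    CPU_ZERO(&set);        // start from an empty CPU set
    CPU_SET(0, &set);      // add CPU 0 (an arbitrary, illustrative choice)
    // pid 0 means "the calling process"
    if (sched_setaffinity(0, sizeof(set), &set) != 0) {
        std::perror("sched_setaffinity");
        return 1;
    }
    std::printf("pinned to CPU 0\n");
    return 0;
}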

Example 1: __attribute__


//......... portion of code omitted .........
    liblock_server_cores[2] = topology->nodes[0].cores[0];
    liblock_server_cores[3] = topology->nodes[0].cores[0];
    liblock_server_cores[4] = topology->nodes[0].cores[0];
    liblock_server_cores[5] = topology->nodes[0].cores[0];
    liblock_server_cores[6] = topology->nodes[0].cores[0];
    liblock_server_cores[7] = topology->nodes[0].cores[0];
    liblock_server_cores[8] = topology->nodes[0].cores[0];
    liblock_server_cores[9] = topology->nodes[0].cores[0];
    liblock_server_cores[10] = topology->nodes[0].cores[0];
#else

    liblock_server_cores[0] = topology->nodes[0].cores[0];
    liblock_server_cores[1] = topology->nodes[0].cores[0];
    liblock_server_cores[2] = topology->nodes[0].cores[0];
    liblock_server_cores[3] = topology->nodes[0].cores[0];
    liblock_server_cores[4] = topology->nodes[0].cores[0];
    liblock_server_cores[5] = topology->nodes[0].cores[1];
    liblock_server_cores[6] = topology->nodes[0].cores[1];
    liblock_server_cores[7] = topology->nodes[0].cores[0];
    liblock_server_cores[8] = topology->nodes[0].cores[1];
    liblock_server_cores[9] = topology->nodes[0].cores[1];
    liblock_server_cores[10] = topology->nodes[0].cores[1];
#endif

	liblock_lock_name = getenv("LIBLOCK_LOCK_NAME");
	if(!liblock_lock_name)
		liblock_lock_name = "rcl";

	is_rcl = !strcmp(liblock_lock_name, "rcl") ||
             !strcmp(liblock_lock_name, "multircl");

	liblock_start_server_threads_by_hand = 1;
	liblock_servers_always_up = 1;

	sprintf(get_cmd, "/proc/%d/cmdline", getpid());
	FILE* f = fopen(get_cmd, "r");
	char buf[1024];
	buf[0] = 0;
	if(!f) {
		printf("!!! warning: unable to find command line\n");
	} else {
		if(!fgets(buf, 1024, f))
			printf("!!! warning: unable to read command line\n");
		fclose(f);
	}

	printf("**** testing %s with lock %s\n", buf,
           liblock_lock_name);

    /* Pre-bind */
    cpu_set_t    cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(topology->nodes[0].cores[2]->core_id, &cpuset);
    if(sched_setaffinity(0, sizeof(cpu_set_t), &cpuset))
        fatal("sched_setaffinity");
    /* /Pre-bind */

	if(is_rcl) {
		go = 0;

        liblock_reserve_core_for(topology->nodes[0].cores[0], liblock_lock_name);
#ifndef ONE_SERVER
        liblock_reserve_core_for(topology->nodes[0].cores[1], liblock_lock_name);
#endif
/*
        for (i = 0; i < NUM_LOCKS; i++)
	      liblock_reserve_core_for(liblock_server_cores[i], liblock_lock_name);
*/
      
        /* launch the liblock threads */
		liblock_lookup(liblock_lock_name)->run(do_go); 
        
        while(!go)
			PAUSE();
	}

	client_cores = malloc(sizeof(int)*topology->nb_cores);

	int j, k, z;
	for(i=0, z=0; i<topology->nb_nodes; i++) {
		for(j=0; j<topology->nodes[i].nb_cores; j++)
        {
            int is_server_core = 0;
            
            if (is_rcl)
            {
                for (k = 0; k < NUM_LOCKS; k++)
			        if(topology->nodes[i].cores[j] == liblock_server_cores[k])
                        is_server_core = 1;
            }

            if (!is_server_core)
				client_cores[z++] = topology->nodes[i].cores[j]->core_id;
        }
    }

    n_available_cores = z;

    printf("**** %d available cores for clients.\n", z);

	liblock_auto_bind();
}
Developer ID: SANL-2015, Project: SANL-2015, Lines: 101, Source: liblock-config.c

Example 2: pfring_zc_daq_initialize

static int pfring_zc_daq_initialize(const DAQ_Config_t *config,
				 void **ctxt_ptr, char *errbuf, size_t len) {
  Pfring_Context_t *context;
  DAQ_Dict* entry;
  u_int numCPU = get_nprocs();
  int i, max_buffer_len = 0;
  int num_buffers;

  context = calloc(1, sizeof(Pfring_Context_t));

  if (context == NULL) {
    snprintf(errbuf, len, "%s: Couldn't allocate memory for context!", __FUNCTION__);
    return DAQ_ERROR_NOMEM;
  }

  context->mode = config->mode;
  context->snaplen = config->snaplen;
  context->promisc_flag =(config->flags & DAQ_CFG_PROMISC);
  context->timeout = (config->timeout > 0) ? (int) config->timeout : -1;
  context->devices[DAQ_PF_RING_PASSIVE_DEV_IDX] = strdup(config->name);
  context->num_devices = 1;
  context->ids_bridge = 0;
  context->clusterid = 0;
  context->max_buffer_len = 0;
  context->bindcpu = 0;

  if (!context->devices[DAQ_PF_RING_PASSIVE_DEV_IDX]) {
    snprintf(errbuf, len, "%s: Couldn't allocate memory for the device string!", __FUNCTION__);
    free(context);
    return DAQ_ERROR_NOMEM;
  }

  for (entry = config->values; entry; entry = entry->next) {
    if (!entry->value || !*entry->value) {
      snprintf(errbuf, len, "%s: variable needs value(%s)\n", __FUNCTION__, entry->key);
      return DAQ_ERROR;
    } else if (!strcmp(entry->key, "bindcpu")) {
      char *end = entry->value;
      context->bindcpu = (int) strtol(entry->value, &end, 0);
      if (*end || (context->bindcpu >= numCPU)) {
	snprintf(errbuf, len, "%s: bad bindcpu(%s)\n", __FUNCTION__, entry->value);
	return DAQ_ERROR;
      } else {
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET((int)context->bindcpu, &mask);
	if (sched_setaffinity(0, sizeof(mask), &mask) < 0) {
	  snprintf(errbuf, len, "%s:failed to set bindcpu(%u) on pid %i\n", __FUNCTION__, context->bindcpu, getpid());
	  return DAQ_ERROR;
	}
      }
    } else if (!strcmp(entry->key, "timeout")) {
      char *end = entry->value;
      context->timeout = (int) strtol(entry->value, &end, 0);
      if (*end || (context->timeout < 0)) {
	snprintf(errbuf, len, "%s: bad timeout(%s)\n", __FUNCTION__, entry->value);
	return DAQ_ERROR;
      }
    } else if (!strcmp(entry->key, "idsbridge")) {
      if (context->mode == DAQ_MODE_PASSIVE) {
        char* end = entry->value;
        context->ids_bridge = (int) strtol(entry->value, &end, 0);
	if (*end || (context->ids_bridge < 0) || (context->ids_bridge > 2)) {
	  snprintf(errbuf, len, "%s: bad ids bridge mode(%s)\n", __FUNCTION__, entry->value);
	  return DAQ_ERROR;
	}
      } else {
        snprintf(errbuf, len, "%s: idsbridge is for passive mode only\n", __FUNCTION__);
        return DAQ_ERROR;
      }
    } else if (!strcmp(entry->key, "clusterid")) {
      char *end = entry->value;
      context->clusterid = (int) strtol(entry->value, &end, 0);
      if (*end || (context->clusterid < 0)) {
        snprintf(errbuf, len, "%s: bad clusterid(%s)\n", __FUNCTION__, entry->value);
        return DAQ_ERROR;
      }
    } else {
      snprintf(errbuf, len, "%s: unsupported variable(%s=%s)\n", __FUNCTION__, entry->key, entry->value);
      return DAQ_ERROR;
    }
  }

  if (context->mode == DAQ_MODE_READ_FILE) {
    snprintf(errbuf, len, "%s: function not supported on PF_RING", __FUNCTION__);
    free(context);
    return DAQ_ERROR;
  } else if (context->mode == DAQ_MODE_INLINE || (context->mode == DAQ_MODE_PASSIVE && context->ids_bridge)) {
    /* zc:ethX+zc:ethY,zc:ethZ+zc:ethJ */
    char *twins, *twins_pos = NULL;
    context->num_devices = 0;

    twins = strtok_r(context->devices[DAQ_PF_RING_PASSIVE_DEV_IDX], ",", &twins_pos);
    while(twins != NULL) {
      char *dev, *dev_pos = NULL, *tx_dev;
      int last_twin = 0;

      dev = strtok_r(twins, "+", &dev_pos);
//......... portion of code omitted .........
Developer ID: nakuljavali, Project: pfring, Lines: 101, Source: daq_pfring_zc.c

Example 3: start

			void start(std::vector<uint64_t> const & procs)
			{
				if ( ! thread.get() )
				{
					thread_ptr_type tthread(new pthread_t);
					thread = UNIQUE_PTR_MOVE(tthread);

					pthread_attr_t attr;
					if ( pthread_attr_init(&attr) )
					{
						::libmaus2::exception::LibMausException se;
						se.getStream() << "pthread_attr_init failed:" << strerror(errno);
						se.finish();
						throw se;
					}

					cpu_set_t cpuset;

					CPU_ZERO(&cpuset);
					for ( uint64_t i = 0; i < procs.size(); ++i )
						CPU_SET(procs[i],&cpuset);

					if ( pthread_attr_setaffinity_np(&attr,sizeof(cpu_set_t),&cpuset) )
					{
						pthread_attr_destroy(&attr);
						::libmaus2::exception::LibMausException se;
						se.getStream() << "pthread_attr_setaffinity_np failed:" << strerror(errno);
						se.finish();
						throw se;
					}

					#if 0
					std::cerr << "Creating thread with affinity." << std::endl;
					std::cerr << ::libmaus2::util::StackTrace::getStackTrace() << std::endl;
					#endif

					if ( pthread_create(thread.get(),&attr,dispatch,this) )
					{
						pthread_attr_destroy(&attr);
						::libmaus2::exception::LibMausException se;
						se.getStream() << "pthread_create() failed in PosixThread::start()";
						se.finish();
						throw se;
					}

					if ( pthread_attr_destroy(&attr) )
					{
						::libmaus2::exception::LibMausException se;
						se.getStream() << "pthread_attr_destroy failed:" << strerror(errno);
						se.finish();
						throw se;

					}
				}
				else
				{
					::libmaus2::exception::LibMausException se;
					se.getStream() << "PosixThread::start() called but object is already in use.";
					se.finish();
					throw se;
				}
			}
Developer ID: whitwham, Project: libmaus2, Lines: 62, Source: PosixThread.hpp
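
Example 3 fixes the affinity before the thread even exists, via pthread_attr_setaffinity_np on the creation attributes. The alternative design is to let the thread start anywhere and re-pin it afterwards, as Example 9 below does. A minimal hedged sketch of that variant follows; the helper name pin_self_to is hypothetical.

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <pthread.h>
#include <sched.h>
#include <cstdio>

// Hypothetical helper: pin the calling thread to a single CPU after creation.
static void pin_self_to(int cpu)
{
    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    if (pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset) != 0)
        std::perror("pthread_setaffinity_np");
}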

Example 4: CPU_ZERO

affinity_set::affinity_set()
{
    CPU_ZERO(&set);
}
Developer ID: cjy7117, Project: FT-MAGMA, Lines: 4, Source: affinity.cpp

Example 5: scheprocess

int scheprocess(MTN *mtn, MTNJOB *job, int job_max, int cpu_lim, int cpu_num)
{
  int i;
  int cpu_id;
  int cpu_use;
  cpu_set_t cpumask;

  cpu_id  = 0;
  cpu_use = 0;
  scanprocess(job, job_max);
  for(i=0;i<job_max;i++){
    if(!job[i].pid){
      continue;
    }
    getjobusage(job + i);
    if(cpu_id != job[i].cid){
      CPU_ZERO(&cpumask);
      CPU_SET(cpu_id, &cpumask);
      if(sched_setaffinity(job[i].pid, sizeof(cpumask), &cpumask) == -1){
        mtnlogger(mtn, 0, "[error] %s: sched_setaffinity: %s\n", __func__, strerror(errno));
        job[i].cid = -1;
      }else{
        job[i].cid = cpu_id;
      }
    }
    cpu_id  += 1;
    cpu_id  %= cpu_num;
    cpu_use += job[i].cpu;
    //MTNDEBUG("CMD=%s STATE=%c CPU=%d.%d\n", job->cmd, job->pstat[0].state, job->cpu / 10, job->cpu % 10);
  }
  //MTNDEBUG("[CPU=%d.%d%% LIM=%d CPU=%d]\n", ctx->cpu_use / 10, ctx->cpu_use % 10, ctx->cpu_lim / 10, ctx->cnt.cpu);

  if(!cpu_lim){
    return(cpu_use);
  }

  for(i=0;i<job_max;i++){
    if(!job[i].pid){
      continue;
    }
    if(cpu_lim * cpu_num < cpu_use){
      // overloaded state
      if(job[i].pstat[0].state != 'T'){
        if(job[i].cpu > cpu_lim){
          kill(-(job[i].pid), SIGSTOP);
          return(cpu_use);
        }
      }
    }else{
      // idle state
      if(job[i].pstat[0].state == 'T'){
        if(job[i].cpu < cpu_lim){
          kill(-(job[i].pid), SIGCONT);
          return(cpu_use);
        }
      }
    }
  }

  for(i=0;i<job_max;i++){
    if(!job[i].pid){
      continue;
    }
    if(job[i].pstat[0].state != 'T'){
      if(job[i].cpu > cpu_lim){
        kill(-(job[i].pid), SIGSTOP);
      }
    }else{
      if(job[i].cpu < cpu_lim){
        kill(-(job[i].pid), SIGCONT);
      }
    }
  }
  return(cpu_use);
}
Developer ID: kizkoh, Project: mtnd, Lines: 75, Source: mtnexec.c

Example 6: main


//......... portion of code omitted .........
		num_nodes);

	/* Make a shared anonymous map of the RAM */
	shm = mmap(NULL, ram_size, PROT_READ | PROT_WRITE,
		MAP_SHARED | MAP_ANONYMOUS, 0, 0);
	if (shm == MAP_FAILED) {
		perror("mmap");
		return 2;
	}
	printf("mmap region: %p (%llu nodes)\n", shm, num_nodes);

	/* Create an SHM condition variable.  Bogus, I know... */
	cond = mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE,
		MAP_SHARED | MAP_ANONYMOUS, 0, 0);
	if (cond == MAP_FAILED) {
		perror("mmap");
		return 4;
	}
	*cond = 1;

	/* Create a "graph" by populating it with random pointers. */
	printf("Populating nodes...");
	fflush(stdout);
	populate_graph(shm, num_nodes);
	printf("done.\n");

	printf("Creating %lu processes with reports every %lu seconds \
and %d seconds between adding children.\n",
		num_forks, report_interval, add_wait);

	/* Fork off separate processes.  The shared region is shared
	 * across all children.  If we only wanted one thread, we shouldn't
	 * fork anything.  Note that the "cond" mmap is a really crappy
	 * condition variable kludge that works well enough for HERE ONLY. */
	for (c = (add_wait >= 0 ? 0 : 1); c < num_forks; c++) {
		/* Child should wait for the condition and then break. */
		if (!fork()) {
#ifdef __cpu_set_t_defined
			if (affinity) {
				CPU_ZERO(&my_cpu_mask);
				CPU_SET(c, &my_cpu_mask);
				if (0 != sched_setaffinity(0,sizeof(cpu_set_t), &my_cpu_mask)) {
					perror("sched_setaffinity");
				}
			}
#endif

			is_parent = 0;
			while (*cond) {
				usleep(10000);
			}
			break;
		}
	}
	if (is_parent) {
#ifdef __cpu_set_t_defined
		if (affinity) {
			CPU_ZERO(&my_cpu_mask);
			CPU_SET(0, &my_cpu_mask);
			if (0 != sched_setaffinity(0,sizeof(cpu_set_t), &my_cpu_mask)) {
				perror("sched_setaffinity");
			}
		}
#endif
		printf("All threads created.  Launching!\n");
		*cond = 0;
	}

	/* now start the work */
	if (!is_parent) {
start_thread:
		/* Set up the alarm handler to print speed info. */
		memset(&zig, 0x00, sizeof(zig));
		zig.sa_handler = alarm_func;
		sigaction(SIGALRM, &zig, NULL);
		gettimeofday(&last, NULL);
		alarm(report_interval);

		/* Walk the graph. */
		walk_graph(shm);

		/* This function never returns */
	} else {
		/* Start the ramp-up.  The children will never die,
		 * so we don't need to wait() for 'em.
		 */
		while (add_wait != -1) {
			sleep(add_wait);
			if (fork() == 0) {
				/* goto is cheesy, but works. */
				goto start_thread;
			} else {
				printf("Added thread.\n");
			}
		}
		goto start_thread;
	}

	return 0;
}
Developer ID: shubmit, Project: shub-ltp, Lines: 101, Source: snake.c

Example 7: gmx_check_thread_affinity_set

/* Check the process affinity mask and if it is found to be non-zero,
 * will honor it and disable mdrun internal affinity setting.
 * Note that this will only work on Linux as we use a GNU feature.
 */
void
gmx_check_thread_affinity_set(FILE            *fplog,
                              const t_commrec *cr,
                              gmx_hw_opt_t    *hw_opt,
                              int  gmx_unused  nthreads_hw_avail,
                              gmx_bool         bAfterOpenmpInit)
{
    GMX_RELEASE_ASSERT(hw_opt, "hw_opt must be a non-NULL pointer");

    if (!bAfterOpenmpInit)
    {
        /* Check for externally set OpenMP affinity and turn off internal
         * pinning if any is found. We need to do this check early to tell
         * thread-MPI whether it should do pinning when spawning threads.
         * TODO: the above no longer holds, we should move these checks later
         */
        if (hw_opt->thread_affinity != threadaffOFF)
        {
            char *message;
            if (!gmx_omp_check_thread_affinity(&message))
            {
                /* TODO: with -pin auto we should only warn when using all cores */
                md_print_warn(cr, fplog, "%s", message);
                sfree(message);
                hw_opt->thread_affinity = threadaffOFF;
            }
        }

        /* With thread-MPI this is needed as pinning might get turned off,
         * which needs to be known before starting thread-MPI.
         * With thread-MPI hw_opt is processed here on the master rank
         * and passed to the other ranks later, so we only do this on master.
         */
        if (!SIMMASTER(cr))
        {
            return;
        }
#ifndef GMX_THREAD_MPI
        return;
#endif
    }

#ifdef HAVE_SCHED_AFFINITY
    int       ret;
    cpu_set_t mask_current;

    if (hw_opt->thread_affinity == threadaffOFF)
    {
        /* internal affinity setting is off, don't bother checking process affinity */
        return;
    }

    CPU_ZERO(&mask_current);
    if ((ret = sched_getaffinity(0, sizeof(cpu_set_t), &mask_current)) != 0)
    {
        /* failed to query affinity mask, will just return */
        if (debug)
        {
            fprintf(debug, "Failed to query affinity mask (error %d)", ret);
        }
        return;
    }

    /* Before proceeding with the actual check, make sure that the number of
     * detected CPUs is >= the CPUs in the current set.
     * We need to check for CPU_COUNT as it was added only in glibc 2.6. */
#ifdef CPU_COUNT
    if (nthreads_hw_avail < CPU_COUNT(&mask_current))
    {
        if (debug)
        {
            fprintf(debug, "%d hardware threads detected, but %d was returned by CPU_COUNT",
                    nthreads_hw_avail, CPU_COUNT(&mask_current));
        }
        return;
    }
#endif /* CPU_COUNT */

    gmx_bool bAllSet = TRUE;
    for (int i = 0; (i < nthreads_hw_avail && i < CPU_SETSIZE); i++)
    {
        bAllSet = bAllSet && (CPU_ISSET(i, &mask_current) != 0);
    }

#ifdef GMX_LIB_MPI
    gmx_bool  bAllSet_All;

    MPI_Allreduce(&bAllSet, &bAllSet_All, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD);
    bAllSet = bAllSet_All;
#endif

    if (!bAllSet)
    {
        if (hw_opt->thread_affinity == threadaffAUTO)
        {
            if (!bAfterOpenmpInit)
//......... portion of code omitted .........
Developer ID: carryer123, Project: gromacs, Lines: 101, Source: gmx_thread_affinity.cpp
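
Example 7 reads the mask back with sched_getaffinity and walks it with CPU_ISSET/CPU_COUNT rather than setting it. The querying side of the API can be exercised on its own; the following stand-alone sketch (illustrative, independent of GROMACS) prints the CPUs the calling process is allowed to run on:

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <sched.h>
#include <cstdio>

int main()
{
    cpu_set_t mask;
    CPU_ZERO(&mask);
    if (sched_getaffinity(0, sizeof(cpu_set_t), &mask) != 0) {
        std::perror("sched_getaffinity");
        return 1;
    }
    std::printf("%d CPUs in the affinity mask:", CPU_COUNT(&mask)); // CPU_COUNT needs glibc >= 2.6
    for (int i = 0; i < CPU_SETSIZE; ++i)
        if (CPU_ISSET(i, &mask))
            std::printf(" %d", i);
    std::printf("\n");
    return 0;
}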

Example 8: main

//-----------------------------------------------------------------------------
// Simulator Entry Point
//-----------------------------------------------------------------------------
int main( int argc, char* argv[] ) {

    int result;

    controller_pid = 0;

    sim_pid = getpid();

    cpu_set_t cpuset_mask;
    // zero out the cpu set
    CPU_ZERO( &cpuset_mask );
    // restrict this process to a single processor (CPU 0 here; cf. DEFAULT_CONTROLLER_PROCESSOR)
    CPU_SET( 0, &cpuset_mask );
    if ( sched_setaffinity( sim_pid, sizeof(cpuset_mask), &cpuset_mask ) == -1 ) {
        printf( "ERROR: Failed to set affinity for sim process.\n" );
        _exit( EXIT_FAILURE );
    }

    /*

    struct sched_param sim_params;
    sim_thread = pthread_self();
    //sim_params.sched_priority = 0;
    result = pthread_setschedparam( sim_thread, SCHED_RR, &sim_params );
    if( result != 0 ) {
        switch( errno ) {
        case EINVAL:
            printf( "errno: EINVAL\n" );
            break;
        case EPERM:
            printf( "errno: EPERM\n" );
            break;
        case ESRCH:
            printf( "errno: ESRCH\n" );
            break;
        default:
            printf( "errno: Unenumerated\n" );
            break;
        }

        _exit( EXIT_FAILURE );
    }
    */
    /*
    int sched_policy = sched_getscheduler( 0 );
    if ( sched_policy == -1 ) {
        printf( "ERROR: Failed to get scheduler policy for controller process.\n" );
        _exit( EXIT_FAILURE );
    } else {
        printf( "Sim Scheduling Policy: %d\n", sched_policy );

        struct sched_param params;
        params.sched_priority = 1;
        result = sched_setscheduler( 0, SCHED_RR, &params );
        if( result != 0 ) {
            switch( errno ) {
            case EINVAL:
                printf( "errno: EINVAL\n" );
                break;
            case EPERM:
                printf( "errno: EPERM\n" );
                break;
            case ESRCH:
                printf( "errno: ESRCH\n" );
                break;
            default:
                printf( "errno: Unenumerated\n" );
                break;
            }

            // error
            _exit( EXIT_FAILURE );
        }
    }
    */
    sim_priority = getpriority( PRIO_PROCESS, sim_pid );


    //fork_controller();

    result = pthread_attr_init( &monitor_thread_attr );
    if( result != 0 ) {
        // error
        _exit( EXIT_FAILURE );
    }

    struct sched_param monitor_sched_param;

    //monitor_sched_param.sched_priority = sched_getscheduler( pthread_self() ) + 2;

    result = pthread_attr_setinheritsched( &monitor_thread_attr, PTHREAD_EXPLICIT_SCHED );
    if( result != 0 ) {
        // error
        _exit( EXIT_FAILURE );
    }

    result = pthread_attr_setschedpolicy( &monitor_thread_attr, SCHED_RR );
//......... portion of code omitted .........
Developer ID: gparmer, Project: tas, Lines: 101, Source: proto_threading.cpp

Example 9: while

int sageBlockStreamer::streamLoop()
{
   while (streamerOn) {
      //int syncFrame = 0;
      //sage::printLog("\n========= wait for a frame ========\n");
      sageBlockFrame *buf = (sageBlockFrame *)doubleBuf->getBackBuffer();
      //sage::printLog("\n========= got a frame ==========\n");
      
      /* sungwon experimental */

      if ( affinityFlag ) {
#if ! defined (__APPLE__)
    	  cpu_set_t cpuset;
    	  CPU_ZERO(&cpuset);

    	  pthread_mutex_lock(&affinityMutex);
    	  std::list<int>::iterator it;
    	  for ( it=cpulist.begin(); it!=cpulist.end(); it++) {
    		  CPU_SET((*it), &cpuset);
    	  }
    	  affinityFlag = false; // reset flag
    	  pthread_mutex_unlock(&affinityMutex);

    	  if ( pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset) != 0 ) {
    		  perror("\n\npthread_setaffinity_np\n");
    	  }

    	  if ( pthread_getaffinity_np(pthread_self(), sizeof(cpuset), &cpuset) != 0 ) {
    		  perror("pthread_getaffinity_np");
    	  }
    	  else {
    		  fprintf(stderr,"SBS::%s() : current CPU affinity : ", __FUNCTION__);
    		  for (int i=0; i<CPU_SETSIZE; i++) {
    			  if (CPU_ISSET(i, &cpuset)) {
    				  fprintf(stderr, "%d ", i);
    			  }
    		  }
    		  fprintf(stderr,"\n");
    	  }
#endif
      }

      if ( config.swexp ) {
    	  //buf->updateBufferHeader(frameID, config.resX, config.resY);
    	  if ( nwObj->sendpixelonly(0, buf) <= 0 ) {
    		  streamerOn = false;
    	  }
    	  else {
    		  //fprintf(stderr, "sageBlockStreamer::%s() : frame %d sent \n", __FUNCTION__, frameID);
    	  }
    	  doubleBuf->releaseBackBuffer();
    	  frameID++;
    	  frameCounter++;
    	  continue;
      }

      char *msgStr = NULL;
      if (config.nodeNum > 1) {
         config.syncClientObj->sendSlaveUpdate(frameID);
         //sage::printLog("send update %d", config.rank);
         config.syncClientObj->waitForSyncData(msgStr);
         //sage::printLog("receive sync %d", config.rank);
         if (msgStr) {
            //std::cout << "reconfigure " << msgStr << std::endl;
            reconfigureStreams(msgStr);
            //firstConfiguration = false;
         }   
      }
      else {
         pthread_mutex_lock(reconfigMutex);
         if (msgQueue.size() > 0) {
            msgStr = msgQueue.front();
            reconfigureStreams(msgStr);
            //std::cout << "config ID : " << configID << std::endl;
            msgQueue.pop_front();
            firstConfiguration = false;
         }
         pthread_mutex_unlock(reconfigMutex);
      }
      
      if (config.nodeNum == 1)
         checkInterval();
      
      if (streamPixelData(buf) < 0) {
         streamerOn = false;
      }
         
      doubleBuf->releaseBackBuffer();
      //std::cout << "pt1" << std::endl;
   }   

   // for quiting other processes waiting a sync signal
   if (config.nodeNum > 1) {
      config.syncClientObj->sendSlaveUpdate(frameID);
   }
   
   sage::printLog("sageStreamer : network thread exit");
   
   return 0;
}
Developer ID: MattHung, Project: sage-graphics, Lines: 100, Source: sageBlockStreamer.cpp

Example 10: cpu_set_init_Linux

/**
 *  @brief Linux-specific version of do_cpu_set_init().
 *
 *  @param cpu_set The CPU set.
 *
 *  @return 0 on success. Negative value on error.
 */
static void cpu_set_init_Linux(cpu_set_p cpu_set)
{

  int i;
  os_cpu_set_t original_affinity_set;
  int num_cpus = sysconf(_SC_NPROCESSORS_CONF);

  
  
  /* get current affinity set so we can restore it when we're done */
  if ( sched_getaffinity( 0, sizeof(os_cpu_set_t), &original_affinity_set ) )
      throw EXCEPTION2(ThreadException,
                       "sched_getaffinity() failed with %s",
                       errno_to_str().data());

  /* test restoration */
  if ( sched_setaffinity( 0, sizeof(os_cpu_set_t), &original_affinity_set ) )
      throw EXCEPTION2(ThreadException,
                       "sched_setaffinity() failed with %s",
                       errno_to_str().data());


  /* allocate cpus */
  cpu_t cpus = 
    (cpu_t)malloc( num_cpus * sizeof(struct cpu_s) );
  if ( cpus == NULL )
    throw EXCEPTION1(BadAlloc, "cpu array");

  for (i = 0; i < num_cpus; i++)
    /* initialize fields */
    CPU_ZERO( &cpus[i].cpu_set );



  /* find the CPUs on the system */
  int num_found = 0;
  int cpu_num;
  for (cpu_num = 0; ; cpu_num++)
  {
    os_cpu_set_t test_set;
    CPU_ZERO( &test_set );
    CPU_SET ( cpu_num, &test_set );

    if ( !sched_setaffinity( 0, sizeof(os_cpu_set_t), &test_set ) )
    {
      /* found a new CPU */
      cpus[num_found].cpu_unique_id = cpu_num;
      cpu_set_copy( &cpus[num_found].cpu_set, &test_set );
      num_found++;
      if ( num_found == num_cpus )
        break;
    }
  }  

  

  /* restore original affinity set */
  if ( sched_setaffinity( 0, sizeof(os_cpu_set_t), &original_affinity_set ) )
      throw EXCEPTION2(ThreadException,
                       "sched_setaffinity() failed with %s",
                       errno_to_str().data());
  
  
  /* return parameters */
  cpu_set->cpuset_num_cpus = num_cpus;
  cpu_set->cpuset_cpus     = cpus;
}
Developer ID: glycerine, Project: shore-mt, Lines: 74, Source: cpu_set.cpp

Example 11: swFactoryProcess_worker_loop

/**
 * worker main loop
 */
static int swFactoryProcess_worker_loop(swFactory *factory, int worker_pti)
{
	swServer *serv = factory->ptr;

	struct
	{
		long pti;
		swEventData req;
	} rdata;
	int n;

	int pipe_rd = serv->workers[worker_pti].pipe_worker;

#ifdef HAVE_CPU_AFFINITY
	if (serv->open_cpu_affinity == 1)
	{
		cpu_set_t cpu_set;
		CPU_ZERO(&cpu_set);
		CPU_SET(worker_pti % SW_CPU_NUM, &cpu_set);
		if (0 != sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set))
		{
			swWarn("sched_setaffinity failed");
		}
	}
#endif

	//signal init
	swWorker_signal_init();

	//worker_id
	SwooleWG.id = worker_pti;

#ifndef SW_USE_RINGBUFFER
	int i;
	//for open_check_eof and  open_check_length
    if (serv->open_eof_check || serv->open_length_check || serv->open_http_protocol)
    {
        SwooleWG.buffer_input = sw_malloc(sizeof(swString*) * serv->reactor_num);
        if (SwooleWG.buffer_input == NULL)
        {
            swError("malloc for SwooleWG.buffer_input failed.");
            return SW_ERR;
        }
        for (i = 0; i < serv->reactor_num; i++)
        {
            SwooleWG.buffer_input[i] = swString_new(serv->buffer_input_size);
            if (SwooleWG.buffer_input[i] == NULL)
            {
                swError("buffer_input init failed.");
                return SW_ERR;
            }
        }
    }
#endif

	if (serv->ipc_mode == SW_IPC_MSGQUEUE)
	{
		//preemptive mode: workers share the same queue type
		if (serv->dispatch_mode == SW_DISPATCH_QUEUE)
		{
			//must add 1 here
			rdata.pti = serv->worker_num + 1;
		}
		else
		{
			//must add 1
			rdata.pti = worker_pti + 1;
		}
	}
	else
	{
		SwooleG.main_reactor = sw_malloc(sizeof(swReactor));
		if (SwooleG.main_reactor == NULL)
		{
			swError("[Worker] malloc for reactor failed.");
			return SW_ERR;
		}
		if (swReactor_auto(SwooleG.main_reactor, SW_REACTOR_MAXEVENTS) < 0)
		{
			swError("[Worker] create worker_reactor failed.");
			return SW_ERR;
		}
		swSetNonBlock(pipe_rd);
		SwooleG.main_reactor->ptr = serv;
		SwooleG.main_reactor->add(SwooleG.main_reactor, pipe_rd, SW_FD_PIPE);
		SwooleG.main_reactor->setHandle(SwooleG.main_reactor, SW_FD_PIPE, swFactoryProcess_worker_onPipeReceive);

#ifdef HAVE_SIGNALFD
		if (SwooleG.use_signalfd)
		{
			swSignalfd_setup(SwooleG.main_reactor);
		}
#endif
	}

    if (serv->max_request < 1)
    {
//......... portion of code omitted .........
Developer ID: Ramis, Project: swoole-src, Lines: 101, Source: FactoryProcess.c

Example 12: defined

static void *magma_ssytrd_hb2st_parallel_section(void *arg)
{
    magma_int_t my_core_id  = ((magma_sbulge_id_data*)arg) -> id;
    magma_sbulge_data* data = ((magma_sbulge_id_data*)arg) -> data;

    magma_int_t allcores_num   = data -> threads_num;
    magma_int_t n              = data -> n;
    magma_int_t nb             = data -> nb;
    magma_int_t nbtiles        = data -> nbtiles;
    magma_int_t grsiz          = data -> grsiz;
    magma_int_t Vblksiz        = data -> Vblksiz;
    magma_int_t compT          = data -> compT;
    float *A         = data -> A;
    magma_int_t lda            = data -> lda;
    float *V         = data -> V;
    magma_int_t ldv            = data -> ldv;
    float *TAU       = data -> TAU;
    float *T         = data -> T;
    magma_int_t ldt            = data -> ldt;
    volatile magma_int_t* prog = data -> prog;

    pthread_barrier_t* barrier = &(data -> barrier);

    magma_int_t sys_corenbr    = 1;

    float timeB=0.0, timeT=0.0;

#if defined(SETAFFINITY)
    // bind this thread to the core given by my_core_id
    cpu_set_t set;
    CPU_ZERO( &set );
    CPU_SET( my_core_id, &set );
    sched_setaffinity( 0, sizeof(set), &set );
#endif

    if(compT==1)
    {
        /* compute the Q1 overlapped with the bulge chasing+T.
         * if all_cores_num=1 it call Q1 on GPU and then bulgechasing.
         * otherwise the first thread run Q1 on GPU and
         * the other threads run the bulgechasing.
         * */

        if(allcores_num==1)
        {

            //=========================
            //    bulge chasing
            //=========================
            timeB = magma_wtime();

            magma_stile_bulge_parallel(0, 1, A, lda, V, ldv, TAU, n, nb, nbtiles, grsiz, Vblksiz, prog);

            timeB = magma_wtime()-timeB;
            printf("  Finish BULGE   timing= %f \n" ,timeB);


            //=========================
            // compute the T's to be used when applying Q2
            //=========================
            timeT = magma_wtime();
            magma_stile_bulge_computeT_parallel(0, 1, V, ldv, TAU, T, ldt, n, nb, Vblksiz);

            timeT = magma_wtime()-timeT;
            printf("  Finish T's     timing= %f \n" ,timeT);

        }else{ // allcore_num > 1

            magma_int_t id  = my_core_id;
            magma_int_t tot = allcores_num;


                //=========================
                //    bulge chasing
                //=========================
                if(id == 0)timeB = magma_wtime();

                magma_stile_bulge_parallel(id, tot, A, lda, V, ldv, TAU, n, nb, nbtiles, grsiz, Vblksiz, prog);
                pthread_barrier_wait(barrier);

                if(id == 0){
                    timeB = magma_wtime()-timeB;
                    printf("  Finish BULGE   timing= %f \n" ,timeB);
                }

                //=========================
                // compute the T's to be used when applying Q2
                //=========================
                if(id == 0)timeT = magma_wtime();

                magma_stile_bulge_computeT_parallel(id, tot, V, ldv, TAU, T, ldt, n, nb, Vblksiz);
                pthread_barrier_wait(barrier);

                if (id == 0){
                    timeT = magma_wtime()-timeT;
                    printf("  Finish T's     timing= %f \n" ,timeT);
                }

        } // allcore == 1
//......... portion of code omitted .........
Developer ID: cjy7117, Project: DVFS-MAGMA, Lines: 101, Source: ssytrd_hb2st.cpp

Example 13: xctrl_suspend

/* HVM mode suspension. */
static void
xctrl_suspend()
{
#ifdef SMP
	cpuset_t cpu_suspend_map;
#endif
	int suspend_cancelled;

	EVENTHANDLER_INVOKE(power_suspend);

	if (smp_started) {
		thread_lock(curthread);
		sched_bind(curthread, 0);
		thread_unlock(curthread);
	}
	KASSERT((PCPU_GET(cpuid) == 0), ("Not running on CPU#0"));

	/*
	 * Clear our XenStore node so the toolstack knows we are
	 * responding to the suspend request.
	 */
	xs_write(XST_NIL, "control", "shutdown", "");

	/*
	 * Be sure to hold Giant across DEVICE_SUSPEND/RESUME since non-MPSAFE
	 * drivers need this.
	 */
	mtx_lock(&Giant);
	if (DEVICE_SUSPEND(root_bus) != 0) {
		mtx_unlock(&Giant);
		printf("%s: device_suspend failed\n", __func__);
		return;
	}
	mtx_unlock(&Giant);

#ifdef SMP
	CPU_ZERO(&cpu_suspend_map);	/* silence gcc */
	if (smp_started) {
		/*
		 * Suspend other CPUs. This prevents IPIs while we
		 * are resuming, and will allow us to reset per-cpu
		 * vcpu_info on resume.
		 */
		cpu_suspend_map = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &cpu_suspend_map);
		if (!CPU_EMPTY(&cpu_suspend_map))
			suspend_cpus(cpu_suspend_map);
	}
#endif

	/*
	 * Prevent any races with evtchn_interrupt() handler.
	 */
	disable_intr();
	intr_suspend();
	xen_hvm_suspend();

	suspend_cancelled = HYPERVISOR_suspend(0);

	xen_hvm_resume(suspend_cancelled != 0);
	intr_resume(suspend_cancelled != 0);
	enable_intr();

	/*
	 * Reset grant table info.
	 */
	gnttab_resume();

#ifdef SMP
	if (smp_started && !CPU_EMPTY(&cpu_suspend_map)) {
		/*
		 * Now that event channels have been initialized,
		 * resume CPUs.
		 */
		resume_cpus(cpu_suspend_map);
	}
#endif

	/*
	 * FreeBSD really needs to add DEVICE_SUSPEND_CANCEL or
	 * similar.
	 */
	mtx_lock(&Giant);
	DEVICE_RESUME(root_bus);
	mtx_unlock(&Giant);

	if (smp_started) {
		thread_lock(curthread);
		sched_unbind(curthread);
		thread_unlock(curthread);
	}

	EVENTHANDLER_INVOKE(power_resume);

	if (bootverbose)
		printf("System resumed after suspension\n");

}
Developer ID: ChristosKa, Project: freebsd, Lines: 99, Source: control.c

Example 14: memset

void *semathread(void *param)
{
	int mustgetcpu = 0;
	int first = 1;
	struct params *par = param;
	cpu_set_t mask;
	int policy = SCHED_FIFO;
	struct sched_param schedp;

	memset(&schedp, 0, sizeof(schedp));
	schedp.sched_priority = par->priority;
	sched_setscheduler(0, policy, &schedp);

	if (par->cpu != -1) {
		CPU_ZERO(&mask);
		CPU_SET(par->cpu, &mask);
		if(sched_setaffinity(0, sizeof(mask), &mask) == -1)
			fprintf(stderr,	"WARNING: Could not set CPU affinity "
				"to CPU #%d\n", par->cpu);
	} else
		mustgetcpu = 1;

	par->tid = gettid();

	while (!par->shutdown) {
		if (par->sender) {
			pthread_mutex_lock(&syncmutex[par->num]);

			/* Release lock: Start of latency measurement ... */
			gettimeofday(&par->unblocked, NULL);
			pthread_mutex_unlock(&testmutex[par->num]);
			par->samples++;
			if(par->max_cycles && par->samples >= par->max_cycles)
				par->shutdown = 1;
			if (mustgetcpu)
				par->cpu = get_cpu();
		} else {
			/* Receiver */
			if (!first) {
				pthread_mutex_lock(&syncmutex[par->num]);
				first = 1;
			}
			pthread_mutex_lock(&testmutex[par->num]);

			/* ... Got the lock: End of latency measurement */
			gettimeofday(&par->received, NULL);
			par->samples++;
			timersub(&par->received, &par->neighbor->unblocked,
			    &par->diff);

			if (par->diff.tv_usec < par->mindiff)
				par->mindiff = par->diff.tv_usec;
			if (par->diff.tv_usec > par->maxdiff)
				par->maxdiff = par->diff.tv_usec;
			par->sumdiff += (double) par->diff.tv_usec;
			if (par->tracelimit && par->maxdiff > par->tracelimit) {
				char tracing_enabled_file[MAX_PATH];

				strcpy(tracing_enabled_file, get_debugfileprefix());
				strcat(tracing_enabled_file, "tracing_enabled");
				int tracing_enabled =
				    open(tracing_enabled_file, O_WRONLY);
				if (tracing_enabled >= 0) {
					write(tracing_enabled, "0", 1);
					close(tracing_enabled);
				} else
					snprintf(par->error, sizeof(par->error),
					    "Could not access %s\n",
					    tracing_enabled_file);
				par->shutdown = 1;
				par->neighbor->shutdown = 1;
			}

			if (par->max_cycles && par->samples >= par->max_cycles)
				par->shutdown = 1;
			if (mustgetcpu)
				par->cpu = get_cpu();
			nanosleep(&par->delay, NULL);
			pthread_mutex_unlock(&syncmutex[par->num]);
		}
	}
	par->stopped = 1;
	return NULL;
}
Developer ID: RTAndroid, Project: android_external_cyclictest, Lines: 84, Source: ptsematest.c

Example 15: server_main

int server_main(void)
{
    int fd, r, len;
    void *binder, *cookie;
    bwr_t bwr;
    unsigned char rbuf[RBUF_SIZE], *p;
    bcmd_txn_t *reply;
    tdata_t *tdata = NULL;
    inst_buf_t *inst;
    inst_entry_t copy;

    if (!share_cpus) {
        cpu_set_t cpuset;

        CPU_ZERO(&cpuset);
        CPU_SET(0, &cpuset);
        r = sched_setaffinity(0, sizeof(cpuset), &cpuset);
        if (!r)
            printf("server is bound to CPU 0\n");
        else
            fprintf(stderr, "server failed to be bound to CPU 0\n");
    }

    fd = open("/dev/binder", O_RDWR);
    if (fd < 0) {
        fprintf(stderr, "failed to open binder device\n");
        return -1;
    }

#if (!defined(INLINE_TRANSACTION_DATA))
    if (mmap(NULL, 128 * 1024, PROT_READ, MAP_PRIVATE, fd, 0) == MAP_FAILED) {
        fprintf(stderr, "server failed to mmap shared buffer\n");
        return -1;
    }
#endif

    binder = SVC_BINDER;
    cookie = SVC_COOKIE;

    r = add_service(fd, binder, cookie, service, sizeof(service) / 2);
    if (r < 0) {
        printf("server failed to add instrumentation service\n");
        return -1;
    }
    printf("server added instrumentation service\n");

    r = start_looper(fd);
    if (r < 0) {
        printf("server failed to start looper\n");
        return -1;
    }

    bwr.read_buffer = (unsigned long)rbuf;
    while (1) {
        bwr.read_size = sizeof(rbuf);
        bwr.read_consumed = 0;
        bwr.write_size = 0;

        ioctl_read++;
        r = ioctl(fd, BINDER_WRITE_READ, &bwr);
        if (r < 0) {
            fprintf(stderr, "server failed ioctl\n");
            return r;
        }
        INST_RECORD(&copy);

        p = rbuf;
        len = bwr.read_consumed;
        while (len > 0) {
            r = server_parse_command(p, len, &tdata, &reply);
            //hexdump(tdata, bwr.read_consumed);
            if (r < 0)
                return r;

            p += r;
            len -= r;

#if (defined(SIMULATE_FREE_BUFFER) || !defined(INLINE_TRANSACTION_DATA))
            if (tdata)
                FREE_BUFFER(fd, (void *)tdata->data.ptr.buffer);
#endif
            if (!reply) {
                //hexdump(rbuf, bwr.read_consumed);
                continue;
            }

            inst = (inst_buf_t *)reply->tdata.data.ptr.buffer;
            INST_ENTRY_COPY(inst, "S_RECV", &copy);
            //acsiidump(inst,sizeof(*inst)+data_SZ);
            bwr.write_buffer = (unsigned long)reply;
            bwr.write_size = sizeof(*reply);
            bwr.write_consumed = 0;
            bwr.read_size = 0;

            INST_ENTRY(inst, "S_REPLY");

            ioctl_write++;
            r = ioctl(fd, BINDER_WRITE_READ, &bwr);
            if (r < 0) {
                fprintf(stderr, "server failed reply ioctl\n");
//......... portion of code omitted .........
Developer ID: BuaaAndroid, Project: android-binder-test-benchmarks, Lines: 101, Source: binder_tester.c


Note: The CPU_ZERO function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright in the source code belongs to the original authors. For redistribution and use, please follow the corresponding project's License; do not reproduce without permission.