

C++ cudaGetDeviceCount Function Code Examples

This article collects typical usage examples of the C++ cudaGetDeviceCount function, drawn from open-source projects. If you are wondering what cudaGetDeviceCount does, how to call it, or what real-world code that uses it looks like, the curated examples below should help.


The following presents 15 code examples of the cudaGetDeviceCount function, ordered by popularity.
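Before diving into the project examples, here is a minimal, self-contained sketch of the calling pattern they all share: cudaGetDeviceCount writes the number of devices through an int* and returns a cudaError_t, so careful callers check both the returned status and the count itself. This program is illustrative only and is not taken from any of the projects listed below.

#include <cstdio>
#include <cuda_runtime.h>

int main()
{
    int deviceCount = 0;
    cudaError_t err = cudaGetDeviceCount(&deviceCount);

    if (err != cudaSuccess) {
        // The query itself failed, e.g. no driver or an incompatible runtime
        fprintf(stderr, "cudaGetDeviceCount failed: %s\n", cudaGetErrorString(err));
        return 1;
    }

    if (deviceCount == 0) {
        // The call can succeed and still report zero CUDA-capable devices
        fprintf(stderr, "There are no available devices that support CUDA\n");
        return 1;
    }

    printf("Detected %d CUDA capable device(s)\n", deviceCount);
    return 0;
}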

Example 1: configureGpu

bool configureGpu(bool use_gpu_acceleration, std::vector<int> &valid_devices, int use_all_gpus, 
  int &numBkgWorkers_gpu) {
#ifdef ION_COMPILE_CUDA
  const unsigned long long gpu_mem = 2.5 * 1024 * 1024 * 1024;

  if (!use_gpu_acceleration)
    return false;

  // Get number of GPUs in system
  int num_gpus = 0;
  cudaError_t err = cudaGetDeviceCount( &num_gpus );

  if (err != cudaSuccess) {
    printf("CUDA: No GPU device available. Defaulting to CPU only computation\n");
    return false;
  }

  if ( use_all_gpus )
  {
    // Add all GPUs to the valid device list
    for ( int dev = 0; dev < num_gpus;  dev++ )
      valid_devices.push_back(dev);
  }
  else
  {
    // Only add the highest compute devices to the compute list
    int version = 0;
    int major = 0;
    int minor = 0;
    cudaDeviceProp dev_props;

    // Iterate over GPUs to find the highest compute device
    for ( int dev = 0; dev < num_gpus;  dev++ )
    {
      cudaGetDeviceProperties( &dev_props, dev );
      if ( (dev_props.major*10) + dev_props.minor > version )
      {
        version = (dev_props.major*10) + dev_props.minor;
        major = dev_props.major;
        minor = dev_props.minor;
      }
    }

    for ( int dev = 0; dev < num_gpus;  dev++ )
    {
      cudaGetDeviceProperties(&dev_props, dev);
      if (dev_props.major == major && dev_props.minor == minor) {
        if (dev_props.totalGlobalMem > gpu_mem) {
          valid_devices.push_back(dev);
        }
      }
    } 
  }

  // Set the number of GPU workers and tell CUDA about our list of valid devices
  if (valid_devices.size() > 0) {
    numBkgWorkers_gpu = int(valid_devices.size());
    cudaSetValidDevices( &valid_devices[0], int( valid_devices.size() ) );
  }
  else {
    printf("CUDA: No GPU device available. Defaulting to CPU only computation\n");
    return false;   
  }

 
  PoissonCDFApproxMemo poiss_cache; 
  poiss_cache.Allocate (MAX_POISSON_TABLE_COL,MAX_POISSON_TABLE_ROW,POISSON_TABLE_STEP);
  poiss_cache.GenerateValues(); // fill out my table


  for(int i=valid_devices.size()-1 ; i >= 0; i--){
    try{
      //cudaSetDevice(valid_devices[i]);
      cout << "CUDA "<< valid_devices[i] << ": Creating Context and Constant memory on device with id: "<<  valid_devices[i]<< endl;
      InitConstantMemoryOnGpu(valid_devices[i],poiss_cache);
    }
    catch(cudaException &e) {
      cout << "CUDA "<< valid_devices[i] << ": Context could not be created. removing device with id: "<<  valid_devices[i] << " from valid device list" << endl;
      valid_devices.erase (valid_devices.begin()+i);
      numBkgWorkers_gpu -= 1;
      if(numBkgWorkers_gpu == 0) cout << "CUDA: no context could be created, defaulting to CPU only execution" << endl; 
    }

  }

  if(numBkgWorkers_gpu == 0) return false;

  return true;

#else
  
  return false;

#endif

}
Author: GerritvanNiekerk, Project: TS, Lines: 96, Source: cudaWrapper.cpp

Example 2: main

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char **argv)
{
    pArgc = &argc;
    pArgv = argv;

    printf("%s Starting...\n\n", argv[0]);
    printf(" CUDA Device Query (Runtime API) version (CUDART static linking)\n\n");

    int deviceCount = 0;
    cudaError_t error_id = cudaGetDeviceCount(&deviceCount);

    if (error_id != cudaSuccess)
    {
        printf("cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id));
        exit(EXIT_FAILURE);
    }

    // This function call returns 0 if there are no CUDA capable devices.
    if (deviceCount == 0)
    {
        printf("There are no available device(s) that support CUDA\n");
    }
    else
    {
        printf("Detected %d CUDA Capable device(s)\n", deviceCount);
    }

    int dev, driverVersion = 0, runtimeVersion = 0;

    for (dev = 0; dev < deviceCount; ++dev)
    {
        cudaSetDevice(dev);
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, dev);

        printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);

        // Console log
        cudaDriverGetVersion(&driverVersion);
        cudaRuntimeGetVersion(&runtimeVersion);
        printf("  CUDA Driver Version / Runtime Version          %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10);
        printf("  CUDA Capability Major/Minor version number:    %d.%d\n", deviceProp.major, deviceProp.minor);

        char msg[256];
        sprintf(msg, "  Total amount of global memory:                 %.0f MBytes (%llu bytes)\n",
                (float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem);
        printf("%s", msg);

        printf("  (%2d) Multiprocessors x (%3d) CUDA Cores/MP:    %d CUDA Cores\n",
               deviceProp.multiProcessorCount,
               _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
               _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
        printf("  GPU Clock rate:                                %.0f MHz (%0.2f GHz)\n", deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f);


#if CUDART_VERSION >= 5000
        // This is supported in CUDA 5.0 (runtime API device properties)
        printf("  Memory Clock rate:                             %.0f Mhz\n", deviceProp.memoryClockRate * 1e-3f);
        printf("  Memory Bus Width:                              %d-bit\n",   deviceProp.memoryBusWidth);

        if (deviceProp.l2CacheSize)
        {
            printf("  L2 Cache Size:                                 %d bytes\n", deviceProp.l2CacheSize);
        }
#else
        // This only available in CUDA 4.0-4.2 (but these were only exposed in the CUDA Driver API)
        int memoryClock;
        getCudaAttribute<int>(&memoryClock, CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE, dev);
        printf("  Memory Clock rate:                             %.0f Mhz\n", memoryClock * 1e-3f);
        int memBusWidth;
        getCudaAttribute<int>(&memBusWidth, CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH, dev);
        printf("  Memory Bus Width:                              %d-bit\n", memBusWidth);
        int L2CacheSize;
        getCudaAttribute<int>(&L2CacheSize, CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE, dev);

        if (L2CacheSize)
        {
            printf("  L2 Cache Size:                                 %d bytes\n", L2CacheSize);
        }
#endif

        printf("  Max Texture Dimension Size (x,y,z)             1D=(%d), 2D=(%d,%d), 3D=(%d,%d,%d)\n",
               deviceProp.maxTexture1D   , deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1],
               deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]);
        printf("  Max Layered Texture Size (dim) x layers        1D=(%d) x %d, 2D=(%d,%d) x %d\n",
               deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1],
               deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1], deviceProp.maxTexture2DLayered[2]);

        printf("  Total amount of constant memory:               %lu bytes\n", deviceProp.totalConstMem);
        printf("  Total amount of shared memory per block:       %lu bytes\n", deviceProp.sharedMemPerBlock);
        printf("  Total number of registers available per block: %d\n", deviceProp.regsPerBlock);
        printf("  Warp size:                                     %d\n", deviceProp.warpSize);
        printf("  Maximum number of threads per multiprocessor:  %d\n", deviceProp.maxThreadsPerMultiProcessor);
        printf("  Maximum number of threads per block:           %d\n", deviceProp.maxThreadsPerBlock);
        printf("  Maximum sizes of each dimension of a block:    %d x %d x %d\n",
               deviceProp.maxThreadsDim[0],
//......... some code omitted .........
Author: BrianBoLiu, Project: nvidia_sdk_samples, Lines: 101, Source: deviceQuery.cpp

Example 3: initQuda

void initQuda(int dev)
{
  static int initialized = 0;
  if (initialized) {
    return;
  }
  initialized = 1;

#if (CUDA_VERSION >= 4000) && defined(MULTI_GPU)
  //check if CUDA_NIC_INTEROP is set to 1 in the environment
  char* cni_str = getenv("CUDA_NIC_INTEROP");
  if(cni_str == NULL){
    errorQuda("Environment variable CUDA_NIC_INTEROP is not set\n");
  }
  int cni_int = atoi(cni_str);
  if (cni_int != 1){
    errorQuda("Environment variable CUDA_NIC_INTEROP is not set to 1\n");    
  }
#endif

  int deviceCount;
  cudaGetDeviceCount(&deviceCount);
  if (deviceCount == 0) {
    errorQuda("No devices supporting CUDA");
  }

  for(int i=0; i<deviceCount; i++) {
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, i);
    printfQuda("QUDA: Found device %d: %s\n", i, deviceProp.name);
  }

#ifdef QMP_COMMS
  int ndim;
  const int *dim;

  if ( QMP_is_initialized() != QMP_TRUE ) {
    errorQuda("QMP is not initialized");
  }
  num_QMP=QMP_get_number_of_nodes();
  rank_QMP=QMP_get_node_number();
  
  dev += rank_QMP % deviceCount;
  ndim = QMP_get_logical_number_of_dimensions();
  dim = QMP_get_logical_dimensions();

#elif defined(MPI_COMMS)

  comm_init();
  dev=comm_gpuid();

#else
  if (dev < 0) dev = deviceCount - 1;
#endif
  
  // Used for applying the gauge field boundary condition
  if( commCoords(3) == 0 ) qudaPt0=true;
  else qudaPt0=false;

  if( commCoords(3) == commDim(3)-1 ) qudaPtNm1=true;
  else qudaPtNm1=false;

  cudaDeviceProp deviceProp;
  cudaGetDeviceProperties(&deviceProp, dev);
  if (deviceProp.major < 1) {
    errorQuda("Device %d does not support CUDA", dev);
  }

  
  printfQuda("QUDA: Using device %d: %s\n", dev, deviceProp.name);

  cudaSetDevice(dev);
#ifdef HAVE_NUMA
  if(numa_config_set){
    if(gpu_affinity[dev] >=0){
      printfQuda("Numa setting to cpu node %d\n", gpu_affinity[dev]);
      if(numa_run_on_node(gpu_affinity[dev]) != 0){
        printfQuda("Warning: Setting numa to cpu node %d failed\n", gpu_affinity[dev]);
      }
    }

  }
#endif

  initCache();
  quda::initBlas();
}
Author: witzel, Project: quda, Lines: 87, Source: interface_quda.cpp

Example 4: _papi_nvml_init_substrate

/** Initialize hardware counters, setup the function vector table
 * and get hardware information, this routine is called when the
 * PAPI process is initialized (IE PAPI_library_init)
 */
int
_papi_nvml_init_substrate( int cidx )
{
		nvmlReturn_t ret;
		cudaError_t cuerr;

		int cuda_count = 0;
		unsigned int nvml_count = 0;

		ret = nvmlInit();
		if ( NVML_SUCCESS != ret ) {
				strcpy(_nvml_vector.cmp_info.disabled_reason, "The NVIDIA managament library failed to initialize.");
				goto disable;
		}

		cuerr = cuInit( 0 );
		if ( CUDA_SUCCESS != cuerr ) {
				strcpy(_nvml_vector.cmp_info.disabled_reason, "The CUDA library failed to initialize.");
				goto disable;
		}

		/* Figure out the number of CUDA devices in the system */
		ret = nvmlDeviceGetCount( &nvml_count );
		if ( NVML_SUCCESS != ret ) {
				strcpy(_nvml_vector.cmp_info.disabled_reason, "Unable to get a count of devices from the NVIDIA managament library.");
				goto disable;
		}

		cuerr = cudaGetDeviceCount( &cuda_count );
		if ( cudaSuccess != cuerr ) {
				strcpy(_nvml_vector.cmp_info.disabled_reason, "Unable to get a device count from CUDA.");
				goto disable;
		}

		/* We can probably recover from this, when we're clever */
		if ( nvml_count != (unsigned int)cuda_count ) {
				strcpy(_nvml_vector.cmp_info.disabled_reason, "CUDA and the NVIDIA management library have different device counts.");
				goto disable;
		}

		device_count = cuda_count;

		/* A per device representation of what events are present */
		features = (int*)papi_malloc(sizeof(int) * device_count );

		/* Handles to each device */
		devices = (nvmlDevice_t*)papi_malloc(sizeof(nvmlDevice_t) * device_count);

		/* Figure out what events are supported on each card. */
		if ( (papi_errorcode = detectDevices( ) ) != PAPI_OK ) {
			papi_free(features);
			papi_free(devices);
			sprintf(_nvml_vector.cmp_info.disabled_reason, "An error occurred in device feature detection, please check your NVIDIA Management Library and CUDA install." );
			goto disable;
		}

		/* The assumption is that if everything went swimmingly in detectDevices, 
			all nvml calls here should be fine. */
		createNativeEvents( );

		/* Export the total number of events available */
		_nvml_vector.cmp_info.num_native_events = num_events;

		/* Export the component id */
		_nvml_vector.cmp_info.CmpIdx = cidx;

		/* Export the number of 'counters' */
		_nvml_vector.cmp_info.num_cntrs = num_events;

		return PAPI_OK;

disable:
		_nvml_vector.cmp_info.num_cntrs = 0;
		return PAPI_OK;	
}
Author: pyrovski, Project: papi-rapl, Lines: 79, Source: linux-nvml.c

Example 5: gpujpeg_init_device

/** Documented at declaration */
int
gpujpeg_init_device(int device_id, int flags)
{
    int dev_count;
    cudaGetDeviceCount(&dev_count);
    if ( dev_count == 0 ) {
        fprintf(stderr, "[GPUJPEG] [Error] No CUDA enabled device\n");
        return -1;
    }

    if ( device_id < 0 || device_id >= dev_count ) {
        fprintf(stderr, "[GPUJPEG] [Error] Selected device %d is out of bound. Devices on your system are in range %d - %d\n",
                device_id, 0, dev_count - 1);
        return -1;
    }

    struct cudaDeviceProp devProp;
    if ( cudaSuccess != cudaGetDeviceProperties(&devProp, device_id) ) {
        fprintf(stderr,
                "[GPUJPEG] [Error] Can't get CUDA device properties!\n"
                "[GPUJPEG] [Error] Do you have proper driver for CUDA installed?\n"
               );
        return -1;
    }

    if ( devProp.major < 1 ) {
        fprintf(stderr, "[GPUJPEG] [Error] Device %d does not support CUDA\n", device_id);
        return -1;
    }

    if ( flags & GPUJPEG_OPENGL_INTEROPERABILITY ) {
        cudaGLSetGLDevice(device_id);
        gpujpeg_cuda_check_error("Enabling OpenGL interoperability");
    }

    if ( flags & GPUJPEG_VERBOSE ) {
        int cuda_driver_version = 0;
        cudaDriverGetVersion(&cuda_driver_version);
        printf("CUDA driver version:   %d.%d\n", cuda_driver_version / 1000, (cuda_driver_version % 100) / 10);

        int cuda_runtime_version = 0;
        cudaRuntimeGetVersion(&cuda_runtime_version);
        printf("CUDA runtime version:  %d.%d\n", cuda_runtime_version / 1000, (cuda_runtime_version % 100) / 10);

        printf("Using Device #%d:       %s (c.c. %d.%d)\n", device_id, devProp.name, devProp.major, devProp.minor);
    }

    cudaSetDevice(device_id);
    gpujpeg_cuda_check_error("Set CUDA device");

    // Test by simple copying that the device is ready
    uint8_t data[] = {8};
    uint8_t* d_data = NULL;
    cudaMalloc((void**)&d_data, 1);
    cudaMemcpy(d_data, data, 1, cudaMemcpyHostToDevice);
    cudaFree(d_data);
    cudaError_t error = cudaGetLastError();
    if ( cudaSuccess != error ) {
        fprintf(stderr, "[GPUJPEG] [Error] Failed to initialize CUDA device.\n");
        if ( flags & GPUJPEG_OPENGL_INTEROPERABILITY )
            fprintf(stderr, "[GPUJPEG] [Info]  OpenGL interoperability is used, is OpenGL context available?\n");
        return -1;
    }

    return 0;
}
Author: zzilla, Project: media-streamer, Lines: 67, Source: gpujpeg_common.c

Example 6: gpuGetMaxGflopsDeviceId

// This function returns the best GPU (with maximum GFLOPS)
inline int gpuGetMaxGflopsDeviceId()
{
    int current_device     = 0, sm_per_multiproc  = 0;
    int max_perf_device    = 0;
    int device_count       = 0, best_SM_arch      = 0;
    int devices_prohibited = 0;
    
    unsigned long long max_compute_perf = 0;
    cudaDeviceProp deviceProp;
    cudaGetDeviceCount(&device_count);
    
    checkCudaErrors(cudaGetDeviceCount(&device_count));

    if (device_count == 0)
    {
        fprintf(stderr, "gpuGetMaxGflopsDeviceId() CUDA error: no devices supporting CUDA.\n");
        exit(EXIT_FAILURE);
    }

    // Find the best major SM Architecture GPU device
    while (current_device < device_count)
    {
        cudaGetDeviceProperties(&deviceProp, current_device);

        // If this GPU is not running on Compute Mode prohibited, then we can add it to the list
        if (deviceProp.computeMode != cudaComputeModeProhibited)
        {
            if (deviceProp.major > 0 && deviceProp.major < 9999)
            {
                best_SM_arch = MAX(best_SM_arch, deviceProp.major);
            }
        }
        else
        {
            devices_prohibited++;
        }

        current_device++;
    }

    if (devices_prohibited == device_count)
    {
    	fprintf(stderr, "gpuGetMaxGflopsDeviceId() CUDA error: all devices have compute mode prohibited.\n");
    	exit(EXIT_FAILURE);
    }

    // Find the best CUDA capable GPU device
    current_device = 0;

    while (current_device < device_count)
    {
        cudaGetDeviceProperties(&deviceProp, current_device);

        // If this GPU is not running on Compute Mode prohibited, then we can add it to the list
        if (deviceProp.computeMode != cudaComputeModeProhibited)
        {
            if (deviceProp.major == 9999 && deviceProp.minor == 9999)
            {
                sm_per_multiproc = 1;
            }
            else
            {
                sm_per_multiproc = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
            }

            unsigned long long compute_perf  = (unsigned long long) deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate;

            if (compute_perf  > max_compute_perf)
            {
                // If we find GPU with SM major > 2, search only these
                if (best_SM_arch > 2)
                {
                    // If our device == best_SM_arch, choose this one, or else pass
                    if (deviceProp.major == best_SM_arch)
                    {
                        max_compute_perf  = compute_perf;
                        max_perf_device   = current_device;
                    }
                }
                else
                {
                    max_compute_perf  = compute_perf;
                    max_perf_device   = current_device;
                }
            }
        }

        ++current_device;
    }

    return max_perf_device;
}
Author: Hopobcn, Project: nbody, Lines: 93, Source: helper_cuda.hpp

Example 7: parse_cmdline


//......... some code omitted .........
    clp.setOption("fixture",                 &fixtureSpec,  "fixture string: \"XxYxZ\"");
    clp.setOption("fixture-x",               &cmdline.USE_FIXTURE_X,  "fixture");
    clp.setOption("fixture-y",               &cmdline.USE_FIXTURE_Y,  "fixture");
    clp.setOption("fixture-z",               &cmdline.USE_FIXTURE_Z,  "fixture");
    clp.setOption("fixture-quadratic", "no-fixture-quadratic", &cmdline.USE_FIXTURE_QUADRATIC,  "quadratic");

    clp.setOption("atomic", "no-atomic",      &cmdline.USE_ATOMIC ,  "atomic");
    clp.setOption("trials",                   &cmdline.USE_TRIALS,  "trials");
    clp.setOption("xml-file",                 &cmdline.USE_FENL_XML_FILE, "XML file containing solver parameters");
    clp.setOption("belos", "no-belos",        &cmdline.USE_BELOS ,  "use Belos solver");
    clp.setOption("muelu", "no-muelu",        &cmdline.USE_MUELU,  "use MueLu preconditioner");
    clp.setOption("mean-based", "no-mean-based", &cmdline.USE_MEANBASED,  "use mean-based preconditioner");
    if(cmdline.USE_MUELU || cmdline.USE_MEANBASED)
        cmdline.USE_BELOS = true;

    clp.setOption("sampling", &cmdline.USE_UQ_SAMPLING, num_sampling_types, sampling_values, sampling_names, "UQ sampling method");
    clp.setOption("uq-fake",                  &cmdline.USE_UQ_FAKE,  "setup a fake UQ problem of this size");
    clp.setOption("uq-dim",                   &cmdline.USE_UQ_DIM,  "UQ dimension");
    clp.setOption("uq-order",                 &cmdline.USE_UQ_ORDER,  "UQ order");
    clp.setOption("uq-init-level",            &cmdline.USE_UQ_INIT_LEVEL,  "Initial adaptive sparse grid level");
    clp.setOption("uq-max-level",             &cmdline.USE_UQ_MAX_LEVEL,  "Max adaptive sparse grid level");
    clp.setOption("uq-max-samples",           &cmdline.USE_UQ_MAX_SAMPLES,  "Max number of samples to run");
    clp.setOption("uq-tol",                   &cmdline.USE_UQ_TOL,  "Adaptive sparse grid tolerance");
    clp.setOption("diff-coeff-linear",        &cmdline.USE_DIFF_COEFF_LINEAR,  "Linear term in diffusion coefficient");
    clp.setOption("diff-coeff-constant",      &cmdline.USE_DIFF_COEFF_CONSTANT,  "Constant term in diffusion coefficient");
    clp.setOption("mean",                     &cmdline.USE_MEAN,  "KL diffusion mean");
    clp.setOption("var",                      &cmdline.USE_VAR,  "KL diffusion variance");
    clp.setOption("cor",                      &cmdline.USE_COR,  "KL diffusion correlation");
    clp.setOption("exponential", "no-exponential", &cmdline.USE_EXPONENTIAL,  "take exponential of KL diffusion coefficient");
    clp.setOption("exp-shift",                &cmdline.USE_EXP_SHIFT,  "Linear shift of exponential of KL diffusion coefficient");
    clp.setOption("exp-scale",                &cmdline.USE_EXP_SCALE,  "Multiplicative scale of exponential of KL diffusion coefficient");
    clp.setOption("discontinuous-exp-scale", "continuous-exp-scale", &cmdline.USE_DISC_EXP_SCALE,  "use discontinuous scale factor on exponential");
    clp.setOption("isotropic", "anisotropic", &cmdline.USE_ISOTROPIC,  "use isotropic or anisotropic diffusion coefficient");
    clp.setOption("coeff-src",                &cmdline.USE_COEFF_SRC,  "Coefficient for source term");
    clp.setOption("coeff-adv",                &cmdline.USE_COEFF_ADV,  "Coefficient for advection term");
    clp.setOption("sparse", "tensor",         &cmdline.USE_SPARSE ,  "use sparse or tensor grid");
    clp.setOption("ensemble",                 &cmdline.USE_UQ_ENSEMBLE,  "UQ ensemble size.  This needs to be a valid choice based on available instantiations.");
    clp.setOption("grouping", &cmdline.USE_GROUPING, num_grouping_types, grouping_values, grouping_names, "Sample grouping method for ensemble propagation");
    clp.setOption("surrogate-grouping-level", &cmdline.TAS_GROUPING_INITIAL_LEVEL,  "Starting level for surrogate-based grouping");

    clp.setOption("vtune", "no-vtune",       &cmdline.VTUNE ,  "connect to vtune");
    clp.setOption("verbose", "no-verbose",   &cmdline.VERBOSE, "print verbose intialization info");
    clp.setOption("print", "no-print",        &cmdline.PRINT,  "print detailed test output");
    clp.setOption("print-its", "no-print-its",&cmdline.PRINT_ITS,  "print solver iterations after each sample");
    clp.setOption("summarize", "no-summarize",&cmdline.SUMMARIZE,  "summarize Teuchos timers at end of run");

    bool doDryRun = false;
    clp.setOption("echo", "no-echo",          &doDryRun,  "dry-run only");

    switch (clp.parse(argc, argv)) {
    case Teuchos::CommandLineProcessor::PARSE_HELP_PRINTED:
        return CLP_HELP;
    case Teuchos::CommandLineProcessor::PARSE_ERROR:
    case Teuchos::CommandLineProcessor::PARSE_UNRECOGNIZED_OPTION:
        return CLP_ERROR;
    case Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL:
        break;
    }

#if defined( KOKKOS_HAVE_CUDA )
    // Set CUDA device based on local node rank
    if (cmdline.USE_CUDA && cmdline.USE_CUDA_DEV == -1) {
        int local_rank = 0;
        char *str;
        if ((str = std::getenv("MV2_COMM_WORLD_LOCAL_RANK")))
            local_rank = std::atoi(str);
        else if ((str = getenv("OMPI_COMM_WORLD_LOCAL_RANK")))
            local_rank = std::atoi(str);
        else if ((str = std::getenv("SLURM_LOCALID")))
            local_rank = std::atoi(str);
        cmdline.USE_CUDA_DEV = local_rank % cmdline.USE_NGPUS;

        // Check device is valid
        int num_device;
        cudaGetDeviceCount(&num_device);
        TEUCHOS_TEST_FOR_EXCEPTION(
            cmdline.USE_CUDA_DEV >= cmdline.USE_NGPUS, std::logic_error,
            "Invalid device ID " << cmdline.USE_CUDA_DEV << ".  You probably are trying" <<
            " to run with too many GPUs per node");
    }
#endif

    sscanf( fixtureSpec.c_str() , "%dx%dx%d" ,
            &cmdline.USE_FIXTURE_X ,
            &cmdline.USE_FIXTURE_Y ,
            &cmdline.USE_FIXTURE_Z );

    cmdline.USE_UQ = uq;

    if (doDryRun) {
        print_cmdline( std::cout , cmdline );
        cmdline.ECHO  = 1;
    } else {
        cmdline.ECHO  = 0;
    }
    cmdline.ERROR  = 0 ;

    return CLP_OK;

}
Author: mhoemmen, Project: Trilinos, Lines: 101, Source: fenl_utils.cpp

Example 8: main

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char **argv)
{
    pArgc = &argc;
    pArgv = argv;

    printf("%s Starting...\n\n", argv[0]);
    printf(" CUDA Device Query (Runtime API) version (CUDART static linking)\n\n");

    int deviceCount = 0;
    cudaError_t error_id = cudaGetDeviceCount(&deviceCount);

    if (error_id != cudaSuccess)
    {
        printf("cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id));
        printf("Result = FAIL\n");
        exit(EXIT_FAILURE);
    }

    // This function call returns 0 if there are no CUDA capable devices.
    if (deviceCount == 0)
    {
        printf("There are no available device(s) that support CUDA\n");
    }
    else
    {
        printf("Detected %d CUDA Capable device(s)\n", deviceCount);
    }

    int dev, driverVersion = 0, runtimeVersion = 0;

    for (dev = 0; dev < deviceCount; ++dev)
    {
        cudaSetDevice(dev);
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, dev);

        printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);

        // Console log
        cudaDriverGetVersion(&driverVersion);
        cudaRuntimeGetVersion(&runtimeVersion);
        printf("  CUDA Driver Version / Runtime Version          %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10);
        printf("  CUDA Capability Major/Minor version number:    %d.%d\n", deviceProp.major, deviceProp.minor);

        char msg[256];
        SPRINTF(msg, "  Total amount of global memory:                 %.0f MBytes (%llu bytes)\n",
                (float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem);
        printf("%s", msg);

        printf("  (%2d) Multiprocessors, (%3d) CUDA Cores/MP:     %d CUDA Cores\n",
               deviceProp.multiProcessorCount,
               _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
               _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
        printf("  GPU Max Clock rate:                            %.0f MHz (%0.2f GHz)\n", deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f);


#if CUDART_VERSION >= 5000
        // This is supported in CUDA 5.0 (runtime API device properties)
        printf("  Memory Clock rate:                             %.0f Mhz\n", deviceProp.memoryClockRate * 1e-3f);
        printf("  Memory Bus Width:                              %d-bit\n",   deviceProp.memoryBusWidth);

        if (deviceProp.l2CacheSize)
        {
            printf("  L2 Cache Size:                                 %d bytes\n", deviceProp.l2CacheSize);
        }

#else
        // This only available in CUDA 4.0-4.2 (but these were only exposed in the CUDA Driver API)
        int memoryClock;
        getCudaAttribute<int>(&memoryClock, CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE, dev);
        printf("  Memory Clock rate:                             %.0f Mhz\n", memoryClock * 1e-3f);
        int memBusWidth;
        getCudaAttribute<int>(&memBusWidth, CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH, dev);
        printf("  Memory Bus Width:                              %d-bit\n", memBusWidth);
        int L2CacheSize;
        getCudaAttribute<int>(&L2CacheSize, CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE, dev);

        if (L2CacheSize)
        {
            printf("  L2 Cache Size:                                 %d bytes\n", L2CacheSize);
        }

#endif

        printf("  Maximum Texture Dimension Size (x,y,z)         1D=(%d), 2D=(%d, %d), 3D=(%d, %d, %d)\n",
               deviceProp.maxTexture1D   , deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1],
               deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]);
        printf("  Maximum Layered 1D Texture Size, (num) layers  1D=(%d), %d layers\n",
               deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1]);
        printf("  Maximum Layered 2D Texture Size, (num) layers  2D=(%d, %d), %d layers\n",
               deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1], deviceProp.maxTexture2DLayered[2]);


        printf("  Total amount of constant memory:               %lu bytes\n", deviceProp.totalConstMem);
        printf("  Total amount of shared memory per block:       %lu bytes\n", deviceProp.sharedMemPerBlock);
        printf("  Total number of registers available per block: %d\n", deviceProp.regsPerBlock);
//......... some code omitted .........
Author: biocyberman, Project: TS, Lines: 101, Source: deviceQuery.cpp

Example 9: PetscOptionsCheckInitial_Private


//......... some code omitted .........
  ierr = PetscOptionsGetString(NULL,"-log_trace",mname,250,&flg1);CHKERRQ(ierr);
  if (flg1) {
    char name[PETSC_MAX_PATH_LEN],fname[PETSC_MAX_PATH_LEN];
    FILE *file;
    if (mname[0]) {
      sprintf(name,"%s.%d",mname,rank);
      ierr = PetscFixFilename(name,fname);CHKERRQ(ierr);
      file = fopen(fname,"w");
      if (!file) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_FILE_OPEN,"Unable to open trace file: %s",fname);
    } else file = PETSC_STDOUT;
    ierr = PetscLogTraceBegin(file);CHKERRQ(ierr);
  }

  ierr   = PetscOptionsGetViewer(PETSC_COMM_WORLD,NULL,"-log_view",NULL,&format,&flg4);CHKERRQ(ierr);
  if (flg4) {
    if (format == PETSC_VIEWER_ASCII_XML){
      ierr = PetscLogNestedBegin();CHKERRQ(ierr);
    } else {
      ierr = PetscLogDefaultBegin();CHKERRQ(ierr);
    }
  }
#endif

  ierr = PetscOptionsGetBool(NULL,"-saws_options",&PetscOptionsPublish,NULL);CHKERRQ(ierr);

#if defined(PETSC_HAVE_CUDA)
  ierr = PetscOptionsHasName(NULL,"-cuda_show_devices",&flg1);CHKERRQ(ierr);
  if (flg1) {
    struct cudaDeviceProp prop;
    int                   devCount;
    int                   device;
    cudaError_t           err = cudaSuccess;

    err = cudaGetDeviceCount(&devCount);
    if (err != cudaSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SYS,"error in cudaGetDeviceCount %s",cudaGetErrorString(err));
    for (device = 0; device < devCount; ++device) {
      err = cudaGetDeviceProperties(&prop, device);
      if (err != cudaSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SYS,"error in cudaGetDeviceProperties %s",cudaGetErrorString(err));
      ierr = PetscPrintf(PETSC_COMM_WORLD, "CUDA device %d: %s\n", device, prop.name);CHKERRQ(ierr);
    }
  }
  {
    int size;
    ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
    if (size>1) {
      int         devCount, device, rank;
      cudaError_t err = cudaSuccess;

      /* check to see if we force multiple ranks to hit the same GPU */
      ierr = PetscOptionsGetInt(NULL,"-cuda_set_device", &device, &flg1);CHKERRQ(ierr);
      if (flg1) {
        err = cudaSetDevice(device);
        if (err != cudaSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SYS,"error in cudaSetDevice %s",cudaGetErrorString(err));
      } else {
        /* we're not using the same GPU on multiple MPI ranks, so try to allocate different GPUs to different processes */

        /* First get the device count */
        err   = cudaGetDeviceCount(&devCount);
        if (err != cudaSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SYS,"error in cudaGetDeviceCount %s",cudaGetErrorString(err));

        /* next determine the rank and then set the device via a mod */
        ierr   = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
        device = rank % devCount;
        err    = cudaSetDevice(device);
        if (err != cudaSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SYS,"error in cudaSetDevice %s",cudaGetErrorString(err));
      }
Author: pombredanne, Project: petsc, Lines: 67, Source: init.c

Example 10: main

int main(int argc, char** argv)
{
	bool srcbin = false;
	bool invbk = false;
	if(argc < 3){
		printf("Not enough args!\narg1: target image\narg2: source image\narg3: do source image adaptive threshold or not\narg4: invert back ground or not\n");
		getchar();
		return 1;
	}
	if(argc >= 4){
		if(!strcmp(argv[3], "1"))
			srcbin = 1;
	}
	if(argc >= 5){
		if(!strcmp(argv[4], "1"))
			invbk = 1;
	}

	IplImage* srcimg= 0, *srcimgb= 0, *srcimgb2 = 0, *bimg = 0, *b2img = 0,*bugimg = 0, *alg2dst = 0;
	srcimg= cvLoadImage(argv[2], 1);
	if (!srcimg)
	{
		printf("src img %s load failed!\n", argv[2]);
		getchar();
		return 1;
	}
	
	//choosing the parameters for our ccl
	int bn = 8; //how many partitions
	int nwidth = 512;
	if(srcimg->width > 512){
		nwidth = 1024;
		bn = 6;
	}
	if(srcimg->width > 1024){
		nwidth = 2048;
		bn = 3;
	}
	if(srcimg->width > 2048){
		printf("warning, image too wide, max support 2048. image is truncated.\n");
		getchar();
		return 1;
	}
	
	//start selection gpu devices
	int devCount;
	int smCnt = 0;
    cudaGetDeviceCount(&devCount);
 
    // Iterate through devices
	int devChosen = 0;
    for (int i = 0; i < devCount; ++i)
    {
        cudaDeviceProp devProp;
        cudaGetDeviceProperties(&devProp, i);
		if(devProp.major >= 2){//only one device supported
			smCnt = max(smCnt, devProp.multiProcessorCount);
			if(devProp.multiProcessorCount == smCnt)
				devChosen = i;
		}
    }
	
	if(smCnt == 0){
		//our ccl requires CUDA cap 2.0 or above, but Ostava's ccl can run on any CUDA gpu
		printf("Error, no device with cap 2.x found. Only cpu alg will be run.\n");
		getchar();
		return 1;
	}
	
	if(smCnt != 0){
		cudaSetDevice(devChosen);
		bn = bn * smCnt;
	}

	int nheight = (cvGetSize(srcimg).height-2) / (2*bn);
	if((nheight*2*bn+2) < cvGetSize(srcimg).height)
		nheight++;
	nheight = nheight*2*bn+2;

	if(smCnt != 0)
		printf("gpu ccl for image width 512, 1024, 2048.\nchoosing device %d, width %d, height %d, blocks %d\n", devChosen, nwidth, nheight, bn);

	srcimgb= cvCreateImage(cvSize(nwidth, cvGetSize(srcimg).height),IPL_DEPTH_8U,1);
	srcimgb2= cvCreateImage(cvSize(nwidth, cvGetSize(srcimg).height),IPL_DEPTH_8U,1);
	cvSetImageROI(srcimg, cvRect(0, 0, min(cvGetSize(srcimg).width, nwidth), cvGetSize(srcimg).height));
	cvSetImageROI(srcimgb2, cvRect(0, 0, min(cvGetSize(srcimg).width, nwidth), cvGetSize(srcimg).height));
	cvSet(srcimgb2, cvScalar(0,0,0));
	cvCvtColor(srcimg, srcimgb2, CV_BGRA2GRAY);
	cvResetImageROI(srcimgb2);
	cvReleaseImage(&srcimg);
	if(srcbin)
		cvAdaptiveThreshold(srcimgb2, srcimgb, 1.0, CV_ADAPTIVE_THRESH_MEAN_C, invbk ? CV_THRESH_BINARY_INV :  CV_THRESH_BINARY);
	else
		cvThreshold(srcimgb2, srcimgb, 0.0, 1.0, invbk ? CV_THRESH_BINARY_INV :  CV_THRESH_BINARY);
	boundCheck(srcimgb);

	cvScale(srcimgb, srcimgb2, 255);
	//the source binary image to be labeled is saved as bsrc.bmp
	cvSaveImage("bsrc.bmp", srcimgb2);
	cvSet(srcimgb2, cvScalar(0,0,0));
//......... some code omitted .........
Author: akonneker, Project: gpu-2d-binary-ccl, Lines: 101, Source: main.cpp

Example 11: main

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv) 
{
    pArgc = &argc;
    pArgv = argv;

    /* shrQAStart(argc, argv);
       shrSetLogFileName("deviceQuery.txt"); */
    shrLog("%s Starting...\n\n", argv[0]);
    shrLog(" CUDA Device Query (Runtime API) version (CUDART static linking)\n\n");

    int deviceCount = 0;
    cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
    if (error_id != cudaSuccess) {
        shrLog( "cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id) );
        return -1;
    }
    // This function call returns 0 if there are no CUDA capable devices.
    if (deviceCount == 0)
        shrLog("There is no device supporting CUDA\n");
    else
        shrLog("Found %d CUDA Capable device(s)\n", deviceCount);

    int dev, driverVersion = 0, runtimeVersion = 0;
    for (dev = 0; dev < deviceCount; ++dev) {
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, dev);

        shrLog("\nDevice %d: \"%s\"\n", dev, deviceProp.name);

#if CUDART_VERSION >= 2020
        // Console log
        cudaDriverGetVersion(&driverVersion);
        cudaRuntimeGetVersion(&runtimeVersion);
        shrLog("  CUDA Driver Version / Runtime Version          %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10);
#endif
        shrLog("  CUDA Capability Major/Minor version number:    %d.%d\n", deviceProp.major, deviceProp.minor);

        char msg[256];
        sprintf(msg, "  Total amount of global memory:                 %.0f MBytes (%llu bytes)\n",
                (float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem);
        shrLog("%s", msg);
#if CUDART_VERSION >= 2000
        shrLog("  (%2d) Multiprocessors x (%2d) CUDA Cores/MP:     %d CUDA Cores\n",
               deviceProp.multiProcessorCount,
               ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
               ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
#endif
        shrLog("  GPU Clock Speed:                               %.2f GHz\n", deviceProp.clockRate * 1e-6f);
#if CUDART_VERSION >= 4000
        // This is not available in the CUDA Runtime API, so we make the necessary calls the driver API to support this for output
        int memoryClock;
        getCudaAttribute<int>( &memoryClock, CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE, dev );
        shrLog("  Memory Clock rate:                             %.2f Mhz\n", memoryClock * 1e-3f);
        int memBusWidth;
        getCudaAttribute<int>( &memBusWidth, CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH, dev );
        shrLog("  Memory Bus Width:                              %d-bit\n", memBusWidth);
        int L2CacheSize;
        getCudaAttribute<int>( &L2CacheSize, CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE, dev );
        if (L2CacheSize) {
            shrLog("  L2 Cache Size:                                 %d bytes\n", L2CacheSize);
        }

        shrLog("  Max Texture Dimension Size (x,y,z)             1D=(%d), 2D=(%d,%d), 3D=(%d,%d,%d)\n",
               deviceProp.maxTexture1D, deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1],
                deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]);
        shrLog("  Max Layered Texture Size (dim) x layers        1D=(%d) x %d, 2D=(%d,%d) x %d\n",
               deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1],
                deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1], deviceProp.maxTexture2DLayered[2]);
#endif
        shrLog("  Total amount of constant memory:               %u bytes\n", deviceProp.totalConstMem);
        shrLog("  Total amount of shared memory per block:       %u bytes\n", deviceProp.sharedMemPerBlock);
        shrLog("  Total number of registers available per block: %d\n", deviceProp.regsPerBlock);
        shrLog("  Warp size:                                     %d\n", deviceProp.warpSize);
        shrLog("  Maximum number of threads per block:           %d\n", deviceProp.maxThreadsPerBlock);
        shrLog("  Maximum sizes of each dimension of a block:    %d x %d x %d\n",
               deviceProp.maxThreadsDim[0],
                deviceProp.maxThreadsDim[1],
                deviceProp.maxThreadsDim[2]);
        shrLog("  Maximum sizes of each dimension of a grid:     %d x %d x %d\n",
               deviceProp.maxGridSize[0],
                deviceProp.maxGridSize[1],
                deviceProp.maxGridSize[2]);
        shrLog("  Maximum memory pitch:                          %u bytes\n", deviceProp.memPitch);
        shrLog("  Texture alignment:                             %u bytes\n", deviceProp.textureAlignment);

#if CUDART_VERSION >= 4000
        shrLog("  Concurrent copy and execution:                 %s with %d copy engine(s)\n", (deviceProp.deviceOverlap ? "Yes" : "No"), deviceProp.asyncEngineCount);
#else
        shrLog("  Concurrent copy and execution:                 %s\n", deviceProp.deviceOverlap ? "Yes" : "No");
#endif

#if CUDART_VERSION >= 2020
//......... some code omitted .........
Author: seichter, Project: kfusion, Lines: 101, Source: devicequery.cpp

Example 12: getDeviceProps

int getDeviceProps (int *deviceCount, char **deviceProps) {
    // Cuda Runtime interface
    void *cudaRT = NULL;
    cudaGetDeviceCount_f cudaGetDeviceCount = NULL;
    cudaGetDeviceProperties_f cudaGetDeviceProperties = NULL;

    cudaError_t cuErr;
    int ndevices; // Number of devices reported by Cuda runtime
    int undevices = 0; // Number of devices that are unusable by simEngine
    unsigned int deviceid;
    unsigned int sort;
    simCudaDevice *devices;

    cudaRT = dlopen(CUDART_LIBRARY_NAME, RTLD_NOW);
    if(!cudaRT) {
        char full_library_name[PATH_MAX];
        sprintf(full_library_name, "/usr/local/cuda/lib64/%s", CUDART_LIBRARY_NAME);
        cudaRT = dlopen(full_library_name, RTLD_NOW);
        if(!cudaRT) {
            sprintf(full_library_name, "/usr/local/cuda/lib/%s", CUDART_LIBRARY_NAME);
            cudaRT = dlopen(full_library_name, RTLD_NOW);
            if(!cudaRT) {
                snprintf(error_message, BUFFER_LENGTH,
                         "Failed to load CUDA runtime environment from %s.\n"
                         "\tIs the CUDA runtime environment installed in the default location\n"
                         "\tOR is LD_LIBRARY_PATH environment variable set to include CUDA libraries?",
                         CUDART_LIBRARY_NAME);
                error_message[BUFFER_LENGTH - 1] = '\0';
                return DeviceProps_NoCudaRuntime;
            }
        }
    }

    cudaGetDeviceCount = (cudaGetDeviceCount_f)dlsym(cudaRT, "cudaGetDeviceCount");
    cudaGetDeviceProperties = (cudaGetDeviceProperties_f)dlsym(cudaRT, "cudaGetDeviceProperties");

    if(!cudaGetDeviceCount || !cudaGetDeviceProperties) {
        snprintf(error_message, BUFFER_LENGTH,
                 "Failed to load CUDA functions from %s.\n"
                 "\tThe CUDA library found is incompatible with simEngine.",
                 CUDART_LIBRARY_NAME);
        error_message[BUFFER_LENGTH - 1] = '\0';
        return DeviceProps_NoCudaRuntime;
    }

    if (cudaSuccess != cudaGetDeviceCount(&ndevices)) {
        snprintf(error_message, BUFFER_LENGTH,
                 "Error obtaining device count.\n"
                 "\tIs there a CUDA capable GPU available on this computer?");
        error_message[BUFFER_LENGTH - 1] = '\0';
        return DeviceProps_UnknownError;
    }

    if (0 == ndevices) {
        snprintf(error_message, BUFFER_LENGTH,
                 "No suitable devices found.\n"
                 "\tIs your CUDA driver installed, and have you rebooted since installation?");
        error_message[BUFFER_LENGTH - 1] = '\0';
        return DeviceProps_NoDevices;
    }

    devices = (simCudaDevice *)malloc(sizeof(simCudaDevice) * ndevices);

    // Retrieve the properties for all Cuda devices
    for (deviceid = 0; deviceid < ndevices; ++deviceid) {
        if (cudaSuccess != cudaGetDeviceProperties(&devices[deviceid-undevices].props, deviceid)) {
            snprintf(error_message, BUFFER_LENGTH,
                     "Error obtaining properties for device %d.\n"
                     "\tThe CUDA library found is incompatible with simEngine.",
                     deviceid);
            error_message[BUFFER_LENGTH - 1] = '\0';
            free(devices);
            return DeviceProps_UnknownError;
        }
        // Filter out emulation devices
        if(9999 == devices[deviceid-undevices].props.major) {
            undevices += 1;
        }
        // Track GFLOPs of real devices
        else {
            devices[deviceid-undevices].gflops = devices[deviceid-undevices].props.multiProcessorCount * devices[deviceid-undevices].props.clockRate;
            devices[deviceid-undevices].unsorted = 1;
        }
    }

    // Subtract emulation devices from device count
    *deviceCount = ndevices - undevices;
    if (0 == *deviceCount) {
        snprintf(error_message, BUFFER_LENGTH,
                 "Only emulation device found.\n"
                 "\tDo you have a CUDA device?\n"
                 "\tIs the CUDA driver installed?\n"
                 "\tHave you rebooted after installing the driver?\n"
                 "\tDo you have device permissions set to allow CUDA computation?");
        error_message[BUFFER_LENGTH - 1] = '\0';
        free(devices);
        return DeviceProps_EmulationOnly;
    }


//......... some code omitted .........
Author: tuchwando, Project: simengine, Lines: 101, Source: device_props.c

Example 13: main

int main(int argc, char* argv[])
{
    int ndevices = 0;
    cudaError_t cuda_status = cudaGetDeviceCount(&ndevices);
    if (cuda_status != cudaSuccess)
    {
        fprintf(stderr, "Cannot get the cuda device count, status = %d: %s\n",
                cuda_status, cudaGetErrorString(cuda_status));
        return cuda_status;
    }

    // Return if no cuda devices present.
    printf("%d CUDA device(s) found\n", ndevices);
    if (!ndevices) return 0;

    // Create input data. Each device will have an equal
    // piece of data.
    size_t np = nx * ny, size = np * sizeof(float);
    float* data = (float*)malloc(size * 2);
    float *input = data, *output = data + np;
    float invdrandmax = 1.0 / RAND_MAX;
    for (size_t i = 0; i < np; i++)
        input[i] = rand() * invdrandmax;

    struct time_t start, finish;
    get_time(&start);

    // Get control result on CPU (to compare with results on devices).
    pattern2d_cpu(1, nx, 1, 1, ny, 1, input, output, ndevices);

    get_time(&finish);

    printf("CPU time = %f sec\n", get_time_diff(&start, &finish));

    // Create config structures to store device-specific
    // values.
    config_t* configs = (config_t*)malloc(
                            sizeof(config_t) * ndevices);

    // Initialize CUDA devices.
    for (int idevice = 0; idevice < ndevices; idevice++)
    {
        config_t* config = configs + idevice;

        // TODO: Set current CUDA device to idevice.
        cudaError_t cuda_status = cudaSetDevice(idevice);
        if (cuda_status != cudaSuccess) {
            fprintf(stderr, "Cannot get the cuda device count, status = %d: %s\n",
                    cuda_status, cudaGetErrorString(cuda_status));
            return cuda_status;
        }

        // Create device arrays for input and output data.
        cuda_status = cudaMalloc((void**)&config->in_dev, size);
        if (cuda_status != cudaSuccess)
        {
            fprintf(stderr, "Cannot allocate CUDA input buffer on device %d, status = %d: %s\n",
                    idevice, cuda_status, cudaGetErrorString(cuda_status));
            return cuda_status;
        }
        cuda_status = cudaMalloc((void**)&config->out_dev, size);
        if (cuda_status != cudaSuccess)
        {
            fprintf(stderr, "Cannot allocate CUDA output buffer on device %d, status = %d: %s\n",
                    idevice, cuda_status, cudaGetErrorString(cuda_status));
            return cuda_status;
        }

        // Copy input data to device buffer.
        cuda_status = cudaMemcpy(config->in_dev, input, size,
                                 cudaMemcpyHostToDevice);
        if (cuda_status != cudaSuccess)
        {
            fprintf(stderr, "Cannot copy input data to CUDA buffer on device %d, status = %d: %s\n",
                    idevice, cuda_status, cudaGetErrorString(cuda_status));
            return cuda_status;
        }

        printf("Device %d initialized\n", idevice);
    }

    // Start execution of kernels. One kernel
    // is executed on each device in parallel.
    for (int idevice = 0; idevice < ndevices; idevice++)
    {
        config_t* config = configs + idevice;

        // TODO: Set current CUDA device to idevice.
        cudaError_t cuda_status = cudaSetDevice(idevice);

        get_time(&config->start);

        // Run test kernel on the current device.
        int status = pattern2d_gpu(1, nx, 1, 1, ny, 1,
                                   config->in_dev, config->out_dev, idevice);
        if (status)
        {
            fprintf(stderr, "Cannot execute pattern 2d on device %d, status = %d: %s\n",
                    idevice, status, cudaGetErrorString(status));
            return status;
//......... some code omitted .........
Author: slitvinov, Project: cuda-examples, Lines: 101, Source: serial_cuda.c

Example 14: main


//......... some code omitted .........
	json_object_set(meta_step, "version", json_string(__VERSION));

	json_t *meta_parameters = json_object();
	json_object_set(meta_step, "parameters", meta_parameters);

	json_t *meta_steps = json_array();
	json_object_set(meta_step, "iterations", meta_steps);

	json_t *meta_results = json_object();
	json_object_set(meta_step, "results", meta_results);

#endif

	int ncol, nrow;
	unsigned char* msa = read_msa(msafile, &ncol, &nrow);
	fclose(msafile);

	int nsingle = ncol * (N_ALPHA - 1);
	int nvar = nsingle + ncol * ncol * N_ALPHA * N_ALPHA;
	int nsingle_padded = nsingle + N_ALPHA_PAD - (nsingle % N_ALPHA_PAD);
	int nvar_padded = nsingle_padded + ncol * ncol * N_ALPHA * N_ALPHA_PAD;

#ifdef CURSES
	bool color = detect_colors();
#else
	bool color = false;
#endif

	logo(color);

#ifdef CUDA
	int num_devices, dev_ret;
	struct cudaDeviceProp prop;
	dev_ret = cudaGetDeviceCount(&num_devices);
	if(dev_ret != cudaSuccess) {
		num_devices = 0;
	}


	if(num_devices == 0) {
		printf("No CUDA devices available, ");
		use_def_gpu = -1;
	} else if (use_def_gpu < -1 || use_def_gpu >= num_devices) {
		printf("Error: %d is not a valid device number. Please choose a number between 0 and %d\n", use_def_gpu, num_devices - 1);
		exit(1);
	} else {
		printf("Found %d CUDA devices, ", num_devices);
	}

	if (use_def_gpu != -1) {
		cudaError_t err = cudaSetDevice(use_def_gpu);
		if(cudaSuccess != err) {
			printf("Error setting device: %d\n", err);
			exit(1);
		}
		cudaGetDeviceProperties(&prop, use_def_gpu);
		printf("using device #%d: %s\n", use_def_gpu, prop.name);

		size_t mem_free, mem_total;
		err = cudaMemGetInfo(&mem_free, &mem_total);
		if(cudaSuccess != err) {
			printf("Error getting memory info: %d\n", err);
			exit(1);
		}

		size_t mem_needed = nrow * ncol * 2 + // MSAs
Author: susannvorberg, Project: CCMpred, Lines: 67, Source: ccmpred.c

Example 15: thread_task


//......... some code omitted .........
    int ** gpuCost = new int * [ingr_node_counter];
    int ** sequentialCost = new int * [ingr_node_counter];
    int ** oldSequentialCost = new int * [ingr_node_counter];
    int ** multipleGPUResult = new int * [ingr_node_counter];
    
    for(int i = 0; i < ingr_node_counter; i++)
    {
        gpuCost[i] = new int[numVertices];
        sequentialCost[i] = new int[numVertices];
        oldSequentialCost[i] = new int[numVertices];
        multipleGPUResult[i] = new int[numVertices];
    }

    int * source = new int [ingr_node_counter];

    std::cout << std::endl << "Running Sequential Algorithm..." << std::endl;

    // loop for all ingress nodes
    for (int i = 0; i < ingr_node_counter; i++) 
    {
        source[i] = random_ingress_node_selector(*gr);
        hostElapsedTime += processGraphSequential(gr, source[i], sequentialCost[i]);
    }

    //totalOldBFSSequential += oldHostElapsedTime;
    totalNewBFSSequential += hostElapsedTime;

    std::cout << "Finished Sequential" << std::endl;

    // print the CPU results to a file and the screen
    std::cout << ID_ << ", CPU Execution time (sec),                   " << hostElapsedTime << ", Num Vertices, " << numVertices << ", Num Ingress, " << ingr_node_counter  << std::endl; 
    timingFile << ID_ << ", CPU Execution time (sec),                  " << hostElapsedTime << ", Num Vertices, " << numVertices << ", Num Ingress, " << ingr_node_counter  << std::endl; 






    std::cout << std::endl << "Running Single GPU Algorithm..." << std::endl;
    totalSingleGPU += singleDeviceElapsedTime = processGraphSingleGPU(gr, source, ingr_node_counter, gpuCost);     
    std::cout << "Finished Single GPU" << std::endl;

    // print the Single GPU results to a file and the screen
    std::cout << ID_ << ", CUDA Single Device Execution time (sec),    " << singleDeviceElapsedTime << ", Num Vertices, " << numVertices << ", Num Ingress, " << ingr_node_counter  << std::endl;
    timingFile << ID_ << ", CUDA Single Device Execution time (sec),   " << singleDeviceElapsedTime << ", Num Vertices, " << numVertices << ", Num Ingress, " << ingr_node_counter  << std::endl; 






    // CALL MULTI-GPU FUNCTION HERE!!!!
    std::cout << std::endl << "Running Multiple GPU Algorithm..." << std::endl;
    int numDevices;
    cudaGetDeviceCount(&numDevices);

    for(int i = 2; i <= numDevices; i++)
    {
        multipleDeviceElapsedTime = processGraphMultipleGPU(gr, source, ingr_node_counter, multipleGPUResult, i);

        std::cout << ID_ << ", CUDA Multiple Device Execution time (sec),  " << multipleDeviceElapsedTime << ", Num GPUs, " << i << ", Num Vertices, " << numVertices << ", Num Ingress, " << ingr_node_counter  << std::endl;
        timingFile << ID_ << ", CUDA Multiple Device Execution time (sec), " << multipleDeviceElapsedTime << ", Num GPUs, " << i << ", Num Vertices, " << numVertices << ", Num Ingress, " << ingr_node_counter  << std::endl;
    }
    std::cout << "Finished Multiple GPU" << std::endl << std::endl;




    std::cout << std::endl << "Verifying Results..." << std::endl;

    // verify results
    if(verifyResults(sequentialCost, gpuCost, multipleGPUResult, numVertices, ingr_node_counter))
        std::cout << "Results Match!!" << std::endl;
    else
        std::cout << "NOT EQUAL!!" << std::endl;

    std::cout << "Finished Verifying" << std::endl;

    // clean up memory   
    for(int i = 0; i < ingr_node_counter; i++)
    {
        delete [] gpuCost[i];
        delete [] sequentialCost[i];
        delete [] oldSequentialCost[i];
        delete [] multipleGPUResult[i];
    }


    // deallocate memory
    delete [] gpuCost;
    delete [] sequentialCost;
    delete [] oldSequentialCost;
    delete [] multipleGPUResult;
    delete [] source;
    delete b_topology;
    delete gr;
        
    std::cout << std::endl << "------------ FINISHED PROCESSING --------------------" << std::endl << std::endl;

}
Author: eklukovich, Project: CUDA-BFS, Lines: 101, Source: main.cpp


Note: The cudaGetDeviceCount examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors, who retain copyright of the source code; refer to each project's License before distributing or reusing the code. Do not reproduce this article without permission.