当前位置: 首页>>代码示例>>C++>>正文


C++ cudaMemGetInfo函数代码示例

本文整理汇总了C++中cudaMemGetInfo函数的典型用法代码示例。如果您正苦于以下问题:C++ cudaMemGetInfo函数的具体用法?C++ cudaMemGetInfo怎么用?C++ cudaMemGetInfo使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。


在下文中一共展示了cudaMemGetInfo函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。

示例1: cuda_init_memopt

/* Select and activate the CUDA device with the most free memory.
 *
 * Probes every available device, queries its free/total memory, and
 * makes the device with the largest free amount the current device.
 * With zero or one device, device 0 is left selected.
 *
 * Returns the index of the selected device.
 */
int cuda_init_memopt(void) 
{
	int num_devices = cuda_devices();
	int max_device = 0;

	if (num_devices > 1) {

		size_t best_free = 0;

		for (int device = 0; device < num_devices; device++) {

			size_t mem_free = 0;
			size_t mem_total = 0;

			cuda_init(device);
			CUDA_ERROR(cudaMemGetInfo(&mem_free, &mem_total));

			if (mem_free > best_free) {

				best_free = mem_free;
				max_device = device;
			}
		}

		CUDA_ERROR(cudaSetDevice(max_device));
		// FIXME: we should set last_init
	}

	return max_device;
}
开发者ID:frankong,项目名称:bart,代码行数:31,代码来源:gpuops.c

示例2: getMemoryInfo

    /**
     * Returns information about device memory.
     *
     * @param free amount of free memory in bytes. can be NULL
     * @param total total amount of memory in bytes. can be NULL. (NULL by default)
     */
    /**
     * Returns information about device memory, reduced by the configured
     * reservation.
     *
     * Reads free/total device memory via cudaMemGetInfo and subtracts the
     * member `reservedMem` from each requested value, clamping at zero.
     *
     * @param free receives free memory in bytes; may be NULL
     * @param total receives total memory in bytes; may be NULL (NULL by default)
     */
    void getMemoryInfo(size_t *free, size_t *total = NULL)
    {
        size_t rawFree = 0;
        size_t rawTotal = 0;

        CUDA_CHECK(cudaMemGetInfo(&rawFree, &rawTotal));

        if (free != NULL)
            *free = (reservedMem > rawFree) ? (size_t)0 : rawFree - reservedMem;

        if (total != NULL)
            *total = (reservedMem > rawTotal) ? (size_t)0 : rawTotal - reservedMem;
    }
开发者ID:BenjaminW3,项目名称:picongpu,代码行数:32,代码来源:MemoryInfo.hpp

示例3: THCudaMemGetInfoCached

/* Report device memory availability, accounting for the caching allocator.
 *
 * Queries free/total bytes from the CUDA runtime, then asks the device
 * allocator (when it exposes a cacheInfo callback) how many bytes it is
 * holding cached so those can be counted as free. largestBlock starts as
 * an optimistic guess (all free memory) unless the allocator refines it.
 *
 * Returns cudaSuccess, or the first CUDA runtime error encountered.
 */
cudaError_t THCudaMemGetInfoCached(THCState *state,  size_t* freeBytes, size_t* totalBytes, size_t* largestBlock)
{
  size_t cachedBytes = 0;
  THCDeviceAllocator* allocator = state->cudaDeviceAllocator;

  *largestBlock = 0;

  /* raw numbers from the CUDA runtime first */
  cudaError_t err = cudaMemGetInfo(freeBytes, totalBytes);
  if (err != cudaSuccess)
    return err;

  int device;
  err = cudaGetDevice(&device);
  if (err != cudaSuccess)
    return err;

  /* optimistic: assume all free memory forms one contiguous block */
  *largestBlock = *freeBytes;

  if (allocator->cacheInfo != NULL)
    allocator->cacheInfo(allocator->state, device, &cachedBytes, largestBlock);

  /* cached bytes are reusable, so count them as free; largestBlock is
     left as whatever the allocator reported (unused by callers for now) */
  *freeBytes += cachedBytes;
  return cudaSuccess;
}
开发者ID:HustlehardInc,项目名称:pytorch,代码行数:26,代码来源:THCGeneral.cpp

示例4: cuda_available_memory

// return free memory in megabytes
int cuda_available_memory(int thr_id)
{
	int dev_id = device_map[thr_id % MAX_GPUS];
	size_t mtotal, mfree = 0;
	cudaSetDevice(dev_id);
	cudaMemGetInfo(&mfree, &mtotal);
	return (int) (mfree / (1024 * 1024));
}
开发者ID:jcvernaleo,项目名称:ccminer,代码行数:9,代码来源:cuda.cpp

示例5: getDeviceMemoryInfoInMb

void getDeviceMemoryInfoInMb(int device, size_t *total, size_t *free) {
  static const int bytesInMb = 1024 * 1024;
  size_t freeInBytes;
  size_t totalInBytes;
  CHECK_ERR(cudaGetDevice(&device));
  CHECK_ERR(cudaMemGetInfo(&freeInBytes, &totalInBytes));
  *total = totalInBytes / bytesInMb;
  *free = freeInBytes / bytesInMb;
}
开发者ID:amznlabs,项目名称:amazon-dsstne,代码行数:9,代码来源:cudautil.cpp

示例6: showMemoryInfo

/** \brief Debug short device memory information (free/total) to stream if DEBUG
 *flag is set to true.
 *
 * @param force always print output
 * @param stream output stream
 */
inline void showMemoryInfo(bool force, FILE *stream)
{
  size_t free_mem = 0;
  size_t total_mem = 0;
  cudaMemGetInfo(&free_mem, &total_mem);
  if (DEBUG || force)
    fprintf(stream, "memory usage, free: %lu total: %lu\n", free_mem,
            total_mem);
}
开发者ID:andyschwarzl,项目名称:gpuNUFFT,代码行数:15,代码来源:cuda_utils.hpp

示例7: cutorch_getMemoryUsage

/* Lua binding: push (freeBytes, totalBytes) for a device onto the stack.
 * With no argument, the current device is queried. With a 1-based device
 * index, that device is temporarily selected for the query and the
 * previously current device is restored afterwards. Returns 2 (number of
 * Lua results). */
static int cutorch_getMemoryUsage(lua_State *L) {
  size_t freeBytes = 0;
  size_t totalBytes = 0;
  int curDevice;
  THCudaCheck(cudaGetDevice(&curDevice));

  int device = luaL_optint(L, 1, -10);
  if (device != -10) {
    /* explicit device argument: 1-based in Lua, 0-based in CUDA */
    THCudaCheck(cudaSetDevice(device-1));
    THCudaCheck(cudaMemGetInfo(&freeBytes, &totalBytes));
    THCudaCheck(cudaSetDevice(curDevice));
  } else {
    /* no argument passed: report the current device's usage */
    THCudaCheck(cudaMemGetInfo(&freeBytes, &totalBytes));
  }
  lua_pushnumber(L, freeBytes);
  lua_pushnumber(L, totalBytes);
  return 2;
}
开发者ID:ASAPPinc,项目名称:cutorch,代码行数:18,代码来源:init.c

示例8: oskar_device_mem_info

/* Fill mem_free/mem_total with device memory amounts in bytes.
 * Both pointers must be non-NULL or the call is a no-op. Without CUDA
 * support compiled in, the outputs are left untouched. */
void oskar_device_mem_info(size_t* mem_free, size_t* mem_total)
{
    if (mem_free == NULL || mem_total == NULL) return;
#ifdef OSKAR_HAVE_CUDA
    cudaMemGetInfo(mem_free, mem_total);
#else
    (void) mem_free;
    (void) mem_total;
#endif
}
开发者ID:shaoguangleo,项目名称:OSKAR,代码行数:10,代码来源:oskar_device_utils.c

示例9: print_memory_stats

/* Print used/free/total device memory (bytes, plus used percentage) to
 * stdout. Returns kmcudaRuntimeError if the CUDA query fails, otherwise
 * kmcudaSuccess. */
static KMCUDAResult print_memory_stats() {
  size_t free_bytes = 0, total_bytes = 0;
  cudaError_t err = cudaMemGetInfo(&free_bytes, &total_bytes);
  if (err != cudaSuccess) {
    return kmcudaRuntimeError;
  }
  size_t used_bytes = total_bytes - free_bytes;
  printf("GPU memory: used %zu bytes (%.1f%%), free %zu bytes, total %zu bytes\n",
         used_bytes, used_bytes * 100.0 / total_bytes,
         free_bytes, total_bytes);
  return kmcudaSuccess;
}
开发者ID:Zhiyu-Chen,项目名称:kmcuda,代码行数:10,代码来源:kmcuda.cpp

示例10: check_device_memory

/* When compiled with CUDA_DEBUG, print the device memory usage (used and
 * free, in MiB) together with the source location that requested the
 * report; otherwise a no-op. */
inline void check_device_memory( const char* filename, const int line_number)
{
#ifdef CUDA_DEBUG
   size_t free_bytes;
   size_t total_bytes;
   cudaMemGetInfo( &free_bytes, &total_bytes);
   const size_t used = total_bytes - free_bytes;
   printf( "CUDA device memory usage at %s:%i: Used: %f Mb, Free %f Mb\n", filename, line_number, float(used)/(1024*1024), float(free_bytes)/(1024*1024));
#endif
}
开发者ID:mattoaellis,项目名称:vampire,代码行数:10,代码来源:cuda_utils.hpp

示例11: alloc

// Build this device's default memory allocator: a bucket allocator whose
// capacity is capped at 80% of the device memory that is free right now.
AllocPtr CudaDevice::CreateDefaultAlloc() {
	// Create the allocator. Use a bucket allocator with a capacity limit at
	// 80% of free mem.
	intrusive_ptr<CudaAllocBuckets> alloc(new CudaAllocBuckets(this));
	size_t freeMem, totalMem;

	// NOTE(review): the cudaMemGetInfo return value is not checked; on failure
	// freeMem is unspecified here — confirm callers tolerate that.
	cudaMemGetInfo(&freeMem, &totalMem);
	alloc->SetCapacity((size_t)(.80 * freeMem));
	
	// NOTE(review): AllocPtr is built from the raw pointer while `alloc` still
	// holds a reference; this presumably relies on intrusive reference
	// counting shared between the two smart-pointer types — verify AllocPtr
	// increments the same refcount before `alloc` is destroyed.
	return AllocPtr(alloc.get());
}
开发者ID:BillOmg,项目名称:moderngpu,代码行数:11,代码来源:mgpucontext.cpp

示例12: cuda_safe_call

		// Stream a human-readable report of the CUDA running configuration:
		// driver/runtime versions, static device properties, configured
		// settings, and a live free/total memory snapshot (queried here via
		// cudaMemGetInfo). Returns the stream to allow chaining.
		std::ostream& operator<< (std::ostream& out, const cuda_running_configuration& running_configuration)
		{
			out << "--- CUDA versions ---" << std::endl;
			// Version ints are encoded as 1000*major + 10*minor.
			out << "Driver version = " << running_configuration.driver_version / 1000 << "." << (running_configuration.driver_version % 100) / 10 << std::endl;
			out << "Runtime version = " << running_configuration.runtime_version / 1000 << "." << (running_configuration.runtime_version % 100) / 10 << std::endl;

			out << "--- Device ---" << std::endl;

			out << "Device Id = " << running_configuration.device_id << std::endl;
			out << "Device name = " << running_configuration.device_name << std::endl;
			out << "Compute capability = " << running_configuration.compute_capability_major << "." << running_configuration.compute_capability_minor << std::endl;
			// Clock rates are stored in kHz; shown in MHz.
			out << "Clock rate = " << (running_configuration.clock_rate / 1000) << " MHz" << std::endl;
			out << "Memory clock rate = " << (running_configuration.memory_clock_rate / 1000) << " MHz" << std::endl;
			out << "Memory bus width = " << running_configuration.memory_bus_width << " bits" << std::endl;
			out << "Global memory size = " << running_configuration.global_memory_size / (1024 * 1024) << " MB" << std::endl;
			out << "ECC support = " << (running_configuration.ecc_enabled ? "Enabled" : "Disabled") << std::endl;
			out << "L2 cache size = " << running_configuration.l2_cache_size << " bytes" << std::endl;
			out << "Multiprocessor count = " << running_configuration.multiprocessor_count << std::endl;
			out << "Shared memory per block size = " << running_configuration.smem_per_block << " bytes" << std::endl;
			out << "Maximum number of threads per multiprocessor = " << running_configuration.max_threads_per_multiprocessor << std::endl;
			out << "Maximum number of threads per block = " << running_configuration.max_threads_per_block << std::endl;
			out << "Maximum sizes of each dimension of a block = "
				<< running_configuration.max_threads_dim[0] << " x "
				<< running_configuration.max_threads_dim[1] << " x "
				<< running_configuration.max_threads_dim[2] << std::endl;
			out << "Maximum sizes of each dimension of a grid = "
				<< running_configuration.max_grid_size[0] << " x "
				<< running_configuration.max_grid_size[1] << " x "
				<< running_configuration.max_grid_size[2] << std::endl;
			out << "Maximum size of 1D texture bound to linear memory = " << running_configuration.max_texture_1d_linear << std::endl;
			out << "Texture alignment = " << running_configuration.texture_alignment << " bytes" << std::endl;
			out << "PCI Bus ID = " << running_configuration.pci_bus_id << std::endl;
			out << "PCI Location ID = " << running_configuration.pci_device_id << std::endl;
			// TCC/WDDM driver mode only exists on Windows.
			#ifdef WIN32
				out << "Driver mode = " << (running_configuration.tcc_mode ? "TCC" : "WDDM") << std::endl;
			#endif

			out << "--- Settings ---" << std::endl;

			out << "Max global memory usage ratio = " << running_configuration.max_global_memory_usage_ratio << std::endl;

			out << "--- Status ---" << std::endl;

			// Live query: free/total device memory at the time of printing.
			size_t free_memory;
			size_t total_memory;
			cuda_safe_call(cudaMemGetInfo(&free_memory, &total_memory));

			out << "Free memory = " << free_memory / (1024 * 1024) << " MB" << std::endl;
			out << "Total memory = " << total_memory / (1024 * 1024) << " MB" << std::endl;

			return out;
		}
开发者ID:yzxyzh,项目名称:nnForge,代码行数:52,代码来源:cuda_running_configuration.cpp

示例13: cudaMemoryInfoText

/// Format the current GPU memory state (free MB, total MB, occupied %)
/// into a human-readable string.
inline std::string
cudaMemoryInfoText()
{
    size_t freeBytes;
    size_t totalBytes;
    CUGIP_CHECK_RESULT(cudaMemGetInfo( &freeBytes, &totalBytes));

    const float freeMb = float(freeBytes) / (1024*1024);
    const float totalMb = float(totalBytes) / (1024*1024);
    const float occupiedPercent = 100.0f * float(totalBytes - freeBytes) / totalBytes;

    return boost::str( boost::format("Free GPU memory: %1% MB; Total GPU memory %2% MB; Occupied %3%%%")
                       % freeMb
                       % totalMb
                       % occupiedPercent
                     );
}
开发者ID:JanKolomaznik,项目名称:cugip,代码行数:13,代码来源:utils.hpp

示例14: getMemUsage

// Print this rank's free/total device memory (in MiB) to stdout.
// Always returns 0.
int getMemUsage(const int &myRank)
{
    size_t freeBytes = 0;
    size_t totalBytes = 0;

    CHECK(cudaMemGetInfo(&freeBytes, &totalBytes));

    std::cout << "myRank: " << myRank << " "
              << freeBytes / 1024.0 / 1024.0
              << " / " << totalBytes / 1024.0 / 1024.0 << std::endl;

    return 0;
}
开发者ID:piyueh,项目名称:PoissonTest,代码行数:13,代码来源:GPUFuncs.cpp

示例15: memoryInfo

/* Print total and free GPU memory (decimal MB) for this rank to stdout. */
void memoryInfo(void)
{
	size_t free_bytes;
	size_t total_bytes;

	cudaCheck(cudaMemGetInfo (&free_bytes,&total_bytes),"MemInfo11");

	printf("\n");
	printf("\nRANK=%d\n",RANK);
	printf("\nGPU total memory = % .2f MB\n",(float)total_bytes/1e6);
	printf("\nGPU free  memory = % .2f MB\n",(float)free_bytes/1e6);

}
开发者ID:albertovelam,项目名称:HIT_MPI,代码行数:13,代码来源:memory.c


注:本文中的cudaMemGetInfo函数示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。