本文整理汇总了C++中cusparseCreateMatDescr函数的典型用法代码示例。如果您正苦于以下问题:C++ cusparseCreateMatDescr函数的具体用法?C++ cusparseCreateMatDescr怎么用?C++ cusparseCreateMatDescr使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了cusparseCreateMatDescr函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: cublas_handle_
Caffe::Caffe()
    : cublas_handle_(NULL),
      cusparse_handle_(NULL),
      cusparse_descr_(NULL),
      curand_generator_(NULL),
      random_generator_(),
      mode_(Caffe::CPU),
      solver_count_(1),
      root_solver_(true) {
  // Attempt to create a cuBLAS handle; on failure we log an error and keep
  // going, since the caller may only intend to run CPU code.
  LOG(INFO)<<"caffe init.";
  if (cublasCreate(&cublas_handle_) != CUBLAS_STATUS_SUCCESS) {
    LOG(ERROR) << "Cannot create Cublas handle. Cublas won't be available.";
  }
  // Attempt to create a cuSPARSE handle, same best-effort policy.
  if (cusparseCreate(&cusparse_handle_) != CUSPARSE_STATUS_SUCCESS) {
    LOG(ERROR) << "cannot create Cusparse handle,Cusparse won't be available.";
  }
  // Attempt to create a cuSPARSE matrix descriptor; when it succeeds,
  // configure it as a general matrix with zero-based indexing.
  if (cusparseCreateMatDescr(&cusparse_descr_) != CUSPARSE_STATUS_SUCCESS) {
    LOG(ERROR) << "cannot create Cusparse descr,descr won't be available.";
  } else {
    cusparseSetMatType(cusparse_descr_, CUSPARSE_MATRIX_TYPE_GENERAL);
    cusparseSetMatIndexBase(cusparse_descr_, CUSPARSE_INDEX_BASE_ZERO);
    LOG(INFO)<<"init descr";
  }
  // Attempt to create and seed a cuRAND generator.
  if (curandCreateGenerator(&curand_generator_, CURAND_RNG_PSEUDO_DEFAULT)
          != CURAND_STATUS_SUCCESS ||
      curandSetPseudoRandomGeneratorSeed(curand_generator_, cluster_seedgen())
          != CURAND_STATUS_SUCCESS) {
    LOG(ERROR) << "Cannot create Curand generator. Curand won't be available.";
  }
  LOG(INFO)<<"caffe finish";
}
示例2: CudaSparseSingleton
CudaSparseSingleton()
{
  // Acquire the shared cuSPARSE context and a reusable matrix descriptor
  // configured as a general matrix with zero-based indices.
  // NOTE(review): return statuses are ignored here, unlike the throwing
  // variant of this singleton elsewhere in the file — consider checking them.
  cusparseCreate(&handle);
  cusparseCreateMatDescr(&descra);
  cusparseSetMatType(descra, CUSPARSE_MATRIX_TYPE_GENERAL);
  cusparseSetMatIndexBase(descra, CUSPARSE_INDEX_BASE_ZERO);
}
示例3: fprintf
// initialize CUDA: allocate an ssp_cuda wrapper and bring up the cuSPARSE
// context plus a general, zero-indexed matrix descriptor. Returns NULL on
// any failure (after releasing whatever was already created).
ssp_cuda *ssp_init_cuda() {
    ssp_cuda *cudaHandle = (ssp_cuda *)malloc(sizeof(ssp_cuda));
    if (cudaHandle == NULL) {
        fprintf(stderr, "ssp_init_cuda: cudaHandle memory allocation failed.\n");
        return NULL;
    }
    // Initialize members before any creation call so that a failure path
    // can hand a consistent struct to ssp_finalize_cuda.
    cudaHandle->cusparse_handle = 0;
    cudaHandle->cusparse_matDescr = 0;
    // Bring up the cuSPARSE library context.
    if (cusparseCreate(&cudaHandle->cusparse_handle) != CUSPARSE_STATUS_SUCCESS) {
        ssp_finalize_cuda(cudaHandle);
        fprintf(stderr, "ssp_init_cuda: cusparse initialization failed.\n");
        return NULL;
    }
    // Create the matrix descriptor used by subsequent cuSPARSE calls.
    if (cusparseCreateMatDescr(&cudaHandle->cusparse_matDescr) != CUSPARSE_STATUS_SUCCESS) {
        ssp_finalize_cuda(cudaHandle);
        fprintf(stderr, "ssp_init_cuda: cusparse matrix setup failed.\n");
        return NULL;
    }
    // General matrix layout, zero-based indexing.
    cusparseSetMatType(cudaHandle->cusparse_matDescr, CUSPARSE_MATRIX_TYPE_GENERAL);
    cusparseSetMatIndexBase(cudaHandle->cusparse_matDescr, CUSPARSE_INDEX_BASE_ZERO);
    return cudaHandle;
}
示例4: xDense2Csr
xDense2Csr(StatisticalTimer& timer) : cusparseFunc(timer)
{
    // Build a general, zero-indexed matrix descriptor up front; any cuSPARSE
    // failure is converted into an exception by CUDA_V_THROW.
    cusparseStatus_t status = cusparseCreateMatDescr(&descrA);
    CUDA_V_THROW(status, "cusparseCreateMatDescr failed");
    status = cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL);
    CUDA_V_THROW(status, "cusparseSetMatType failed");
    status = cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ZERO);
    CUDA_V_THROW(status, "cusparseSetMatIndexBase failed");
    // Start in the empty state: no matrix dimensions yet.
    n_rows = n_cols = n_vals = 0;
    // No device or host buffers allocated yet.
    device_col_indices = nullptr;
    device_row_offsets = nullptr;
    device_values = nullptr;
    device_A = nullptr;
    nnzPerRow = nullptr;
    devRowOffsets = nullptr;
    devColIndices = nullptr;
    devValues = nullptr;
}// end
示例5: CUDA_CHECK
// Switches the process to the given CUDA device, destroying every library
// handle created on the previous device and recreating it on the new one.
// No-op when the requested device is already current.
void Caffe::SetDevice(const int device_id) {
  int current_device;
  // FIX: this argument was HTML-entity-corrupted to "¤t_device"
  // (mangled "&current_device"); restored so the address is passed.
  CUDA_CHECK(cudaGetDevice(&current_device));
  if (current_device == device_id) {
    return;
  }
  // The call to cudaSetDevice must come before any calls to Get, which
  // may perform initialization using the GPU.
  CUDA_CHECK(cudaSetDevice(device_id));
  // Tear down handles bound to the previous device.
  if (Get().cublas_handle_) CUBLAS_CHECK(cublasDestroy(Get().cublas_handle_));
  if (Get().cusparse_descr_) CUSPARSE_CHECK(cusparseDestroyMatDescr(Get().cusparse_descr_));
  if (Get().cusparse_handle_) CUSPARSE_CHECK(cusparseDestroy(Get().cusparse_handle_));
  if (Get().curand_generator_) {
    CURAND_CHECK(curandDestroyGenerator(Get().curand_generator_));
  }
  // Recreate the handles on the newly selected device.
  CUSPARSE_CHECK(cusparseCreate(&Get().cusparse_handle_));
  CUSPARSE_CHECK(cusparseCreateMatDescr(&Get().cusparse_descr_));
  // NOTE(review): matrix type / index base are not set here (left commented
  // out) — presumably relying on cuSPARSE defaults matching the
  // constructor's general/zero-based settings; confirm.
  // cusparseSetMatType(cusparse_descr_,CUSPARSE_MATRIX_TYPE_GENERAL);
  // cusparseSetMatIndexBase(cusparse_descr_,CUSPARSE_INDEX_BASE_ZERO);
  LOG(INFO)<<"set descr";
  CUBLAS_CHECK(cublasCreate(&Get().cublas_handle_));
  CURAND_CHECK(curandCreateGenerator(&Get().curand_generator_,
      CURAND_RNG_PSEUDO_DEFAULT));
  CURAND_CHECK(curandSetPseudoRandomGeneratorSeed(Get().curand_generator_,
      cluster_seedgen()));
}
示例6: cuSparseHandleType
cuSparseHandleType(bool transposeA, bool transposeB){
cusparseStatus_t status;
status= cusparseCreate(&handle);
if (status != CUSPARSE_STATUS_SUCCESS) {
std::cerr << ("cusparseCreate ERROR") << std::endl;
return;
}
cusparseSetPointerMode(handle, CUSPARSE_POINTER_MODE_HOST);
if (transposeA){
transA = CUSPARSE_OPERATION_TRANSPOSE;
}
else {
transA = CUSPARSE_OPERATION_NON_TRANSPOSE;
}
if (transposeB){
transB = CUSPARSE_OPERATION_TRANSPOSE;
}
else {
transB = CUSPARSE_OPERATION_NON_TRANSPOSE;
}
status = cusparseCreateMatDescr(&a_descr);
if (status != CUSPARSE_STATUS_SUCCESS) {
std::cerr << "cusparseCreateMatDescr a_descr ERROR" << std::endl;
return;
}
cusparseSetMatType(a_descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(a_descr,CUSPARSE_INDEX_BASE_ZERO);
status = cusparseCreateMatDescr(&b_descr);
if (status != CUSPARSE_STATUS_SUCCESS) {
std::cerr << ("cusparseCreateMatDescr b_descr ERROR") << std::endl;
return;
}
cusparseSetMatType(b_descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(b_descr,CUSPARSE_INDEX_BASE_ZERO);
status = cusparseCreateMatDescr(&c_descr);
if (status != CUSPARSE_STATUS_SUCCESS) {
std::cerr << ("cusparseCreateMatDescr c_descr ERROR") << std::endl;
return;
}
cusparseSetMatType(c_descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(c_descr,CUSPARSE_INDEX_BASE_ZERO);
}
示例7: magma_dapplycuicc_l
// Applies the lower-triangular (L) factor of an incomplete Cholesky
// preconditioner: solves L * x = b with cuSPARSE's csrsv solve, using the
// analysis info precomputed in precond->cuinfoL.
// NOTE(review): cuSPARSE failures are only printed, not propagated — the
// function returns MAGMA_SUCCESS unconditionally.
magma_int_t
magma_dapplycuicc_l( magma_d_vector b, magma_d_vector *x,
magma_d_preconditioner *precond ){
// Scalar alpha for the triangular solve (1.0).
double one = MAGMA_D_MAKE( 1.0, 0.0);
// CUSPARSE context //
// A fresh handle and descriptor are created for each call and destroyed below.
cusparseHandle_t cusparseHandle;
cusparseStatus_t cusparseStatus;
cusparseStatus = cusparseCreate(&cusparseHandle);
if(cusparseStatus != 0) printf("error in Handle.\n");
cusparseMatDescr_t descrL;
cusparseStatus = cusparseCreateMatDescr(&descrL);
if(cusparseStatus != 0) printf("error in MatrDescr.\n");
// Describe precond->M as triangular, non-unit diagonal, lower fill,
// zero-based indexing.
cusparseStatus =
cusparseSetMatType(descrL,CUSPARSE_MATRIX_TYPE_TRIANGULAR);
if(cusparseStatus != 0) printf("error in MatrType.\n");
cusparseStatus =
cusparseSetMatDiagType (descrL, CUSPARSE_DIAG_TYPE_NON_UNIT);
if(cusparseStatus != 0) printf("error in DiagType.\n");
cusparseStatus =
cusparseSetMatFillMode(descrL,CUSPARSE_FILL_MODE_LOWER);
if(cusparseStatus != 0) printf("error in fillmode.\n");
cusparseStatus =
cusparseSetMatIndexBase(descrL,CUSPARSE_INDEX_BASE_ZERO);
if(cusparseStatus != 0) printf("error in IndexBase.\n");
// end CUSPARSE context //
// Triangular solve: x = L^{-1} * b (no transpose).
cusparseStatus =
cusparseDcsrsv_solve( cusparseHandle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
precond->M.num_rows, &one,
descrL,
precond->M.val,
precond->M.row,
precond->M.col,
precond->cuinfoL,
b.val,
x->val );
if(cusparseStatus != 0) printf("error in L triangular solve:%p.\n", precond->cuinfoL );
// Release per-call cuSPARSE resources and wait for the device to finish.
cusparseDestroyMatDescr( descrL );
cusparseDestroy( cusparseHandle );
magma_device_sync();
return MAGMA_SUCCESS;
}
示例8: cusparse_safe_call
// Forward propagation for a sparse 1x1 fully-connected layer:
// output = W * input via cuSPARSE CSR SpMM, then output += bias via cuDNN.
// All work is enqueued on stream_id.
void sparse_fully_connected_1x1_layer_tester_cuda::enqueue_forward_propagation(
    cudaStream_t stream_id,
    cuda_linear_buffer_device::ptr output_buffer,
    const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
    const std::vector<cuda_linear_buffer_device::const_ptr>& data,
    const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
    const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
    const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
    cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
    cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
    unsigned int entry_count)
{
    // Sparse matrix multiply: output = 1.0 * W * input + 0.0 * output
    {
        cusparse_safe_call(cusparseSetStream(cuda_config->get_cusparse_handle(), stream_id));
        float alpha = 1.0F;
        float beta = 0.0F;
        cusparseMatDescr_t mat_descr;
        cusparse_safe_call(cusparseCreateMatDescr(&mat_descr));
        cusparse_safe_call(cusparseScsrmm(
            cuda_config->get_cusparse_handle(),
            CUSPARSE_OPERATION_NON_TRANSPOSE,
            output_elem_count_per_entry,
            entry_count,
            input_elem_count_per_entry_list[0],
            feature_map_connection_count,
            &alpha,
            mat_descr,
            *data[0],
            *data_custom[1],
            *data_custom[0],
            *input_buffers[0],
            input_elem_count_per_entry_list[0],
            &beta,
            *output_buffer,
            output_elem_count_per_entry));
        // FIX: the descriptor was created on every invocation but never
        // released, leaking a cuSPARSE matrix descriptor per call.
        cusparse_safe_call(cusparseDestroyMatDescr(mat_descr));
    }
    // Add bias
    {
        cudnn_safe_call(cudnnSetStream(cuda_config->get_cudnn_handle(), stream_id));
        cudnn_util::set_tensor_descriptor(
            output_data_desc,
            output_configuration_specific,
            entry_count);
        float alpha = 1.0F;
        float beta = 1.0F;  // accumulate into the SpMM result
        cudnn_safe_call(cudnnAddTensor(
            cuda_config->get_cudnn_handle(),
            &alpha,
            bias_desc,
            *data[1],
            &beta,
            output_data_desc,
            *output_buffer));
    }
}
示例9: xCsr2Dense
xCsr2Dense( StatisticalTimer& timer, bool read_explicit_zeroes = true ): cusparseFunc( timer )
{
    // Configure a general, zero-indexed matrix descriptor; any cuSPARSE
    // failure is surfaced as an exception via CUDA_V_THROW.
    cusparseStatus_t status = cusparseCreateMatDescr( &descrA );
    CUDA_V_THROW( status, "cusparseCreateMatDescr failed" );
    status = cusparseSetMatType( descrA, CUSPARSE_MATRIX_TYPE_GENERAL );
    CUDA_V_THROW( status, "cusparseSetMatType failed" );
    status = cusparseSetMatIndexBase( descrA, CUSPARSE_INDEX_BASE_ZERO );
    CUDA_V_THROW( status, "cusparseSetMatIndexBase failed" );
    // No matrix loaded yet; remember whether explicit zeroes are kept.
    n_rows = 0;
    n_cols = 0;
    n_vals = 0;
    explicit_zeroes = read_explicit_zeroes;
}
示例10: CudaSparseSingleton
CudaSparseSingleton()
{
  // Bring up the cuSPARSE library; initialization failure is fatal.
  status = cusparseCreate(&handle);
  if (CUSPARSE_STATUS_SUCCESS != status) {
    throw std::runtime_error( std::string("ERROR - CUSPARSE Library Initialization failed" ) );
  }
  // Create the shared matrix descriptor; also fatal on failure.
  status = cusparseCreateMatDescr(&descra);
  if (CUSPARSE_STATUS_SUCCESS != status) {
    throw std::runtime_error( std::string("ERROR - CUSPARSE Library Matrix descriptor failed" ) );
  }
  // General matrix layout with zero-based indexing.
  cusparseSetMatType(descra, CUSPARSE_MATRIX_TYPE_GENERAL);
  cusparseSetMatIndexBase(descra, CUSPARSE_INDEX_BASE_ZERO);
}
示例11: magma_capplycumicc_l
// Applies the lower-triangular (L) factor of an incomplete Cholesky
// preconditioner, solving L * x = b for (possibly) multiple right-hand
// sides via cuSPARSE csrsm on the given queue's stream.
// NOTE(review): CHECK_CUSPARSE presumably sets `info` and jumps to the
// cleanup label on failure — confirm against the macro definition.
extern "C" magma_int_t
magma_capplycumicc_l(
magma_c_matrix b,
magma_c_matrix *x,
magma_c_preconditioner *precond,
magma_queue_t queue )
{
magma_int_t info = 0;
// Initialized to NULL so the cleanup path is safe on every exit.
cusparseHandle_t cusparseHandle=NULL;
cusparseMatDescr_t descrL=NULL;
// Scalar alpha for the solve (1.0 + 0.0i).
magmaFloatComplex one = MAGMA_C_MAKE( 1.0, 0.0);
// CUSPARSE context //
CHECK_CUSPARSE( cusparseCreate( &cusparseHandle ));
CHECK_CUSPARSE( cusparseSetStream( cusparseHandle, queue ));
// Describe the factor: triangular, non-unit diagonal, lower fill,
// zero-based indexing.
CHECK_CUSPARSE( cusparseCreateMatDescr( &descrL ));
CHECK_CUSPARSE( cusparseSetMatType( descrL, CUSPARSE_MATRIX_TYPE_TRIANGULAR ));
CHECK_CUSPARSE( cusparseSetMatDiagType( descrL, CUSPARSE_DIAG_TYPE_NON_UNIT ));
CHECK_CUSPARSE( cusparseSetMatFillMode( descrL, CUSPARSE_FILL_MODE_LOWER ));
CHECK_CUSPARSE( cusparseSetMatIndexBase( descrL, CUSPARSE_INDEX_BASE_ZERO ));
// Multi-RHS triangular solve; the RHS count is derived from b's total size
// divided by the number of rows in the factor.
CHECK_CUSPARSE( cusparseCcsrsm_solve( cusparseHandle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
precond->M.num_rows,
b.num_rows*b.num_cols/precond->M.num_rows,
&one,
descrL,
precond->M.dval,
precond->M.drow,
precond->M.dcol,
precond->cuinfoL,
b.dval,
precond->M.num_rows,
x->dval,
precond->M.num_rows ));
magma_device_sync();
cleanup:
// Per-call cuSPARSE resources are released on both success and error paths.
cusparseDestroyMatDescr( descrL );
cusparseDestroy( cusparseHandle );
return info;
}
示例12: descrA
// Builds a device-side CSR matrix from host data supplied in CSC layout
// (values / col_ptr / row_ind) — hence the row/col swap and the transposed
// fill modes below.
//
// FIX: the CUDA/cuSPARSE calls previously lived *inside* assert(...), so an
// NDEBUG build would elide the calls themselves, leaving the matrix
// uninitialized. The calls now execute unconditionally and only their
// statuses are asserted. The cuSPARSE statuses are also compared against
// CUSPARSE_STATUS_SUCCESS instead of the (wrong-typed) cudaSuccess.
sparse_matrix::sparse_matrix(sparse_matrix::descriptor_t descriptor,
                             int rows, int cols, int nonzeros,
                             const double* values, const int* col_ptr, const int* row_ind)
    : descrA(), m(), n(), nnz(), csrValA(), csrRowPtrA(), csrColIndA()
{
    // Create descriptor
    cusparseStatus_t sp_status = cusparseCreateMatDescr(&descrA);
    assert(sp_status == CUSPARSE_STATUS_SUCCESS);
    sp_status = cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ZERO);
    assert(sp_status == CUSPARSE_STATUS_SUCCESS);
    // Set descriptor fields
    switch (descriptor) {
    case non_symmetric:
        sp_status = cusparseSetMatDiagType(descrA, CUSPARSE_DIAG_TYPE_NON_UNIT);
        assert(sp_status == CUSPARSE_STATUS_SUCCESS);
        sp_status = cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL);
        assert(sp_status == CUSPARSE_STATUS_SUCCESS);
        sp_status = cusparseSetMatFillMode(descrA, CUSPARSE_FILL_MODE_LOWER); // doesn't matter which, presumably
        assert(sp_status == CUSPARSE_STATUS_SUCCESS);
        break;
    case symmetric_lower:
        sp_status = cusparseSetMatDiagType(descrA, CUSPARSE_DIAG_TYPE_NON_UNIT);
        assert(sp_status == CUSPARSE_STATUS_SUCCESS);
        sp_status = cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_SYMMETRIC);
        assert(sp_status == CUSPARSE_STATUS_SUCCESS);
        sp_status = cusparseSetMatFillMode(descrA, CUSPARSE_FILL_MODE_UPPER); // upper since we're coming with CSC and storing as CSR
        assert(sp_status == CUSPARSE_STATUS_SUCCESS);
        break;
    case symmetric_upper:
        sp_status = cusparseSetMatDiagType(descrA, CUSPARSE_DIAG_TYPE_NON_UNIT);
        assert(sp_status == CUSPARSE_STATUS_SUCCESS);
        sp_status = cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_SYMMETRIC);
        assert(sp_status == CUSPARSE_STATUS_SUCCESS);
        sp_status = cusparseSetMatFillMode(descrA, CUSPARSE_FILL_MODE_LOWER); // lower since we're coming with CSC and storing as CSR
        assert(sp_status == CUSPARSE_STATUS_SUCCESS);
        break;
    }
    (void)sp_status;  // silence unused-variable warning in NDEBUG builds
    // Switch rows and cols because we're coming with CSC and storing as CSR
    n = rows;
    m = cols;
    nnz = nonzeros;
    // Allocate device memory for the CSR arrays
    cudaError_t cu_status;
    cu_status = cudaMalloc(reinterpret_cast<void**>(&csrValA), nnz * sizeof(double));
    assert(cu_status == cudaSuccess);
    cu_status = cudaMalloc(reinterpret_cast<void**>(&csrRowPtrA), (m+1) * sizeof(int));
    assert(cu_status == cudaSuccess);
    cu_status = cudaMalloc(reinterpret_cast<void**>(&csrColIndA), nnz * sizeof(int));
    assert(cu_status == cudaSuccess);
    // Copy host values (CSC) into the device CSR arrays
    cu_status = cudaMemcpy(csrValA, values, nnz * sizeof(double), cudaMemcpyHostToDevice);
    assert(cu_status == cudaSuccess);
    cu_status = cudaMemcpy(csrRowPtrA, col_ptr, (m+1) * sizeof(int), cudaMemcpyHostToDevice);
    assert(cu_status == cudaSuccess);
    cu_status = cudaMemcpy(csrColIndA, row_ind, nnz * sizeof(int), cudaMemcpyHostToDevice);
    assert(cu_status == cudaSuccess);
    (void)cu_status;  // silence unused-variable warning in NDEBUG builds
}
示例13: i
int TxMatrixOptimizationDataCU::ingestLocalMatrix(SparseMatrix& A) {
std::vector<local_int_t> i(A.localNumberOfRows + 1, 0);
// Slight overallocation for these arrays
std::vector<local_int_t> j;
j.reserve(A.localNumberOfNonzeros);
std::vector<double> a;
a.reserve(A.localNumberOfNonzeros);
scatterFromHalo.setNumRows(A.localNumberOfRows);
scatterFromHalo.setNumCols(A.localNumberOfColumns);
scatterFromHalo.clear();
// We're splitting the matrix into diagonal and off-diagonal block to
// enable overlapping of computation and communication.
i[0] = 0;
for (local_int_t m = 0; m < A.localNumberOfRows; ++m) {
local_int_t nonzerosInRow = 0;
for (local_int_t n = 0; n < A.nonzerosInRow[m]; ++n) {
local_int_t col = A.mtxIndL[m][n];
if (col < A.localNumberOfRows) {
j.push_back(col);
a.push_back(A.matrixValues[m][n]);
++nonzerosInRow;
} else {
scatterFromHalo.addEntry(m, col, A.matrixValues[m][n]);
}
}
i[m + 1] = i[m] + nonzerosInRow;
}
// Setup SpMV data on Device
cudaError_t err = cudaSuccess;
int* i_d;
err = cudaMalloc((void**)&i_d, i.size() * sizeof(i[0]));
CHKCUDAERR(err);
err = cudaMemcpy(i_d, &i[0], i.size() * sizeof(i[0]), cudaMemcpyHostToDevice);
CHKCUDAERR(err);
int* j_d;
err = cudaMalloc((void**)&j_d, j.size() * sizeof(j[0]));
CHKCUDAERR(err);
err = cudaMemcpy(j_d, &j[0], j.size() * sizeof(j[0]), cudaMemcpyHostToDevice);
CHKCUDAERR(err);
double* a_d;
err = cudaMalloc((void**)&a_d, a.size() * sizeof(a[0]));
CHKCUDAERR(err);
err = cudaMemcpy(a_d, &a[0], a.size() * sizeof(a[0]), cudaMemcpyHostToDevice);
CHKCUDAERR(err);
cusparseStatus_t cerr = CUSPARSE_STATUS_SUCCESS;
cerr = cusparseCreateMatDescr(&matDescr);
CHKCUSPARSEERR(cerr);
cerr = cusparseSetMatIndexBase(matDescr, CUSPARSE_INDEX_BASE_ZERO);
CHKCUSPARSEERR(cerr);
cerr = cusparseSetMatType(matDescr, CUSPARSE_MATRIX_TYPE_GENERAL);
CHKCUSPARSEERR(cerr);
cerr = cusparseCreateHybMat(&localMatrix);
CHKCUSPARSEERR(cerr);
cerr = cusparseDcsr2hyb(handle, A.localNumberOfRows, A.localNumberOfColumns,
matDescr, a_d, i_d, j_d, localMatrix, 27,
CUSPARSE_HYB_PARTITION_USER);
CHKCUSPARSEERR(cerr);
#ifndef HPCG_NOMPI
err = cudaMalloc((void**)&elementsToSend,
A.totalToBeSent * sizeof(*elementsToSend));
CHKCUDAERR(err);
err = cudaMemcpy(elementsToSend, A.elementsToSend,
A.totalToBeSent * sizeof(*elementsToSend),
cudaMemcpyHostToDevice);
CHKCUDAERR(err);
err = cudaMalloc((void**)&sendBuffer_d, A.totalToBeSent * sizeof(double));
CHKCUDAERR(err);
#endif
// Set up the GS data.
gelusStatus_t gerr = GELUS_STATUS_SUCCESS;
gelusSolveDescription_t solveDescr;
gerr = gelusCreateSolveDescr(&solveDescr);
CHKGELUSERR(gerr);
gerr = gelusSetSolveOperation(solveDescr, GELUS_OPERATION_NON_TRANSPOSE);
CHKGELUSERR(gerr);
gerr = gelusSetSolveFillMode(solveDescr, GELUS_FILL_MODE_FULL);
CHKGELUSERR(gerr);
gerr = gelusSetSolveStorageFormat(solveDescr, GELUS_STORAGE_FORMAT_HYB);
CHKGELUSERR(gerr);
gerr = gelusSetOptimizationLevel(solveDescr, GELUS_OPTIMIZATION_LEVEL_THREE);
CHKGELUSERR(gerr);
gerr = cugelusCreateSorIterationData(&gsContext);
CHKGELUSERR(gerr);
#ifdef HPCG_DEBUG
std::cout << A.localNumberOfRows << std::endl;
std::cout << A.localNumberOfColumns << std::endl;
std::cout << A.localNumberOfNonzeros << std::endl;
int myrank;
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
if (myrank == 0) {
dumpMatrix(std::cout, i, j, a);
}
#endif
gerr = cugelusDcsrsor_iteration_analysis(
//.........这里部分代码省略.........
示例14: main
/* Solve Ax=b using the conjugate gradient method a) without any preconditioning, b) using an Incomplete Cholesky preconditioner and c) using an ILU0 preconditioner. */
int main(int argc, char **argv)
{
const int max_iter = 1000;
int k, M = 0, N = 0, nz = 0, *I = NULL, *J = NULL;
int *d_col, *d_row;
int qatest = 0;
const float tol = 1e-12f;
float *x, *rhs;
float r0, r1, alpha, beta;
float *d_val, *d_x;
float *d_zm1, *d_zm2, *d_rm2;
float *d_r, *d_p, *d_omega, *d_y;
float *val = NULL;
float *d_valsILU0;
float *valsILU0;
float rsum, diff, err = 0.0;
float qaerr1, qaerr2 = 0.0;
float dot, numerator, denominator, nalpha;
const float floatone = 1.0;
const float floatzero = 0.0;
int nErrors = 0;
printf("conjugateGradientPrecond starting...\n");
/* QA testing mode */
if (checkCmdLineFlag(argc, (const char **)argv, "qatest"))
{
qatest = 1;
}
/* This will pick the best possible CUDA capable device */
cudaDeviceProp deviceProp;
int devID = findCudaDevice(argc, (const char **)argv);
printf("GPU selected Device ID = %d \n", devID);
if (devID < 0)
{
printf("Invalid GPU device %d selected, exiting...\n", devID);
exit(EXIT_SUCCESS);
}
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID));
/* Statistics about the GPU device */
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x11)
{
printf("%s: requires a minimum CUDA compute 1.1 capability\n", sSDKname);
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
exit(EXIT_SUCCESS);
}
/* Generate a random tridiagonal symmetric matrix in CSR (Compressed Sparse Row) format */
M = N = 16384;
nz = 5*N-4*(int)sqrt((double)N);
I = (int *)malloc(sizeof(int)*(N+1)); // csr row pointers for matrix A
J = (int *)malloc(sizeof(int)*nz); // csr column indices for matrix A
val = (float *)malloc(sizeof(float)*nz); // csr values for matrix A
x = (float *)malloc(sizeof(float)*N);
rhs = (float *)malloc(sizeof(float)*N);
for (int i = 0; i < N; i++)
{
rhs[i] = 0.0; // Initialize RHS
x[i] = 0.0; // Initial approximation of solution
}
genLaplace(I, J, val, M, N, nz, rhs);
/* Create CUBLAS context */
cublasHandle_t cublasHandle = 0;
cublasStatus_t cublasStatus;
cublasStatus = cublasCreate(&cublasHandle);
checkCudaErrors(cublasStatus);
/* Create CUSPARSE context */
cusparseHandle_t cusparseHandle = 0;
cusparseStatus_t cusparseStatus;
cusparseStatus = cusparseCreate(&cusparseHandle);
checkCudaErrors(cusparseStatus);
/* Description of the A matrix*/
cusparseMatDescr_t descr = 0;
cusparseStatus = cusparseCreateMatDescr(&descr);
checkCudaErrors(cusparseStatus);
//.........这里部分代码省略.........
示例15: magma_d_spmv
extern "C" magma_int_t
magma_d_spmv(
double alpha,
magma_d_matrix A,
magma_d_matrix x,
double beta,
magma_d_matrix y,
magma_queue_t queue )
{
magma_int_t info = 0;
magma_d_matrix x2={Magma_CSR};
cusparseHandle_t cusparseHandle = 0;
cusparseMatDescr_t descr = 0;
// make sure RHS is a dense matrix
if ( x.storage_type != Magma_DENSE ) {
printf("error: only dense vectors are supported for SpMV.\n");
info = MAGMA_ERR_NOT_SUPPORTED;
goto cleanup;
}
if ( A.memory_location != x.memory_location ||
x.memory_location != y.memory_location ) {
printf("error: linear algebra objects are not located in same memory!\n");
printf("memory locations are: %d %d %d\n",
A.memory_location, x.memory_location, y.memory_location );
info = MAGMA_ERR_INVALID_PTR;
goto cleanup;
}
// DEV case
if ( A.memory_location == Magma_DEV ) {
if ( A.num_cols == x.num_rows && x.num_cols == 1 ) {
if ( A.storage_type == Magma_CSR || A.storage_type == Magma_CUCSR
|| A.storage_type == Magma_CSRL
|| A.storage_type == Magma_CSRU ) {
CHECK_CUSPARSE( cusparseCreate( &cusparseHandle ));
CHECK_CUSPARSE( cusparseSetStream( cusparseHandle, queue->cuda_stream() ));
CHECK_CUSPARSE( cusparseCreateMatDescr( &descr ));
CHECK_CUSPARSE( cusparseSetMatType( descr, CUSPARSE_MATRIX_TYPE_GENERAL ));
CHECK_CUSPARSE( cusparseSetMatIndexBase( descr, CUSPARSE_INDEX_BASE_ZERO ));
cusparseDcsrmv( cusparseHandle,CUSPARSE_OPERATION_NON_TRANSPOSE,
A.num_rows, A.num_cols, A.nnz, &alpha, descr,
A.dval, A.drow, A.dcol, x.dval, &beta, y.dval );
}
else if ( A.storage_type == Magma_ELL ) {
//printf("using ELLPACKT kernel for SpMV: ");
CHECK( magma_dgeelltmv( MagmaNoTrans, A.num_rows, A.num_cols,
A.max_nnz_row, alpha, A.dval, A.dcol, x.dval, beta,
y.dval, queue ));
//printf("done.\n");
}
else if ( A.storage_type == Magma_ELLPACKT ) {
//printf("using ELL kernel for SpMV: ");
CHECK( magma_dgeellmv( MagmaNoTrans, A.num_rows, A.num_cols,
A.max_nnz_row, alpha, A.dval, A.dcol, x.dval, beta,
y.dval, queue ));
//printf("done.\n");
}
else if ( A.storage_type == Magma_ELLRT ) {
//printf("using ELLRT kernel for SpMV: ");
CHECK( magma_dgeellrtmv( MagmaNoTrans, A.num_rows, A.num_cols,
A.max_nnz_row, alpha, A.dval, A.dcol, A.drow, x.dval,
beta, y.dval, A.alignment, A.blocksize, queue ));
//printf("done.\n");
}
else if ( A.storage_type == Magma_SELLP ) {
//printf("using SELLP kernel for SpMV: ");
CHECK( magma_dgesellpmv( MagmaNoTrans, A.num_rows, A.num_cols,
A.blocksize, A.numblocks, A.alignment,
alpha, A.dval, A.dcol, A.drow, x.dval, beta, y.dval, queue ));
//printf("done.\n");
}
else if ( A.storage_type == Magma_DENSE ) {
//printf("using DENSE kernel for SpMV: ");
magmablas_dgemv( MagmaNoTrans, A.num_rows, A.num_cols, alpha,
A.dval, A.num_rows, x.dval, 1, beta, y.dval,
1, queue );
//printf("done.\n");
}
else if ( A.storage_type == Magma_SPMVFUNCTION ) {
//printf("using DENSE kernel for SpMV: ");
CHECK( magma_dcustomspmv( alpha, x, beta, y, queue ));
//printf("done.\n");
}
else if ( A.storage_type == Magma_BCSR ) {
//printf("using CUSPARSE BCSR kernel for SpMV: ");
// CUSPARSE context //
cusparseDirection_t dirA = CUSPARSE_DIRECTION_ROW;
int mb = magma_ceildiv( A.num_rows, A.blocksize );
int nb = magma_ceildiv( A.num_cols, A.blocksize );
CHECK_CUSPARSE( cusparseCreate( &cusparseHandle ));
CHECK_CUSPARSE( cusparseSetStream( cusparseHandle, queue->cuda_stream() ));
CHECK_CUSPARSE( cusparseCreateMatDescr( &descr ));
cusparseDbsrmv( cusparseHandle, dirA,
CUSPARSE_OPERATION_NON_TRANSPOSE, mb, nb, A.numblocks,
//.........这里部分代码省略.........