This article collects typical usage examples of the C++ DeviceContext::id method. If you have been wondering what exactly DeviceContext::id does in C++, how it is called, or where to find examples of it in use, the curated code samples here may help. You can also look further into usage examples of the DeviceContext class to which this method belongs.
Below, 1 code example of the DeviceContext::id method is shown; examples are sorted by popularity by default. You can upvote code you like or find useful, and your feedback helps the system recommend better C++ code examples.
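Before the full test, here is a minimal sketch of the dispatch pattern that DeviceContext::id serves. It is an illustration following the same Caffe OpenCL-branch API used in the example below, not code from the original project, and the wrapper name gemm_on_default_device is made up for the illustration: the default DeviceContext is queried for its backend, and on the GreenTea/OpenCL path the context id returned by dc->id() is passed as the first argument of greentea_gpu_gemm so the call is issued against the correct OpenCL device.

// Minimal sketch (illustration only): dispatching a GEMM call based on the
// backend of the default DeviceContext. Sizes and names are placeholders.
template<typename Dtype>
void gemm_on_default_device(const Dtype* A, const Dtype* B, Dtype* C) {
  DeviceContext *dc = Caffe::GetDefaultDeviceContext();
  if (dc->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
    // CUDA path: no explicit device context id is needed.
    caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 2, 4, 3, 1.,
                          A, B, 0., C);
#endif  // USE_CUDA
  } else {
#ifdef USE_GREENTEA
    // GreenTea/OpenCL path: dc->id() selects the device context; the buffers
    // are passed as cl_mem handles, each followed by an offset.
    greentea_gpu_gemm<Dtype>(dc->id(), CblasNoTrans, CblasNoTrans,
                             2, 4, 3, 1.,
                             (cl_mem) A, 0, (cl_mem) B, 0, 0.,
                             (cl_mem) C, 0);
#endif  // USE_GREENTEA
  }
}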
Example 1: A
TYPED_TEST(GemmTest, TestGemmCPUGPU) {
  DeviceContext *dc = Caffe::GetDefaultDeviceContext();
  Blob<TypeParam> A(1, 1, 2, 3, Caffe::GetDefaultDeviceContext());
  Blob<TypeParam> B(1, 1, 3, 4, Caffe::GetDefaultDeviceContext());
  Blob<TypeParam> C(1, 1, 2, 4, Caffe::GetDefaultDeviceContext());
  TypeParam data[12] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
  TypeParam A_reshape_data[6] = {1, 4, 2, 5, 3, 6};
  TypeParam B_reshape_data[12] = {1, 5, 9, 2, 6, 10, 3, 7, 11, 4, 8, 12};
  TypeParam result[8] = {38, 44, 50, 56, 83, 98, 113, 128};
  caffe_cpu_copy(6, data, A.mutable_cpu_data());
  caffe_cpu_copy(12, data, B.mutable_cpu_data());
  // [1, 2, 3; 4, 5, 6] * [1, 2, 3, 4; 5, 6, 7, 8; 9, 10, 11, 12]
  caffe_cpu_gemm<TypeParam>(CblasNoTrans, CblasNoTrans, 2, 4, 3, 1.,
                            A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data());
  for (int i = 0; i < 8; ++i) {
    EXPECT_EQ(C.cpu_data()[i], result[i]);
  }
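  // Repeat the same multiplication on the device. CUDA builds call
  // caffe_gpu_gemm directly; GreenTea (OpenCL) builds pass dc->id() along
  // with cl_mem handles and per-buffer offsets.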
  if (dc->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
    caffe_gpu_gemm<TypeParam>(CblasNoTrans, CblasNoTrans, 2, 4, 3, 1.,
                              A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data());
#endif  // USE_CUDA
  } else {
#ifdef USE_GREENTEA
    greentea_gpu_gemm<TypeParam>(dc->id(), CblasNoTrans, CblasNoTrans,
                                 2, 4, 3, 1.,
                                 (cl_mem) (A.gpu_data()), 0,
                                 (cl_mem) (B.gpu_data()), 0, 0.,
                                 (cl_mem) (C.mutable_gpu_data()), 0);
#endif  // USE_GREENTEA
  }
  for (int i = 0; i < 8; ++i) {
    EXPECT_EQ(C.cpu_data()[i], result[i]);
  }

  // Test when we have a transposed A
  A.Reshape(1, 1, 3, 2);
  caffe_cpu_copy(6, A_reshape_data, A.mutable_cpu_data());
  caffe_cpu_gemm<TypeParam>(CblasTrans, CblasNoTrans, 2, 4, 3, 1.,
                            A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data());
  for (int i = 0; i < 8; ++i) {
    EXPECT_EQ(C.cpu_data()[i], result[i]);
  }
  if (dc->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
    caffe_gpu_gemm<TypeParam>(CblasTrans, CblasNoTrans, 2, 4, 3, 1.,
                              A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data());
#endif  // USE_CUDA
  } else {
#ifdef USE_GREENTEA
    greentea_gpu_gemm<TypeParam>(dc->id(), CblasTrans, CblasNoTrans,
                                 2, 4, 3, 1.,
                                 (cl_mem) (A.gpu_data()), 0,
                                 (cl_mem) (B.gpu_data()), 0,
                                 0., (cl_mem) (C.mutable_gpu_data()), 0);
#endif  // USE_GREENTEA
  }
  for (int i = 0; i < 8; ++i) {
    EXPECT_EQ(C.cpu_data()[i], result[i]);
  }

  // Test when we have a transposed A and a transposed B too
  B.Reshape(1, 1, 4, 3);
  caffe_cpu_copy(12, B_reshape_data, B.mutable_cpu_data());
  caffe_cpu_gemm<TypeParam>(CblasTrans, CblasTrans, 2, 4, 3, 1.,
                            A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data());
  for (int i = 0; i < 8; ++i) {
    EXPECT_EQ(C.cpu_data()[i], result[i]);
  }
  if (dc->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
    caffe_gpu_gemm<TypeParam>(CblasTrans, CblasTrans, 2, 4, 3, 1.,
                              A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data());
#endif  // USE_CUDA
  } else {
#ifdef USE_GREENTEA
    greentea_gpu_gemm<TypeParam>(dc->id(), CblasTrans, CblasTrans,
                                 2, 4, 3, 1.,
                                 (cl_mem) (A.gpu_data()), 0,
                                 (cl_mem) (B.gpu_data()), 0, 0.,
                                 (cl_mem) (C.mutable_gpu_data()), 0);
#endif  // USE_GREENTEA
  }
  for (int i = 0; i < 8; ++i) {
    EXPECT_EQ(C.cpu_data()[i], result[i]);
  }

  // Test when we have a transposed B
  A.Reshape(1, 1, 2, 3);
  caffe_cpu_copy(6, data, A.mutable_cpu_data());
  //......... part of the code omitted here .........