This article collects typical usage examples of the C++ method boost::compute::command_queue::get_context. If you are wondering what command_queue::get_context does, how to call it, or what real-world uses look like, the hand-picked code examples below should help. You can also read further about the class this method belongs to, boost::compute::command_queue.
The 15 code examples of command_queue::get_context shown below are sorted by popularity by default.
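Before diving into the examples, here is a minimal, self-contained sketch of what get_context() is for: it returns the OpenCL context the queue was created in, and that context is what Boost.Compute containers, buffers, and kernels need at construction time so they live in the same context as the queue that will operate on them. (The variable names below are illustrative and not taken from any of the examples.)

#include <boost/compute/core.hpp>
#include <boost/compute/container/vector.hpp>

namespace compute = boost::compute;

int main()
{
    // queue on the default device, provided by Boost.Compute
    compute::command_queue queue = compute::system::default_queue();

    // the context the queue belongs to
    const compute::context &context = queue.get_context();

    // allocate device memory in that same context
    compute::vector<float> device_vec(1024, context);

    return 0;
}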
Example 1: matrix_transpose
decltype(auto) matrix_transpose(
        InputRange const& input, OutputRange& output,
        int row_size, int col_size,
        boost::compute::command_queue& queue) {
    NEU_ASSERT(row_size*col_size == range::distance(input));
    static auto transpose_kernel =
        neu::make_kernel(neu::layer::impl::matrix_transpose_kernel_source,
            "matrix_transpose", queue.get_context());
    transpose_kernel.set_args(
        range::get_buffer(input),
        static_cast<cl_int>(range::get_begin_index(input)),
        range::get_buffer(output),
        static_cast<cl_int>(range::get_begin_index(output)),
        static_cast<cl_int>(row_size),
        static_cast<cl_int>(col_size));
    std::size_t global[2] = {
        static_cast<std::size_t>(((col_size-1)/32+1)*32),
        static_cast<std::size_t>(((row_size-1)/32+1)*32)
    };
    std::size_t local[2] = {
        static_cast<std::size_t>(32),
        static_cast<std::size_t>(32)
    };
    queue.enqueue_nd_range_kernel(transpose_kernel, 2, nullptr, global, local);
}
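A hedged usage sketch for the function above, pieced together from the neu helpers that appear in Example 2 (gpu_vector, range::to_range). Whether row_size counts elements per row or the number of rows is not obvious from this snippet alone, so treat the 3/2 arguments below as illustrative only.

// Hypothetical call site, assuming neu's gpu_vector and range helpers from Example 2.
std::vector<float> host = {1, 2, 3,
                           4, 5, 6};                    // 2x3 row-major matrix
gpu_vector input(host.begin(), host.end(), queue);
gpu_vector output(queue.get_context());
output.resize(host.size(), queue);

auto input_range  = range::to_range(input);
auto output_range = range::to_range(output);
matrix_transpose(input_range, output_range, 3, 2, queue);  // illustrative sizes
queue.finish();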
Example 2: call
static decltype(auto) call(
        std::vector<neu::layer::any_layer>& layers,
        int batch_size,
        InputRange const& initial_input, OutputRange& result_output,
        boost::compute::command_queue& queue) {
    gpu_vector input(initial_input.begin(), initial_input.end(), queue);
    gpu_vector output(queue.get_context());
    int i = 0;
    for(auto& l : layers) {
        output.resize(::neu::layer::output_dim(l)*batch_size, queue);
        /*
        std::cout << "whole" << ::neu::layer::whole_output_size(l) << std::endl;
        std::cout << "i" << i << std::endl;
        std::cout << "aa" << output.size() << std::endl;
        */
        auto output_range = range::to_range(output);
#ifdef NEU_BENCHMARK_ENABLE
        boost::timer t;
#endif //NEU_BENCHMARK_ENABLE
        l.test_forward(batch_size,
            range::to_range(input), output_range, queue);
#ifdef NEU_BENCHMARK_ENABLE
        queue.finish();
        std::cout << "layer" << i << "\ttest_forward\t" << t.elapsed() << " secs" << std::endl;
#endif //NEU_BENCHMARK_ENABLE
        input.swap(output);
        ++i;
    }
    range::copy(input, result_output, queue);
}
Example 3: test_fill
inline void test_fill(T v1, T v2, T v3, bc::command_queue queue) {
    if(boost::is_same<typename bc::scalar_type<T>::type, bc::double_>::value &&
            !queue.get_device().supports_extension("cl_khr_fp64")) {
        std::cerr << "Skipping test_fill<" << bc::type_name<T>() << ">() "
                     "on device which doesn't support cl_khr_fp64" << std::endl;
        return;
    }

    bc::vector<T> vector(4, queue.get_context());
    bc::fill(vector.begin(), vector.end(), v1, queue);
    queue.finish();
    CHECK_RANGE_EQUAL(T, 4, vector, (v1, v1, v1, v1));

    vector.resize(1000, queue);
    bc::fill(vector.begin(), vector.end(), v2, queue);
    queue.finish();
    BOOST_CHECK_EQUAL(vector.front(), v2);
    BOOST_CHECK_EQUAL(vector.back(), v2);

    bc::fill(vector.begin() + 500, vector.end(), v3, queue);
    queue.finish();
    BOOST_CHECK_EQUAL(vector.front(), v2);
    BOOST_CHECK_EQUAL(vector[499], v2);
    BOOST_CHECK_EQUAL(vector[500], v3);
    BOOST_CHECK_EQUAL(vector.back(), v3);
}
Example 4: test_fill_n
inline void test_fill_n(T v1, T v2, T v3, bc::command_queue queue) {
    bc::vector<T> vector(4, queue.get_context());
    bc::fill_n(vector.begin(), 4, v1, queue);
    queue.finish();
    CHECK_RANGE_EQUAL(T, 4, vector, (v1, v1, v1, v1));

    bc::fill_n(vector.begin(), 3, v2, queue);
    queue.finish();
    CHECK_RANGE_EQUAL(T, 4, vector, (v2, v2, v2, v1));

    bc::fill_n(vector.begin() + 1, 2, v3, queue);
    queue.finish();
    CHECK_RANGE_EQUAL(T, 4, vector, (v2, v3, v3, v1));

    bc::fill_n(vector.begin(), 4, v2, queue);
    queue.finish();
    CHECK_RANGE_EQUAL(T, 4, vector, (v2, v2, v2, v2));

    // fill last element
    bc::fill_n(vector.end() - 1, 1, v3, queue);
    queue.finish();
    CHECK_RANGE_EQUAL(T, 4, vector, (v2, v2, v2, v3));

    // fill first element
    bc::fill_n(vector.begin(), 1, v1, queue);
    queue.finish();
    CHECK_RANGE_EQUAL(T, 4, vector, (v1, v2, v2, v3));
}
Example 5: test_fill_n
inline void test_fill_n(T v1, T v2, T v3, bc::command_queue queue) {
    if(boost::is_same<typename bc::scalar_type<T>::type, bc::double_>::value &&
            !queue.get_device().supports_extension("cl_khr_fp64")) {
        std::cerr << "Skipping test_fill_n<" << bc::type_name<T>() << ">() "
                     "on device which doesn't support cl_khr_fp64" << std::endl;
        return;
    }

    bc::vector<T> vector(4, queue.get_context());
    bc::fill_n(vector.begin(), 4, v1, queue);
    queue.finish();
    CHECK_RANGE_EQUAL(T, 4, vector, (v1, v1, v1, v1));

    bc::fill_n(vector.begin(), 3, v2, queue);
    queue.finish();
    CHECK_RANGE_EQUAL(T, 4, vector, (v2, v2, v2, v1));

    bc::fill_n(vector.begin() + 1, 2, v3, queue);
    queue.finish();
    CHECK_RANGE_EQUAL(T, 4, vector, (v2, v3, v3, v1));

    bc::fill_n(vector.begin(), 4, v2, queue);
    queue.finish();
    CHECK_RANGE_EQUAL(T, 4, vector, (v2, v2, v2, v2));

    // fill last element
    bc::fill_n(vector.end() - 1, 1, v3, queue);
    queue.finish();
    CHECK_RANGE_EQUAL(T, 4, vector, (v2, v2, v2, v3));

    // fill first element
    bc::fill_n(vector.begin(), 1, v1, queue);
    queue.finish();
    CHECK_RANGE_EQUAL(T, 4, vector, (v1, v2, v2, v3));
}
Example 6: perf_random_number_engine
void perf_random_number_engine(const size_t size,
                               const size_t trials,
                               compute::command_queue& queue)
{
    typedef typename Engine::result_type T;

    // create random number engine
    Engine engine(queue);

    // create vector on the device
    std::cout << "size = " << size << std::endl;
    compute::vector<T> vector(size, queue.get_context());

    // generate random numbers
    perf_timer t;
    for(size_t i = 0; i < trials; i++){
        t.start();
        engine.generate(vector.begin(), vector.end(), queue);
        queue.finish();
        t.stop();
    }

    // print result
    std::cout << "time: " << t.min_time() / 1e6 << " ms" << std::endl;
    std::cout << "rate: " << perf_rate<T>(size, t.min_time()) << " MB/s" << std::endl;
}
Example 7: device_vector
device_vector(const boost::compute::command_queue &q, size_t n,
        const T *host = 0, mem_flags flags = MEM_READ_WRITE)
{
    if (host && !(flags & CL_MEM_USE_HOST_PTR))
        flags |= CL_MEM_COPY_HOST_PTR;

    if (n)
        buffer = boost::compute::buffer(q.get_context(), n * sizeof(T),
                flags, static_cast<void*>(const_cast<T*>(host)));
}
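A brief usage sketch for this wrapper, assuming device_vector is a class template over T as the constructor suggests; everything else here is illustrative.

// Hypothetical usage: wrap host data in a device-side buffer.
boost::compute::command_queue queue = boost::compute::system::default_queue();

std::vector<float> host(256, 1.0f);

// Allocates a 256-element buffer in queue.get_context(); because a host pointer
// is passed without CL_MEM_USE_HOST_PTR, the constructor adds CL_MEM_COPY_HOST_PTR
// and the data is copied to the device at creation time.
device_vector<float> d(queue, host.size(), host.data());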
Example 8: generate
void generate(OutputIterator first, OutputIterator last, Function op, boost::compute::command_queue &queue)
{
    boost::compute::vector<T> tmp(std::distance(first, last), queue.get_context());

    BOOST_COMPUTE_FUNCTION(T, max_random, (const T x),
    {
        if(get_global_id(0) < 1)
            return (ValueType) MAX_RANDOM;
        else
            return (ValueType) 0;
    });
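    // The snippet is truncated here; the original continuation (and the role of
    // the unused `op` parameter) is not shown. A generic sketch, purely an
    // assumption, of how a BOOST_COMPUTE_FUNCTION is typically consumed:
    // evaluate it over the temporary vector and copy the result into [first, last).
    boost::compute::transform(tmp.begin(), tmp.end(), tmp.begin(), max_random, queue);
    boost::compute::copy(tmp.begin(), tmp.end(), first, queue);
    queue.finish();
}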
Example 9: test_copy_if_odd
void test_copy_if_odd(compute::command_queue &queue)
{
    // create input and output vectors on the device
    const compute::context &context = queue.get_context();
    compute::vector<int> input(PERF_N, context);
    compute::vector<int> output(PERF_N, context);

    // generate random numbers between 1 and 10
    compute::default_random_engine rng(queue);
    compute::uniform_int_distribution<int> d(1, 10);
    d.generate(input.begin(), input.end(), rng, queue);

    BOOST_COMPUTE_FUNCTION(bool, is_odd, (int x),
    {
        return x & 1;
    });
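    // The snippet is truncated here. A plausible continuation (a sketch, not the
    // original code) runs Boost.Compute's copy_if with the predicate defined above:
    compute::vector<int>::iterator last =
        compute::copy_if(input.begin(), input.end(), output.begin(), is_odd, queue);
    queue.finish();

    std::cout << "copied " << (last - output.begin()) << " odd values out of "
              << PERF_N << std::endl;
}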
Example 10: tesselate_sphere
// tesselates a sphere with radius, phi_slices, and theta_slices. returns
// a shared opencl/opengl buffer containing the vertex data.
compute::opengl_buffer tesselate_sphere(float radius,
                                        size_t phi_slices,
                                        size_t theta_slices,
                                        compute::command_queue &queue)
{
    using compute::dim;

    const compute::context &context = queue.get_context();

    const size_t vertex_count = phi_slices * theta_slices;

    // create opengl buffer
    GLuint vbo;
    vtkgl::GenBuffersARB(1, &vbo);
    vtkgl::BindBufferARB(vtkgl::ARRAY_BUFFER, vbo);
    vtkgl::BufferDataARB(vtkgl::ARRAY_BUFFER,
                         sizeof(float) * 4 * vertex_count,
                         NULL,
                         vtkgl::STREAM_DRAW);
    vtkgl::BindBufferARB(vtkgl::ARRAY_BUFFER, 0);

    // create shared opengl/opencl buffer
    compute::opengl_buffer vertex_buffer(context, vbo);

    // tesselate_sphere kernel source
    const char source[] = BOOST_COMPUTE_STRINGIZE_SOURCE(
        __kernel void tesselate_sphere(float radius,
                                       uint phi_slices,
                                       uint theta_slices,
                                       __global float4 *vertex_buffer)
        {
            const uint phi_i = get_global_id(0);
            const uint theta_i = get_global_id(1);

            const float phi = phi_i * 2.f * M_PI_F / phi_slices;
            const float theta = theta_i * 2.f * M_PI_F / theta_slices;

            float4 v;
            v.x = radius * cos(theta) * cos(phi);
            v.y = radius * cos(theta) * sin(phi);
            v.z = radius * sin(theta);
            v.w = 1.f;

            vertex_buffer[phi_i*phi_slices+theta_i] = v;
        }
    );
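    // The snippet is truncated after the kernel source. Continuation sketch (an
    // assumption, not the original code): build the program, bind the kernel
    // arguments, and launch one work-item per (phi, theta) vertex.
    // opengl_enqueue_acquire_buffer/release_buffer are Boost.Compute's OpenGL
    // interop calls for taking and returning ownership of the shared buffer.
    compute::program program = compute::program::build_with_source(source, context);

    compute::kernel kernel = program.create_kernel("tesselate_sphere");
    kernel.set_arg(0, radius);
    kernel.set_arg(1, static_cast<compute::uint_>(phi_slices));
    kernel.set_arg(2, static_cast<compute::uint_>(theta_slices));
    kernel.set_arg(3, vertex_buffer);

    compute::opengl_enqueue_acquire_buffer(vertex_buffer, queue);
    queue.enqueue_nd_range_kernel(kernel, dim(0, 0), dim(phi_slices, theta_slices), dim(1, 1));
    compute::opengl_enqueue_release_buffer(vertex_buffer, queue);
    queue.finish();

    return vertex_buffer;
}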
Example 11: build_sources
/**
 * If the VEXCL_CACHE_KERNELS macro is defined, program binaries are cached
 * in the filesystem and reused in subsequent runs.
 */
inline boost::compute::program build_sources(
        const boost::compute::command_queue &queue,
        const std::string &source,
        const std::string &options = ""
        )
{
#ifdef VEXCL_SHOW_KERNELS
    std::cout << source << std::endl;
#else
    if (getenv("VEXCL_SHOW_KERNELS"))
        std::cout << source << std::endl;
#endif

    return boost::compute::program::build_with_source(
            source, queue.get_context(),
            options + " " + get_compile_options(queue)
            );
}
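A small usage sketch for this helper; the kernel source and kernel name below are made up for illustration, and get_compile_options() is assumed to come from the same VexCL backend as build_sources itself.

const char src[] =
    "__kernel void scale(__global float *x, float a)\n"
    "{\n"
    "    x[get_global_id(0)] *= a;\n"
    "}\n";

boost::compute::command_queue queue = boost::compute::system::default_queue();
boost::compute::program program = build_sources(queue, src);
boost::compute::kernel  kernel  = program.create_kernel("scale");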
Example 12: test_fill
inline void test_fill(T v1, T v2, T v3, bc::command_queue queue) {
    bc::vector<T> vector(4, queue.get_context());
    bc::fill(vector.begin(), vector.end(), v1, queue);
    queue.finish();
    CHECK_RANGE_EQUAL(T, 4, vector, (v1, v1, v1, v1));

    vector.resize(1000, queue);
    bc::fill(vector.begin(), vector.end(), v2, queue);
    queue.finish();
    BOOST_CHECK_EQUAL(vector.front(), v2);
    BOOST_CHECK_EQUAL(vector.back(), v2);

    bc::fill(vector.begin() + 500, vector.end(), v3, queue);
    queue.finish();
    BOOST_CHECK_EQUAL(vector.front(), v2);
    BOOST_CHECK_EQUAL(vector[499], v2);
    BOOST_CHECK_EQUAL(vector[500], v3);
    BOOST_CHECK_EQUAL(vector.back(), v3);
}
Example 13: box_filter_image
inline void box_filter_image(const compute::image2d &input,
                             compute::image2d &output,
                             compute::uint_ box_height,
                             compute::uint_ box_width,
                             compute::command_queue &queue)
{
    using compute::dim;

    const compute::context &context = queue.get_context();

    // simple box filter kernel source
    const char source[] = BOOST_COMPUTE_STRINGIZE_SOURCE(
        __kernel void box_filter(__read_only image2d_t input,
                                 __write_only image2d_t output,
                                 uint box_height,
                                 uint box_width)
        {
            int x = get_global_id(0);
            int y = get_global_id(1);
            int h = get_image_height(input);
            int w = get_image_width(input);
            int k = box_width;
            int l = box_height;

            if(x < k/2 || y < l/2 || x >= w-(k/2) || y >= h-(l/2)){
                write_imagef(output, (int2)(x, y), (float4)(0, 0, 0, 1));
            }
            else {
                const sampler_t sampler = CLK_ADDRESS_NONE | CLK_FILTER_NEAREST;

                float4 sum = { 0, 0, 0, 0 };
                for(int i = 0; i < k; i++){
                    for(int j = 0; j < l; j++){
                        sum += read_imagef(input, sampler, (int2)(x+i-k, y+j-l));
                    }
                }
                sum /= (float) k * l;

                float4 value = (float4)( sum.x, sum.y, sum.z, 1.f );
                write_imagef(output, (int2)(x, y), value);
            }
        }
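    );  // closes BOOST_COMPUTE_STRINGIZE_SOURCE; the original snippet is truncated above

    // Continuation sketch (an assumption, not the original code): compile the kernel,
    // bind its arguments, and launch one work-item per pixel of the input image.
    compute::program program = compute::program::build_with_source(source, context);
    compute::kernel kernel = program.create_kernel("box_filter");
    kernel.set_arg(0, input);
    kernel.set_arg(1, output);
    kernel.set_arg(2, box_height);
    kernel.set_arg(3, box_width);

    queue.enqueue_nd_range_kernel(kernel, dim(0, 0),
                                  dim(input.width(), input.height()), dim(1, 1));
    queue.finish();
}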
Example 14: vector
vector(vector_expression<E, gpu_tag> const& e, boost::compute::command_queue& queue)
    : m_storage(e().size(), queue.get_context())
    , m_queue(&queue){
    assign(*this, e);
}
Example 15: vector
/// \brief Constructor of a vector with a predefined size
/// By default, its elements are uninitialized.
/// \param size initial size of the vector
/// \param queue the opencl queue to use by this vector
explicit vector(size_type size, boost::compute::command_queue& queue = boost::compute::system::default_queue())
    : m_storage(size, queue.get_context()), m_queue(&queue){}