This article collects typical usage examples of the C++ function ops_halo_exchanges. If you are unsure what ops_halo_exchanges does or how to call it, the curated code examples below should help.
The following presents 15 code examples of the ops_halo_exchanges function, sorted by popularity.
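Before the examples, here is a minimal sketch of where ops_halo_exchanges sits in a generated OPS host stub. It is not taken from any single example below; the loop name and the two-argument setup are hypothetical, and the sketch simply follows the pattern the examples share: a host/device exchange, the halo exchange over the loop range, the kernel call, and the dirty-bit updates. It assumes the usual OPS library headers are included elsewhere, as in the generated files shown below.
// Sketch only: the loop name and argument count are assumptions, not part of
// any example on this page. The OPS calls themselves appear in every example.
void ops_par_loop_example_kernel(char const *name, ops_block block, int dim,
                                 int *range, ops_arg arg0, ops_arg arg1) {
  ops_arg args[2] = {arg0, arg1};
  // ... compute the locally owned range and set up the data pointers ...

  // bring host and device copies of each argument up to date
#ifdef OPS_GPU
  ops_H_D_exchanges_device(args, 2);
#else
  ops_H_D_exchanges_host(args, 2);
#endif
  // exchange MPI halos for every dataset whose stencil reaches outside the
  // locally owned part of the iteration range
  ops_halo_exchanges(args, 2, range);
#ifdef OPS_GPU
  ops_H_D_exchanges_device(args, 2);
#else
  ops_H_D_exchanges_host(args, 2);
#endif

  // ... call the generated kernel wrapper over the local range ...

  // mark written datasets dirty so later loops trigger a fresh exchange
#ifdef OPS_GPU
  ops_set_dirtybit_device(args, 2);
#else
  ops_set_dirtybit_host(args, 2);
#endif
  ops_set_halo_dirtybit3(&args[0], range);
}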
Example 1: ops_par_loop_PdV_kernel_predict
//......... part of the code omitted here .........
ydim3_PdV_kernel_predict = ydim3;
ydim3_PdV_kernel_predict_h = ydim3;
xdim4_PdV_kernel_predict = xdim4;
xdim4_PdV_kernel_predict_h = xdim4;
ydim4_PdV_kernel_predict = ydim4;
ydim4_PdV_kernel_predict_h = ydim4;
xdim5_PdV_kernel_predict = xdim5;
xdim5_PdV_kernel_predict_h = xdim5;
ydim5_PdV_kernel_predict = ydim5;
ydim5_PdV_kernel_predict_h = ydim5;
xdim6_PdV_kernel_predict = xdim6;
xdim6_PdV_kernel_predict_h = xdim6;
ydim6_PdV_kernel_predict = ydim6;
ydim6_PdV_kernel_predict_h = ydim6;
xdim7_PdV_kernel_predict = xdim7;
xdim7_PdV_kernel_predict_h = xdim7;
ydim7_PdV_kernel_predict = ydim7;
ydim7_PdV_kernel_predict_h = ydim7;
xdim8_PdV_kernel_predict = xdim8;
xdim8_PdV_kernel_predict_h = xdim8;
ydim8_PdV_kernel_predict = ydim8;
ydim8_PdV_kernel_predict_h = ydim8;
xdim9_PdV_kernel_predict = xdim9;
xdim9_PdV_kernel_predict_h = xdim9;
ydim9_PdV_kernel_predict = ydim9;
ydim9_PdV_kernel_predict_h = ydim9;
xdim10_PdV_kernel_predict = xdim10;
xdim10_PdV_kernel_predict_h = xdim10;
ydim10_PdV_kernel_predict = ydim10;
ydim10_PdV_kernel_predict_h = ydim10;
xdim11_PdV_kernel_predict = xdim11;
xdim11_PdV_kernel_predict_h = xdim11;
ydim11_PdV_kernel_predict = ydim11;
ydim11_PdV_kernel_predict_h = ydim11;
xdim12_PdV_kernel_predict = xdim12;
xdim12_PdV_kernel_predict_h = xdim12;
ydim12_PdV_kernel_predict = ydim12;
ydim12_PdV_kernel_predict_h = ydim12;
xdim13_PdV_kernel_predict = xdim13;
xdim13_PdV_kernel_predict_h = xdim13;
ydim13_PdV_kernel_predict = ydim13;
ydim13_PdV_kernel_predict_h = ydim13;
}
// Halo Exchanges
#ifdef OPS_GPU
ops_H_D_exchanges_device(args, 14);
#else
ops_H_D_exchanges_host(args, 14);
#endif
ops_halo_exchanges(args, 14, range);
#ifdef OPS_GPU
ops_H_D_exchanges_device(args, 14);
#else
ops_H_D_exchanges_host(args, 14);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[101].mpi_time += t2 - t1;
}
PdV_kernel_predict_c_wrapper(p_a0, p_a1, p_a2, p_a3, p_a4, p_a5, p_a6, p_a7,
p_a8, p_a9, p_a10, p_a11, p_a12, p_a13, x_size,
y_size, z_size);
if (OPS_diags > 1) {
ops_timers_core(&c1, &t1);
OPS_kernels[101].time += t1 - t2;
}
#ifdef OPS_GPU
ops_set_dirtybit_device(args, 14);
#else
ops_set_dirtybit_host(args, 14);
#endif
ops_set_halo_dirtybit3(&args[4], range);
ops_set_halo_dirtybit3(&args[8], range);
ops_set_halo_dirtybit3(&args[11], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[101].mpi_time += t2 - t1;
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg6);
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg7);
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg8);
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg9);
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg10);
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg11);
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg12);
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg13);
}
}
Example 2: ops_par_loop_advec_mom_kernel1_x_nonvector
//......... part of the code omitted here .........
#ifdef OPS_GPU
double *p_a2 = (double *)((char *)args[2].data_d + base2);
#else
double *p_a2 = (double *)((char *)args[2].data + base2);
#endif
int base3 = args[3].dat->base_offset +
(OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size) *
start[0] * args[3].stencil->stride[0];
base3 = base3 +
(OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size) *
args[3].dat->size[0] * start[1] * args[3].stencil->stride[1];
#ifdef OPS_GPU
double *p_a3 = (double *)((char *)args[3].data_d + base3);
#else
double *p_a3 = (double *)((char *)args[3].data + base3);
#endif
int base4 = args[4].dat->base_offset +
(OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size) *
start[0] * args[4].stencil->stride[0];
base4 = base4 +
(OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size) *
args[4].dat->size[0] * start[1] * args[4].stencil->stride[1];
#ifdef OPS_GPU
double *p_a4 = (double *)((char *)args[4].data_d + base4);
#else
double *p_a4 = (double *)((char *)args[4].data + base4);
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
// initialize global variable with the dimension of dats
xdim0 = args[0].dat->size[0];
xdim1 = args[1].dat->size[0];
xdim2 = args[2].dat->size[0];
xdim3 = args[3].dat->size[0];
xdim4 = args[4].dat->size[0];
if (xdim0 != xdim0_advec_mom_kernel1_x_nonvector_h ||
xdim1 != xdim1_advec_mom_kernel1_x_nonvector_h ||
xdim2 != xdim2_advec_mom_kernel1_x_nonvector_h ||
xdim3 != xdim3_advec_mom_kernel1_x_nonvector_h ||
xdim4 != xdim4_advec_mom_kernel1_x_nonvector_h) {
xdim0_advec_mom_kernel1_x_nonvector = xdim0;
xdim0_advec_mom_kernel1_x_nonvector_h = xdim0;
xdim1_advec_mom_kernel1_x_nonvector = xdim1;
xdim1_advec_mom_kernel1_x_nonvector_h = xdim1;
xdim2_advec_mom_kernel1_x_nonvector = xdim2;
xdim2_advec_mom_kernel1_x_nonvector_h = xdim2;
xdim3_advec_mom_kernel1_x_nonvector = xdim3;
xdim3_advec_mom_kernel1_x_nonvector_h = xdim3;
xdim4_advec_mom_kernel1_x_nonvector = xdim4;
xdim4_advec_mom_kernel1_x_nonvector_h = xdim4;
}
// Halo Exchanges
#ifdef OPS_GPU
ops_H_D_exchanges_device(args, 5);
#else
ops_H_D_exchanges_host(args, 5);
#endif
ops_halo_exchanges(args, 5, range);
#ifdef OPS_GPU
ops_H_D_exchanges_device(args, 5);
#else
ops_H_D_exchanges_host(args, 5);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[75].mpi_time += t2 - t1;
}
advec_mom_kernel1_x_nonvector_c_wrapper(p_a0, p_a1, p_a2, p_a3, p_a4, x_size,
y_size);
if (OPS_diags > 1) {
ops_timers_core(&c1, &t1);
OPS_kernels[75].time += t1 - t2;
}
#ifdef OPS_GPU
ops_set_dirtybit_device(args, 5);
#else
ops_set_dirtybit_host(args, 5);
#endif
ops_set_halo_dirtybit3(&args[2], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[75].mpi_time += t2 - t1;
OPS_kernels[75].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[75].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[75].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[75].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[75].transfer += ops_compute_transfer(dim, start, end, &arg4);
}
}
Example 3: ops_par_loop_update_halo_kernel5_plus_4_left
//......... part of the code omitted here .........
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
// set up initial pointers and exchange halos if necessary
int base0 = args[0].dat->base_offset +
(OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size) *
start[0] * args[0].stencil->stride[0];
base0 = base0 +
(OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size) *
args[0].dat->size[0] * start[1] * args[0].stencil->stride[1];
base0 = base0 +
(OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size) *
args[0].dat->size[0] * args[0].dat->size[1] * start[2] *
args[0].stencil->stride[2];
p_a[0] = (char *)args[0].data + base0;
int base1 = args[1].dat->base_offset +
(OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size) *
start[0] * args[1].stencil->stride[0];
base1 = base1 +
(OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size) *
args[1].dat->size[0] * start[1] * args[1].stencil->stride[1];
base1 = base1 +
(OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size) *
args[1].dat->size[0] * args[1].dat->size[1] * start[2] *
args[1].stencil->stride[2];
p_a[1] = (char *)args[1].data + base1;
p_a[2] = args[2].data;
// initialize global variable with the dimension of dats
xdim0 = args[0].dat->size[0];
ydim0 = args[0].dat->size[1];
xdim1 = args[1].dat->size[0];
ydim1 = args[1].dat->size[1];
// Halo Exchanges
ops_H_D_exchanges_host(args, 3);
ops_halo_exchanges(args, 3, range);
ops_H_D_exchanges_host(args, 3);
if (OPS_diags > 1) {
ops_timers_core(&c1, &t1);
OPS_kernels[88].mpi_time += t1 - t2;
}
int n_x;
for (int n_z = start[2]; n_z < end[2]; n_z++) {
for (int n_y = start[1]; n_y < end[1]; n_y++) {
#pragma novector
for (n_x = start[0];
n_x < start[0] + ((end[0] - start[0]) / SIMD_VEC) * SIMD_VEC;
n_x += SIMD_VEC) {
// call kernel function, passing in pointers to data - vectorised
#pragma simd
for (int i = 0; i < SIMD_VEC; i++) {
update_halo_kernel5_plus_4_left((double *)p_a[0] + i * 1 * 1,
(double *)p_a[1] + i * 1 * 1,
(int *)p_a[2]);
}
// shift pointers to data x direction
p_a[0] = p_a[0] + (dat0 * off0_0) * SIMD_VEC;
p_a[1] = p_a[1] + (dat1 * off1_0) * SIMD_VEC;
}
for (int n_x = start[0] + ((end[0] - start[0]) / SIMD_VEC) * SIMD_VEC;
n_x < end[0]; n_x++) {
// call kernel function, passing in pointers to data - remainder
update_halo_kernel5_plus_4_left((double *)p_a[0], (double *)p_a[1],
(int *)p_a[2]);
// shift pointers to data x direction
p_a[0] = p_a[0] + (dat0 * off0_0);
p_a[1] = p_a[1] + (dat1 * off1_0);
}
// shift pointers to data y direction
p_a[0] = p_a[0] + (dat0 * off0_1);
p_a[1] = p_a[1] + (dat1 * off1_1);
}
// shift pointers to data z direction
p_a[0] = p_a[0] + (dat0 * off0_2);
p_a[1] = p_a[1] + (dat1 * off1_2);
}
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[88].time += t2 - t1;
}
ops_set_dirtybit_host(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c1, &t1);
OPS_kernels[88].mpi_time += t1 - t2;
OPS_kernels[88].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[88].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
Example 4: ops_par_loop_update_halo_kernel1_b2
//......... part of the code omitted here .........
xdim5 = args[5].dat->size[0];
ydim5 = args[5].dat->size[1];
xdim6 = args[6].dat->size[0];
ydim6 = args[6].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel1_b2_h ||
ydim0 != ydim0_update_halo_kernel1_b2_h ||
xdim1 != xdim1_update_halo_kernel1_b2_h ||
ydim1 != ydim1_update_halo_kernel1_b2_h ||
xdim2 != xdim2_update_halo_kernel1_b2_h ||
ydim2 != ydim2_update_halo_kernel1_b2_h ||
xdim3 != xdim3_update_halo_kernel1_b2_h ||
ydim3 != ydim3_update_halo_kernel1_b2_h ||
xdim4 != xdim4_update_halo_kernel1_b2_h ||
ydim4 != ydim4_update_halo_kernel1_b2_h ||
xdim5 != xdim5_update_halo_kernel1_b2_h ||
ydim5 != ydim5_update_halo_kernel1_b2_h ||
xdim6 != xdim6_update_halo_kernel1_b2_h ||
ydim6 != ydim6_update_halo_kernel1_b2_h) {
xdim0_update_halo_kernel1_b2 = xdim0;
xdim0_update_halo_kernel1_b2_h = xdim0;
ydim0_update_halo_kernel1_b2 = ydim0;
ydim0_update_halo_kernel1_b2_h = ydim0;
xdim1_update_halo_kernel1_b2 = xdim1;
xdim1_update_halo_kernel1_b2_h = xdim1;
ydim1_update_halo_kernel1_b2 = ydim1;
ydim1_update_halo_kernel1_b2_h = ydim1;
xdim2_update_halo_kernel1_b2 = xdim2;
xdim2_update_halo_kernel1_b2_h = xdim2;
ydim2_update_halo_kernel1_b2 = ydim2;
ydim2_update_halo_kernel1_b2_h = ydim2;
xdim3_update_halo_kernel1_b2 = xdim3;
xdim3_update_halo_kernel1_b2_h = xdim3;
ydim3_update_halo_kernel1_b2 = ydim3;
ydim3_update_halo_kernel1_b2_h = ydim3;
xdim4_update_halo_kernel1_b2 = xdim4;
xdim4_update_halo_kernel1_b2_h = xdim4;
ydim4_update_halo_kernel1_b2 = ydim4;
ydim4_update_halo_kernel1_b2_h = ydim4;
xdim5_update_halo_kernel1_b2 = xdim5;
xdim5_update_halo_kernel1_b2_h = xdim5;
ydim5_update_halo_kernel1_b2 = ydim5;
ydim5_update_halo_kernel1_b2_h = ydim5;
xdim6_update_halo_kernel1_b2 = xdim6;
xdim6_update_halo_kernel1_b2_h = xdim6;
ydim6_update_halo_kernel1_b2 = ydim6;
ydim6_update_halo_kernel1_b2_h = ydim6;
}
// Halo Exchanges
#ifdef OPS_GPU
ops_H_D_exchanges_device(args, 8);
#else
ops_H_D_exchanges_host(args, 8);
#endif
ops_halo_exchanges(args, 8, range);
#ifdef OPS_GPU
ops_H_D_exchanges_device(args, 8);
#else
ops_H_D_exchanges_host(args, 8);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[12].mpi_time += t2 - t1;
}
update_halo_kernel1_b2_c_wrapper(p_a0, p_a1, p_a2, p_a3, p_a4, p_a5, p_a6,
p_a7, x_size, y_size, z_size);
if (OPS_diags > 1) {
ops_timers_core(&c1, &t1);
OPS_kernels[12].time += t1 - t2;
}
#ifdef OPS_GPU
ops_set_dirtybit_device(args, 8);
#else
ops_set_dirtybit_host(args, 8);
#endif
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
ops_set_halo_dirtybit3(&args[2], range);
ops_set_halo_dirtybit3(&args[3], range);
ops_set_halo_dirtybit3(&args[4], range);
ops_set_halo_dirtybit3(&args[5], range);
ops_set_halo_dirtybit3(&args[6], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[12].mpi_time += t2 - t1;
OPS_kernels[12].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[12].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[12].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[12].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[12].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[12].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[12].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
Example 5: ops_par_loop_update_halo_kernel3_plus_2_a
//......... part of the code omitted here .........
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = 1 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
args[0].dat->size[0] * 1 * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
args[0].dat->size[0] * 1 * args[0].dat->size[1] * 1 *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = 1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
args[1].dat->size[0] * 1 * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
args[1].dat->size[0] * 1 * args[1].dat->size[1] * 1 *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
ops_H_D_exchanges_device(args, 3);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[60].mpi_time += t2 - t1;
}
if (globalWorkSize[0] > 0 && globalWorkSize[1] > 0 && globalWorkSize[2] > 0) {
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[60], 0, sizeof(cl_mem),
(void *)&arg0.data_d));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[60], 1, sizeof(cl_mem),
(void *)&arg1.data_d));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[60], 2, sizeof(cl_mem),
(void *)&arg2.data_d));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[60], 3, sizeof(cl_int),
(void *)&base0));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[60], 4, sizeof(cl_int),
(void *)&base1));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[60], 5, sizeof(cl_int),
(void *)&x_size));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[60], 6, sizeof(cl_int),
(void *)&y_size));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[60], 7, sizeof(cl_int),
(void *)&z_size));
// call/enqueue opencl kernel wrapper function
clSafeCall(clEnqueueNDRangeKernel(
OPS_opencl_core.command_queue, OPS_opencl_core.kernel[60], 3, NULL,
globalWorkSize, localWorkSize, 0, NULL, NULL));
}
if (OPS_diags > 1) {
clSafeCall(clFinish(OPS_opencl_core.command_queue));
}
if (OPS_diags > 1) {
ops_timers_core(&c1, &t1);
OPS_kernels[60].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[60].mpi_time += t2 - t1;
OPS_kernels[60].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[60].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
Example 6: ops_par_loop_update_halo_kernel1_l2
//......... part of the code omitted here .........
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d];
#endif //OPS_MPI
int base3 = 1 *
(start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]);
base3 = base3 + args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]);
base3 = base3 + args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]);
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d];
#endif //OPS_MPI
int base4 = 1 *
(start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]);
base4 = base4 + args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]);
base4 = base4 + args[4].dat->size[0] * args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]);
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d];
#endif //OPS_MPI
int base5 = 1 *
(start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]);
base5 = base5 + args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]);
base5 = base5 + args[5].dat->size[0] * args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]);
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d];
#endif //OPS_MPI
int base6 = 1 *
(start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]);
base6 = base6 + args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]);
base6 = base6 + args[6].dat->size[0] * args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]);
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args,8,range);
ops_H_D_exchanges_device(args, 8);
ops_timers_core(&c1,&t1);
OPS_kernels[45].mpi_time += t1-t2;
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[45], 0, sizeof(cl_mem), (void*) &arg0.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[45], 1, sizeof(cl_mem), (void*) &arg1.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[45], 2, sizeof(cl_mem), (void*) &arg2.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[45], 3, sizeof(cl_mem), (void*) &arg3.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[45], 4, sizeof(cl_mem), (void*) &arg4.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[45], 5, sizeof(cl_mem), (void*) &arg5.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[45], 6, sizeof(cl_mem), (void*) &arg6.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[45], 7, sizeof(cl_mem), (void*) &arg7.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[45], 8, sizeof(cl_int), (void*) &base0 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[45], 9, sizeof(cl_int), (void*) &base1 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[45], 10, sizeof(cl_int), (void*) &base2 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[45], 11, sizeof(cl_int), (void*) &base3 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[45], 12, sizeof(cl_int), (void*) &base4 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[45], 13, sizeof(cl_int), (void*) &base5 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[45], 14, sizeof(cl_int), (void*) &base6 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[45], 15, sizeof(cl_int), (void*) &x_size ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[45], 16, sizeof(cl_int), (void*) &y_size ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[45], 17, sizeof(cl_int), (void*) &z_size ));
//call/enqueue opencl kernel wrapper function
clSafeCall( clEnqueueNDRangeKernel(OPS_opencl_core.command_queue, OPS_opencl_core.kernel[45], 3, NULL, globalWorkSize, localWorkSize, 0, NULL, NULL) );
if (OPS_diags>1) {
clSafeCall( clFinish(OPS_opencl_core.command_queue) );
}
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
ops_set_halo_dirtybit3(&args[6],range);
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[45].time += t2-t1;
OPS_kernels[45].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[45].transfer += ops_compute_transfer(dim, range, &arg1);
OPS_kernels[45].transfer += ops_compute_transfer(dim, range, &arg2);
OPS_kernels[45].transfer += ops_compute_transfer(dim, range, &arg3);
OPS_kernels[45].transfer += ops_compute_transfer(dim, range, &arg4);
OPS_kernels[45].transfer += ops_compute_transfer(dim, range, &arg5);
OPS_kernels[45].transfer += ops_compute_transfer(dim, range, &arg6);
}
Example 7: ops_par_loop_preproc_kernel
//......... part of the code omitted here .........
start[2] * args[10].stencil->stride[2];
p_a[10] = (char *)args[10].data + base10;
p_a[11] = (char *)arg_idx;
//initialize global variable with the dimension of dats
xdim0 = args[0].dat->size[0];
ydim0 = args[0].dat->size[1];
xdim1 = args[1].dat->size[0];
ydim1 = args[1].dat->size[1];
xdim2 = args[2].dat->size[0];
ydim2 = args[2].dat->size[1];
xdim3 = args[3].dat->size[0];
ydim3 = args[3].dat->size[1];
xdim4 = args[4].dat->size[0];
ydim4 = args[4].dat->size[1];
xdim5 = args[5].dat->size[0];
ydim5 = args[5].dat->size[1];
xdim6 = args[6].dat->size[0];
ydim6 = args[6].dat->size[1];
xdim7 = args[7].dat->size[0];
ydim7 = args[7].dat->size[1];
xdim8 = args[8].dat->size[0];
ydim8 = args[8].dat->size[1];
xdim9 = args[9].dat->size[0];
ydim9 = args[9].dat->size[1];
xdim10 = args[10].dat->size[0];
ydim10 = args[10].dat->size[1];
//Halo Exchanges
ops_H_D_exchanges_host(args, 12);
ops_halo_exchanges(args,12,range);
ops_H_D_exchanges_host(args, 12);
if (OPS_diags > 1) {
ops_timers_core(&c1,&t1);
OPS_kernels[1].mpi_time += t1-t2;
}
int n_x;
for ( int n_z=start[2]; n_z<end[2]; n_z++ ){
for ( int n_y=start[1]; n_y<end[1]; n_y++ ){
#pragma novector
for( n_x=start[0]; n_x<start[0]+((end[0]-start[0])/SIMD_VEC)*SIMD_VEC; n_x+=SIMD_VEC ) {
//call kernel function, passing in pointers to data - vectorised
for ( int i=0; i<SIMD_VEC; i++ ){
preproc_kernel( (double *)p_a[0]+ i*1*1, (double *)p_a[1]+ i*1*1, (double *)p_a[2]+ i*1*1,
(double *)p_a[3]+ i*1*1, (double *)p_a[4]+ i*1*1, (double *)p_a[5]+ i*1*1, (double *)p_a[6]+ i*1*1,
(double *)p_a[7]+ i*1*1, (double *)p_a[8]+ i*1*1, (double *)p_a[9]+ i*1*1, (double *)p_a[10]+ i*1*1,
(int *)p_a[11] );
arg_idx[0]++;
}
//shift pointers to data x direction
p_a[0]= p_a[0] + (dat0 * off0_0)*SIMD_VEC;
p_a[1]= p_a[1] + (dat1 * off1_0)*SIMD_VEC;
p_a[2]= p_a[2] + (dat2 * off2_0)*SIMD_VEC;
p_a[3]= p_a[3] + (dat3 * off3_0)*SIMD_VEC;
p_a[4]= p_a[4] + (dat4 * off4_0)*SIMD_VEC;
p_a[5]= p_a[5] + (dat5 * off5_0)*SIMD_VEC;
p_a[6]= p_a[6] + (dat6 * off6_0)*SIMD_VEC;
p_a[7]= p_a[7] + (dat7 * off7_0)*SIMD_VEC;
p_a[8]= p_a[8] + (dat8 * off8_0)*SIMD_VEC;
Example 8: ops_par_loop_update_halo_kernel2_zvel_plus_4_right
//......... part of the code omitted here .........
base0 = base0 +
(OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size) *
args[0].dat->size[0] * start[1] * args[0].stencil->stride[1];
base0 = base0 +
(OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size) *
args[0].dat->size[0] * args[0].dat->size[1] * start[2] *
args[0].stencil->stride[2];
#ifdef OPS_GPU
double *p_a0 = (double *)((char *)args[0].data_d + base0);
#else
double *p_a0 = (double *)((char *)args[0].data + base0);
#endif
int base1 = args[1].dat->base_offset +
(OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size) *
start[0] * args[1].stencil->stride[0];
base1 = base1 +
(OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size) *
args[1].dat->size[0] * start[1] * args[1].stencil->stride[1];
base1 = base1 +
(OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size) *
args[1].dat->size[0] * args[1].dat->size[1] * start[2] *
args[1].stencil->stride[2];
#ifdef OPS_GPU
double *p_a1 = (double *)((char *)args[1].data_d + base1);
#else
double *p_a1 = (double *)((char *)args[1].data + base1);
#endif
#ifdef OPS_GPU
int *p_a2 = (int *)args[2].data_d;
#else
int *p_a2 = arg2h;
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
// initialize global variable with the dimension of dats
xdim0 = args[0].dat->size[0];
ydim0 = args[0].dat->size[1];
xdim1 = args[1].dat->size[0];
ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_4_right_h ||
ydim0 != ydim0_update_halo_kernel2_zvel_plus_4_right_h ||
xdim1 != xdim1_update_halo_kernel2_zvel_plus_4_right_h ||
ydim1 != ydim1_update_halo_kernel2_zvel_plus_4_right_h) {
xdim0_update_halo_kernel2_zvel_plus_4_right = xdim0;
xdim0_update_halo_kernel2_zvel_plus_4_right_h = xdim0;
ydim0_update_halo_kernel2_zvel_plus_4_right = ydim0;
ydim0_update_halo_kernel2_zvel_plus_4_right_h = ydim0;
xdim1_update_halo_kernel2_zvel_plus_4_right = xdim1;
xdim1_update_halo_kernel2_zvel_plus_4_right_h = xdim1;
ydim1_update_halo_kernel2_zvel_plus_4_right = ydim1;
ydim1_update_halo_kernel2_zvel_plus_4_right_h = ydim1;
}
// Halo Exchanges
#ifdef OPS_GPU
ops_H_D_exchanges_device(args, 3);
#else
ops_H_D_exchanges_host(args, 3);
#endif
ops_halo_exchanges(args, 3, range);
#ifdef OPS_GPU
ops_H_D_exchanges_device(args, 3);
#else
ops_H_D_exchanges_host(args, 3);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[53].mpi_time += t2 - t1;
}
update_halo_kernel2_zvel_plus_4_right_c_wrapper(p_a0, p_a1, p_a2, x_size,
y_size, z_size);
if (OPS_diags > 1) {
ops_timers_core(&c1, &t1);
OPS_kernels[53].time += t1 - t2;
}
#ifdef OPS_GPU
ops_set_dirtybit_device(args, 3);
#else
ops_set_dirtybit_host(args, 3);
#endif
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[53].mpi_time += t2 - t1;
OPS_kernels[53].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[53].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
Example 9: ops_par_loop_left_bndcon
// host stub function
void ops_par_loop_left_bndcon(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
//Timing
double t1,t2,c1,c2;
char *p_a[2];
int offs[2][2];
ops_arg args[2] = { arg0, arg1};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args,2,range,2)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(2,"left_bndcon");
OPS_kernels[2].count++;
ops_timers_core(&c2,&t2);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<2; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
#ifdef OPS_DEBUG
ops_register_args(args, "left_bndcon");
#endif
offs[0][0] = args[0].stencil->stride[0]*1; //unit step in x dimension
offs[0][1] = off2D(1, &start[0],
&end[0],args[0].dat->size, args[0].stencil->stride) - offs[0][0];
int arg_idx[2];
#ifdef OPS_MPI
arg_idx[0] = sb->decomp_disp[0]+start[0];
arg_idx[1] = sb->decomp_disp[1]+start[1];
#else
arg_idx[0] = start[0];
arg_idx[1] = start[1];
#endif
int off0_0 = offs[0][0];
int off0_1 = offs[0][1];
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
//set up initial pointers and exchange halos if necessary
int base0 = args[0].dat->base_offset + (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size) * start[0] * args[0].stencil->stride[0];
base0 = base0+ (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size) *
args[0].dat->size[0] *
start[1] * args[0].stencil->stride[1];
p_a[0] = (char *)args[0].data + base0;
p_a[1] = (char *)arg_idx;
//initialize global variable with the dimension of dats
xdim0 = args[0].dat->size[0];
//Halo Exchanges
ops_H_D_exchanges_host(args, 2);
ops_halo_exchanges(args,2,range);
ops_H_D_exchanges_host(args, 2);
if (OPS_diags > 1) {
ops_timers_core(&c1,&t1);
OPS_kernels[2].mpi_time += t1-t2;
}
int n_x;
//......... part of the code omitted here .........
Example 10: ops_par_loop_tea_leaf_norm2_kernel
// host stub function
void ops_par_loop_tea_leaf_norm2_kernel(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1) {
// Timing
double t1, t2, c1, c2;
int offs[2][2];
ops_arg args[2] = {arg0, arg1};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 2, range, 39))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(39, "tea_leaf_norm2_kernel");
OPS_kernels[39].count++;
ops_timers_core(&c1, &t1);
}
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
#endif
// compute locally allocated range for the sub-block
int start[2];
int end[2];
int arg_idx[2];
#ifdef OPS_MPI
if (!sb->owned)
return;
for (int n = 0; n < 2; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
if (end[n] < start[n])
end[n] = start[n];
}
#else
for (int n = 0; n < 2; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
#ifdef OPS_DEBUG
ops_register_args(args, "tea_leaf_norm2_kernel");
#endif
offs[0][0] = args[0].stencil->stride[0] * 1; // unit step in x dimension
offs[0][1] =
off2D(1, &start[0], &end[0], args[0].dat->size, args[0].stencil->stride) -
offs[0][0];
int off0_0 = offs[0][0];
int off0_1 = offs[0][1];
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
#ifdef OPS_MPI
double *arg1h =
(double *)(((ops_reduction)args[1].data)->data +
((ops_reduction)args[1].data)->size * block->index);
#else
double *arg1h = (double *)(((ops_reduction)args[1].data)->data);
#endif
// Halo Exchanges
ops_H_D_exchanges_host(args, 2);
ops_halo_exchanges(args, 2, range);
ops_H_D_exchanges_host(args, 2);
#ifdef _OPENMP
int nthreads = omp_get_max_threads();
#else
int nthreads = 1;
#endif
// allocate and initialise arrays for global reduction
// assumes a max of MAX_REDUCT_THREADS threads with a cache line size of 64
// bytes
double arg_gbl1[MAX(1, 64) * MAX_REDUCT_THREADS];
for (int thr = 0; thr < nthreads; thr++) {
for (int d = 0; d < 1; d++) {
arg_gbl1[d + 64 * thr] = ZERO_double;
}
//......... part of the code omitted here .........
Example 11: ops_par_loop_calc_dt_kernel_get
//......... part of the code omitted here .........
#endif //OPS_MPI
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif //OPS_MPI
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]);
#ifdef OPS_GPU
double *p_a0 = (double *)((char *)args[0].data_d + base0);
#else
double *p_a0 = (double *)((char *)args[0].data + base0);
#endif
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
#endif //OPS_MPI
int base1 = dat1 * 1 *
(start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]);
#ifdef OPS_GPU
double *p_a1 = (double *)((char *)args[1].data_d + base1);
#else
double *p_a1 = (double *)((char *)args[1].data + base1);
#endif
double *p_a2 = arg2h;
double *p_a3 = arg3h;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d];
#endif //OPS_MPI
int base4 = dat4 * 1 *
(start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]);
#ifdef OPS_GPU
double *p_a4 = (double *)((char *)args[4].data_d + base4);
#else
double *p_a4 = (double *)((char *)args[4].data + base4);
#endif
double *p_a5 = arg5h;
#ifdef OPS_GPU
ops_H_D_exchanges_device(args, 6);
#else
ops_H_D_exchanges_host(args, 6);
#endif
ops_halo_exchanges(args,6,range);
ops_timers_core(&c1,&t1);
OPS_kernels[128].mpi_time += t1-t2;
calc_dt_kernel_get_c_wrapper(
p_a0,
p_a1,
p_a2,
p_a3,
p_a4,
p_a5,
x_size, y_size, z_size);
ops_timers_core(&c2,&t2);
OPS_kernels[128].time += t2-t1;
#ifdef OPS_GPU
ops_set_dirtybit_device(args, 6);
#else
ops_set_dirtybit_host(args, 6);
#endif
//Update kernel record
OPS_kernels[128].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[128].transfer += ops_compute_transfer(dim, range, &arg1);
OPS_kernels[128].transfer += ops_compute_transfer(dim, range, &arg4);
}
Example 12: ops_par_loop_poisson_kernel_initialguess
// host stub function
void ops_par_loop_poisson_kernel_initialguess(char const *name, ops_block block,
int dim, int *range,
ops_arg arg0) {
// Timing
double t1, t2, c1, c2;
ops_arg args[1] = {arg0};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 1, range, 2))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(2, "poisson_kernel_initialguess");
OPS_kernels[2].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[2];
int end[2];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
#endif // OPS_MPI
int arg_idx[2];
int arg_idx_base[2];
#ifdef OPS_MPI
if (compute_ranges(args, 1, block, range, start, end, arg_idx) < 0)
return;
#else // OPS_MPI
for (int n = 0; n < 2; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
arg_idx[n] = start[n];
}
#endif
for (int n = 0; n < 2; n++) {
arg_idx_base[n] = arg_idx[n];
}
int dat0 = args[0].dat->elem_size;
// set up initial pointers
int base0 = args[0].dat->base_offset +
(OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size) *
start[0] * args[0].stencil->stride[0];
base0 = base0 +
(OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size) *
args[0].dat->size[0] * start[1] * args[0].stencil->stride[1];
#ifdef OPS_GPU
double *p_a0 = (double *)((char *)args[0].data_d + base0);
#else
double *p_a0 = (double *)((char *)args[0].data + base0);
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
// initialize global variable with the dimension of dats
xdim0 = args[0].dat->size[0];
if (xdim0 != xdim0_poisson_kernel_initialguess_h) {
xdim0_poisson_kernel_initialguess = xdim0;
xdim0_poisson_kernel_initialguess_h = xdim0;
}
// Halo Exchanges
#ifdef OPS_GPU
ops_H_D_exchanges_device(args, 1);
#else
ops_H_D_exchanges_host(args, 1);
#endif
ops_halo_exchanges(args, 1, range);
#ifdef OPS_GPU
ops_H_D_exchanges_device(args, 1);
#else
ops_H_D_exchanges_host(args, 1);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[2].mpi_time += t2 - t1;
}
poisson_kernel_initialguess_c_wrapper(p_a0, x_size, y_size);
if (OPS_diags > 1) {
ops_timers_core(&c1, &t1);
OPS_kernels[2].time += t1 - t2;
}
#ifdef OPS_GPU
ops_set_dirtybit_device(args, 1);
#else
ops_set_dirtybit_host(args, 1);
#endif
ops_set_halo_dirtybit3(&args[0], range);
//......... part of the code omitted here .........
Example 13: ops_par_loop_calc_dt_kernel_get
//......... part of the code omitted here .........
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
int r_bytes2 = reduct_bytes/sizeof(double);
arg2.data = OPS_reduct_h + reduct_bytes;
arg2.data_d = OPS_reduct_d;// + reduct_bytes;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++) ((double *)arg2.data)[d+b*1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
int r_bytes3 = reduct_bytes/sizeof(double);
arg3.data = OPS_reduct_h + reduct_bytes;
arg3.data_d = OPS_reduct_d;// + reduct_bytes;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++) ((double *)arg3.data)[d+b*1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
mvReductArraysToDevice(reduct_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif //OPS_MPI
int base0 = 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0 + args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
#endif //OPS_MPI
int base1 = 1 *
(start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
base1 = base1 + args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args,4,range);
ops_H_D_exchanges_device(args, 4);
ops_timers_core(&c1,&t1);
OPS_kernels[29].mpi_time += t1-t2;
int nthread = OPS_block_size_x*OPS_block_size_y;
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[29], 0, sizeof(cl_mem), (void*) &arg0.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[29], 1, sizeof(cl_mem), (void*) &arg1.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[29], 2, sizeof(cl_mem), (void*) &arg2.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[29], 3, nthread*sizeof(double), NULL));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[29], 4, sizeof(cl_int), (void*) &r_bytes2 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[29], 5, sizeof(cl_mem), (void*) &arg3.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[29], 6, nthread*sizeof(double), NULL));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[29], 7, sizeof(cl_int), (void*) &r_bytes3 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[29], 8, sizeof(cl_int), (void*) &base0 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[29], 9, sizeof(cl_int), (void*) &base1 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[29], 10, sizeof(cl_int), (void*) &x_size ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[29], 11, sizeof(cl_int), (void*) &y_size ));
//call/enqueue opencl kernel wrapper function
clSafeCall( clEnqueueNDRangeKernel(OPS_opencl_core.command_queue, OPS_opencl_core.kernel[29], 3, NULL, globalWorkSize, localWorkSize, 0, NULL, NULL) );
if (OPS_diags>1) {
clSafeCall( clFinish(OPS_opencl_core.command_queue) );
}
mvReductArraysToHost(reduct_bytes);
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg2h[d] = arg2h[d] + ((double *)arg2.data)[d+b*1];
}
}
arg2.data = (char *)arg2h;
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg3h[d] = arg3h[d] + ((double *)arg3.data)[d+b*1];
}
}
arg3.data = (char *)arg3h;
ops_set_dirtybit_device(args, 4);
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[29].time += t2-t1;
OPS_kernels[29].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[29].transfer += ops_compute_transfer(dim, range, &arg1);
}
Example 14: ops_par_loop_PdV_kernel_predict
//......... part of the code omitted here .........
int base11 = 1 *
(start[0] * args[11].stencil->stride[0] - args[11].dat->base[0] - d_m[0]);
base11 = base11 + args[11].dat->size[0] *
(start[1] * args[11].stencil->stride[1] - args[11].dat->base[1] - d_m[1]);
base11 = base11 + args[11].dat->size[0] * args[11].dat->size[1] *
(start[2] * args[11].stencil->stride[2] - args[11].dat->base[2] - d_m[2]);
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[12].dat->d_m[d] + OPS_sub_dat_list[args[12].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[12].dat->d_m[d];
#endif //OPS_MPI
int base12 = 1 *
(start[0] * args[12].stencil->stride[0] - args[12].dat->base[0] - d_m[0]);
base12 = base12 + args[12].dat->size[0] *
(start[1] * args[12].stencil->stride[1] - args[12].dat->base[1] - d_m[1]);
base12 = base12 + args[12].dat->size[0] * args[12].dat->size[1] *
(start[2] * args[12].stencil->stride[2] - args[12].dat->base[2] - d_m[2]);
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[13].dat->d_m[d] + OPS_sub_dat_list[args[13].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[13].dat->d_m[d];
#endif //OPS_MPI
int base13 = 1 *
(start[0] * args[13].stencil->stride[0] - args[13].dat->base[0] - d_m[0]);
base13 = base13 + args[13].dat->size[0] *
(start[1] * args[13].stencil->stride[1] - args[13].dat->base[1] - d_m[1]);
base13 = base13 + args[13].dat->size[0] * args[13].dat->size[1] *
(start[2] * args[13].stencil->stride[2] - args[13].dat->base[2] - d_m[2]);
ops_H_D_exchanges_device(args, 14);
ops_halo_exchanges(args,14,range);
ops_H_D_exchanges_device(args, 14);
ops_timers_core(&c1,&t1);
OPS_kernels[5].mpi_time += t1-t2;
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 0, sizeof(cl_mem), (void*) &arg0.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 1, sizeof(cl_mem), (void*) &arg1.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 2, sizeof(cl_mem), (void*) &arg2.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 3, sizeof(cl_mem), (void*) &arg3.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 4, sizeof(cl_mem), (void*) &arg4.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 5, sizeof(cl_mem), (void*) &arg5.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 6, sizeof(cl_mem), (void*) &arg6.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 7, sizeof(cl_mem), (void*) &arg7.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 8, sizeof(cl_mem), (void*) &arg8.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 9, sizeof(cl_mem), (void*) &arg9.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 10, sizeof(cl_mem), (void*) &arg10.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 11, sizeof(cl_mem), (void*) &arg11.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 12, sizeof(cl_mem), (void*) &arg12.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 13, sizeof(cl_mem), (void*) &arg13.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 14, sizeof(cl_double), (void*) &dt ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 15, sizeof(cl_int), (void*) &base0 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 16, sizeof(cl_int), (void*) &base1 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 17, sizeof(cl_int), (void*) &base2 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 18, sizeof(cl_int), (void*) &base3 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 19, sizeof(cl_int), (void*) &base4 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 20, sizeof(cl_int), (void*) &base5 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 21, sizeof(cl_int), (void*) &base6 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 22, sizeof(cl_int), (void*) &base7 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 23, sizeof(cl_int), (void*) &base8 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 24, sizeof(cl_int), (void*) &base9 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 25, sizeof(cl_int), (void*) &base10 ));
Example 15: ops_par_loop_calupwindeff_kernel
// host stub function
void ops_par_loop_calupwindeff_kernel(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5,
ops_arg arg6) {
// Timing
double t1, t2, c1, c2;
int offs[7][1];
ops_arg args[7] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 7, range, 11))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(11, "calupwindeff_kernel");
OPS_kernels[11].count++;
ops_timers_core(&c1, &t1);
}
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
#endif
// compute locally allocated range for the sub-block
int start[1];
int end[1];
int arg_idx[1];
#ifdef OPS_MPI
if (!sb->owned)
return;
for (int n = 0; n < 1; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
if (end[n] < start[n])
end[n] = start[n];
}
#else
for (int n = 0; n < 1; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
#ifdef OPS_DEBUG
ops_register_args(args, "calupwindeff_kernel");
#endif
offs[0][0] = args[0].stencil->stride[0] * 1; // unit step in x dimension
offs[1][0] = args[1].stencil->stride[0] * 1; // unit step in x dimension
offs[2][0] = args[2].stencil->stride[0] * 1; // unit step in x dimension
offs[3][0] = args[3].stencil->stride[0] * 1; // unit step in x dimension
offs[4][0] = args[4].stencil->stride[0] * 1; // unit step in x dimension
offs[5][0] = args[5].stencil->stride[0] * 1; // unit step in x dimension
offs[6][0] = args[6].stencil->stride[0] * 1; // unit step in x dimension
int off0_0 = offs[0][0];
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int off1_0 = offs[1][0];
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int off2_0 = offs[2][0];
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int off3_0 = offs[3][0];
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int off4_0 = offs[4][0];
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int off5_0 = offs[5][0];
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int off6_0 = offs[6][0];
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
// Halo Exchanges
ops_H_D_exchanges_host(args, 7);
ops_halo_exchanges(args, 7, range);
//......... part of the code omitted here .........