This page collects typical usage examples of the C++ function ops_set_halo_dirtybit3. If you are wondering how ops_set_halo_dirtybit3 is used in practice, the curated code examples below may help.
Fifteen code examples of the ops_set_halo_dirtybit3 function are shown below, sorted by popularity by default. You can upvote the examples you find useful; your ratings help surface better C++ code examples.
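The pattern shared by all of the examples is the loop epilogue: after the kernel body has executed, the loop sets the device or host dirty bit for all arguments and then calls ops_set_halo_dirtybit3 once for each dat the kernel writes, passing the loop's iteration range. The minimal sketch below only illustrates that calling order; the helper name, header, and argument indices are illustrative assumptions, not part of any specific example.
#include "ops_seq.h"   // usual OPS application header (assumed)

// Hypothetical epilogue after an ops_par_loop has run its kernel:
// mark the data dirty on the device, then flag the halo region of every
// written dat as dirty over the loop's iteration range so that later
// loops trigger a fresh halo exchange.
static void mark_written_dats_dirty(ops_arg *args, int nargs, int *range) {
  ops_set_dirtybit_device(args, nargs);     // or ops_set_dirtybit_host on CPU back-ends
  ops_set_halo_dirtybit3(&args[0], range);  // first written dat (illustrative index)
  ops_set_halo_dirtybit3(&args[1], range);  // second written dat (illustrative index)
}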
Example 1: ops_par_loop_advec_cell_kernel3_zdir
//......... part of the code omitted here .........
d_m[d] = args[7].dat->d_m[d];
#endif
int base7 = 1 * 1 * (start[0] * args[7].stencil->stride[0] -
args[7].dat->base[0] - d_m[0]);
base7 = base7 +
args[7].dat->size[0] * 1 * (start[1] * args[7].stencil->stride[1] -
args[7].dat->base[1] - d_m[1]);
base7 = base7 +
args[7].dat->size[0] * 1 * args[7].dat->size[1] * 1 *
(start[2] * args[7].stencil->stride[2] - args[7].dat->base[2] -
d_m[2]);
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args, 8, range);
ops_H_D_exchanges_device(args, 8);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[118].mpi_time += t2 - t1;
}
if (globalWorkSize[0] > 0 && globalWorkSize[1] > 0 && globalWorkSize[2] > 0) {
clSafeCall(clEnqueueWriteBuffer(
OPS_opencl_core.command_queue, OPS_opencl_core.constant[7], CL_TRUE, 0,
sizeof(field_type) * 1, (void *)&field, 0, NULL, NULL));
clSafeCall(clFlush(OPS_opencl_core.command_queue));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[118], 0, sizeof(cl_mem),
(void *)&arg0.data_d));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[118], 1, sizeof(cl_mem),
(void *)&arg1.data_d));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[118], 2, sizeof(cl_mem),
(void *)&arg2.data_d));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[118], 3, sizeof(cl_mem),
(void *)&arg3.data_d));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[118], 4, sizeof(cl_mem),
(void *)&arg4.data_d));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[118], 5, sizeof(cl_mem),
(void *)&arg5.data_d));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[118], 6, sizeof(cl_mem),
(void *)&arg6.data_d));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[118], 7, sizeof(cl_mem),
(void *)&arg7.data_d));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[118], 8, sizeof(cl_mem),
(void *)&OPS_opencl_core.constant[7]));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[118], 9, sizeof(cl_int),
(void *)&base0));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[118], 10, sizeof(cl_int),
(void *)&base1));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[118], 11, sizeof(cl_int),
(void *)&base2));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[118], 12, sizeof(cl_int),
(void *)&base3));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[118], 13, sizeof(cl_int),
(void *)&base4));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[118], 14, sizeof(cl_int),
(void *)&base5));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[118], 15, sizeof(cl_int),
(void *)&base6));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[118], 16, sizeof(cl_int),
(void *)&base7));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[118], 17, sizeof(cl_int),
(void *)&x_size));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[118], 18, sizeof(cl_int),
(void *)&y_size));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[118], 19, sizeof(cl_int),
(void *)&z_size));
// call/enqueue OpenCL kernel wrapper function
clSafeCall(clEnqueueNDRangeKernel(
OPS_opencl_core.command_queue, OPS_opencl_core.kernel[118], 3, NULL,
globalWorkSize, localWorkSize, 0, NULL, NULL));
}
if (OPS_diags > 1) {
clSafeCall(clFinish(OPS_opencl_core.command_queue));
}
if (OPS_diags > 1) {
ops_timers_core(&c1, &t1);
OPS_kernels[118].time += t1 - t2;
}
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[6], range);
ops_set_halo_dirtybit3(&args[7], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[118].mpi_time += t2 - t1;
OPS_kernels[118].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[118].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[118].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[118].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[118].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[118].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[118].transfer += ops_compute_transfer(dim, start, end, &arg6);
OPS_kernels[118].transfer += ops_compute_transfer(dim, start, end, &arg7);
}
}
Example 2: ops_par_loop_left_bndcon
//......... part of the code omitted here .........
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args,2,range,2)) return;
#endif
ops_timing_realloc(2,"left_bndcon");
OPS_kernels[2].count++;
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<2; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int arg_idx[2];
#ifdef OPS_MPI
arg_idx[0] = sb->decomp_disp[0]+start[0];
arg_idx[1] = sb->decomp_disp[1]+start[1];
#else
arg_idx[0] = start[0];
arg_idx[1] = start[1];
#endif
xdim0 = args[0].dat->size[0];
//Timing
double t1,t2,c1,c2;
ops_timers_core(&c2,&t2);
if (xdim0 != xdim0_left_bndcon_h) {
xdim0_left_bndcon = xdim0;
xdim0_left_bndcon_h = xdim0;
}
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
double *p_a0 = (double *)((char *)args[0].data + base0);
int *p_a1 = NULL;
ops_H_D_exchanges_host(args, 2);
ops_halo_exchanges(args,2,range);
ops_timers_core(&c1,&t1);
OPS_kernels[2].mpi_time += t1-t2;
left_bndcon_c_wrapper(
p_a0,
p_a1,
arg_idx[0], arg_idx[1],
x_size, y_size);
ops_timers_core(&c2,&t2);
OPS_kernels[2].time += t2-t1;
ops_set_dirtybit_host(args, 2);
ops_set_halo_dirtybit3(&args[0],range);
//Update kernel record
OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg0);
}
Example 3: ops_par_loop_update_halo_kernel2_zvel_plus_4_right
//......... part of the code omitted here .........
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = 1 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
args[0].dat->size[0] * 1 * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
args[0].dat->size[0] * 1 * args[0].dat->size[1] * 1 *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = 1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
args[1].dat->size[0] * 1 * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
args[1].dat->size[0] * 1 * args[1].dat->size[1] * 1 *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
ops_H_D_exchanges_device(args, 3);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[53].mpi_time += t2 - t1;
}
if (globalWorkSize[0] > 0 && globalWorkSize[1] > 0 && globalWorkSize[2] > 0) {
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[53], 0, sizeof(cl_mem),
(void *)&arg0.data_d));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[53], 1, sizeof(cl_mem),
(void *)&arg1.data_d));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[53], 2, sizeof(cl_mem),
(void *)&arg2.data_d));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[53], 3, sizeof(cl_int),
(void *)&base0));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[53], 4, sizeof(cl_int),
(void *)&base1));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[53], 5, sizeof(cl_int),
(void *)&x_size));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[53], 6, sizeof(cl_int),
(void *)&y_size));
clSafeCall(clSetKernelArg(OPS_opencl_core.kernel[53], 7, sizeof(cl_int),
(void *)&z_size));
// call/enqueue OpenCL kernel wrapper function
clSafeCall(clEnqueueNDRangeKernel(
OPS_opencl_core.command_queue, OPS_opencl_core.kernel[53], 3, NULL,
globalWorkSize, localWorkSize, 0, NULL, NULL));
}
if (OPS_diags > 1) {
clSafeCall(clFinish(OPS_opencl_core.command_queue));
}
if (OPS_diags > 1) {
ops_timers_core(&c1, &t1);
OPS_kernels[53].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[53].mpi_time += t2 - t1;
OPS_kernels[53].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[53].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
Example 4: ops_par_loop_update_halo_kernel2_xvel_minus_4_right
//......... part of the code omitted here .........
int start2 = start_i;
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start0 * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start1 * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start2 * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start0 * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start1 * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start2 * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data + base1;
p_a[2] = (char *)args[2].data;
for (int n_z = start_i; n_z < finish_i; n_z++) {
for (int n_y = start[1]; n_y < end[1]; n_y++) {
for (int n_x = start[0];
n_x < start[0] + (end[0] - start[0]) / SIMD_VEC; n_x++) {
// call kernel function, passing in pointers to data -vectorised
#pragma simd
for (int i = 0; i < SIMD_VEC; i++) {
update_halo_kernel2_xvel_minus_4_right((double *)p_a[0] + i * 1 * 1,
(double *)p_a[1] + i * 1 * 1,
(int *)p_a[2]);
}
// shift pointers to data x direction
p_a[0] = p_a[0] + (dat0 * off0_0) * SIMD_VEC;
p_a[1] = p_a[1] + (dat1 * off1_0) * SIMD_VEC;
}
for (int n_x = start[0] + ((end[0] - start[0]) / SIMD_VEC) * SIMD_VEC;
n_x < end[0]; n_x++) {
// call kernel function, passing in pointers to data - remainder
update_halo_kernel2_xvel_minus_4_right(
(double *)p_a[0], (double *)p_a[1], (int *)p_a[2]);
// shift pointers to data x direction
p_a[0] = p_a[0] + (dat0 * off0_0);
p_a[1] = p_a[1] + (dat1 * off1_0);
}
// shift pointers to data y direction
p_a[0] = p_a[0] + (dat0 * off0_1);
p_a[1] = p_a[1] + (dat1 * off1_1);
}
// shift pointers to data z direction
p_a[0] = p_a[0] + (dat0 * off0_2);
p_a[1] = p_a[1] + (dat1 * off1_2);
}
}
if (OPS_diags > 1) {
ops_timers_core(&c1, &t1);
OPS_kernels[29].time += t1 - t2;
}
ops_set_dirtybit_host(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[29].mpi_time += t2 - t1;
OPS_kernels[29].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[29].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
Example 5: ops_par_loop_update_halo_kernel1_b1
//......... part of the code omitted here .........
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]);
#ifdef OPS_GPU
double *p_a4 = (double *)((char *)args[4].data_d + base4);
#else
double *p_a4 = (double *)((char *)args[4].data + base4);
#endif
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d];
#endif //OPS_MPI
int base5 = dat5 * 1 *
(start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]);
#ifdef OPS_GPU
double *p_a5 = (double *)((char *)args[5].data_d + base5);
#else
double *p_a5 = (double *)((char *)args[5].data + base5);
#endif
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d];
#endif //OPS_MPI
int base6 = dat6 * 1 *
(start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]);
#ifdef OPS_GPU
double *p_a6 = (double *)((char *)args[6].data_d + base6);
#else
double *p_a6 = (double *)((char *)args[6].data + base6);
#endif
#ifdef OPS_GPU
int *p_a7 = (int *)args[7].data_d;
#else
int *p_a7 = arg7h;
#endif
#ifdef OPS_GPU
ops_H_D_exchanges_device(args, 8);
#else
ops_H_D_exchanges_host(args, 8);
#endif
ops_halo_exchanges(args,8,range);
ops_timers_core(&c1,&t1);
OPS_kernels[42].mpi_time += t1-t2;
update_halo_kernel1_b1_c_wrapper(
p_a0,
p_a1,
p_a2,
p_a3,
p_a4,
p_a5,
p_a6,
p_a7,
x_size, y_size, z_size);
ops_timers_core(&c2,&t2);
OPS_kernels[42].time += t2-t1;
#ifdef OPS_GPU
ops_set_dirtybit_device(args, 8);
#else
ops_set_dirtybit_host(args, 8);
#endif
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
ops_set_halo_dirtybit3(&args[6],range);
//Update kernel record
OPS_kernels[42].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[42].transfer += ops_compute_transfer(dim, range, &arg1);
OPS_kernels[42].transfer += ops_compute_transfer(dim, range, &arg2);
OPS_kernels[42].transfer += ops_compute_transfer(dim, range, &arg3);
OPS_kernels[42].transfer += ops_compute_transfer(dim, range, &arg4);
OPS_kernels[42].transfer += ops_compute_transfer(dim, range, &arg5);
OPS_kernels[42].transfer += ops_compute_transfer(dim, range, &arg6);
}
Example 6: ops_par_loop_viscosity_kernel
//......... part of the code omitted here .........
(start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
base1 = base1 + args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d];
#endif //OPS_MPI
int base2 = 1 *
(start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]);
base2 = base2 + args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]);
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d];
#endif //OPS_MPI
int base3 = 1 *
(start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]);
base3 = base3 + args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]);
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d];
#endif //OPS_MPI
int base4 = 1 *
(start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]);
base4 = base4 + args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]);
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d];
#endif //OPS_MPI
int base5 = 1 *
(start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]);
base5 = base5 + args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]);
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d];
#endif //OPS_MPI
int base6 = 1 *
(start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]);
base6 = base6 + args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]);
ops_H_D_exchanges_device(args, 7);
ops_halo_exchanges(args,7,range);
ops_H_D_exchanges_device(args, 7);
ops_timers_core(&c1,&t1);
OPS_kernels[34].mpi_time += t1-t2;
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[34], 0, sizeof(cl_mem), (void*) &arg0.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[34], 1, sizeof(cl_mem), (void*) &arg1.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[34], 2, sizeof(cl_mem), (void*) &arg2.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[34], 3, sizeof(cl_mem), (void*) &arg3.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[34], 4, sizeof(cl_mem), (void*) &arg4.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[34], 5, sizeof(cl_mem), (void*) &arg5.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[34], 6, sizeof(cl_mem), (void*) &arg6.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[34], 7, sizeof(cl_int), (void*) &base0 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[34], 8, sizeof(cl_int), (void*) &base1 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[34], 9, sizeof(cl_int), (void*) &base2 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[34], 10, sizeof(cl_int), (void*) &base3 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[34], 11, sizeof(cl_int), (void*) &base4 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[34], 12, sizeof(cl_int), (void*) &base5 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[34], 13, sizeof(cl_int), (void*) &base6 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[34], 14, sizeof(cl_int), (void*) &x_size ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[34], 15, sizeof(cl_int), (void*) &y_size ));
//call/enqueue OpenCL kernel wrapper function
clSafeCall( clEnqueueNDRangeKernel(OPS_opencl_core.command_queue, OPS_opencl_core.kernel[34], 3, NULL, globalWorkSize, localWorkSize, 0, NULL, NULL) );
if (OPS_diags>1) {
clSafeCall( clFinish(OPS_opencl_core.command_queue) );
}
ops_set_dirtybit_device(args, 7);
ops_set_halo_dirtybit3(&args[6],range);
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[34].time += t2-t1;
OPS_kernels[34].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[34].transfer += ops_compute_transfer(dim, range, &arg1);
OPS_kernels[34].transfer += ops_compute_transfer(dim, range, &arg2);
OPS_kernels[34].transfer += ops_compute_transfer(dim, range, &arg3);
OPS_kernels[34].transfer += ops_compute_transfer(dim, range, &arg4);
OPS_kernels[34].transfer += ops_compute_transfer(dim, range, &arg5);
OPS_kernels[34].transfer += ops_compute_transfer(dim, range, &arg6);
}
Example 7: ops_par_loop_PdV_kernel_predict
//......... part of the code omitted here .........
ydim3_PdV_kernel_predict = ydim3;
ydim3_PdV_kernel_predict_h = ydim3;
xdim4_PdV_kernel_predict = xdim4;
xdim4_PdV_kernel_predict_h = xdim4;
ydim4_PdV_kernel_predict = ydim4;
ydim4_PdV_kernel_predict_h = ydim4;
xdim5_PdV_kernel_predict = xdim5;
xdim5_PdV_kernel_predict_h = xdim5;
ydim5_PdV_kernel_predict = ydim5;
ydim5_PdV_kernel_predict_h = ydim5;
xdim6_PdV_kernel_predict = xdim6;
xdim6_PdV_kernel_predict_h = xdim6;
ydim6_PdV_kernel_predict = ydim6;
ydim6_PdV_kernel_predict_h = ydim6;
xdim7_PdV_kernel_predict = xdim7;
xdim7_PdV_kernel_predict_h = xdim7;
ydim7_PdV_kernel_predict = ydim7;
ydim7_PdV_kernel_predict_h = ydim7;
xdim8_PdV_kernel_predict = xdim8;
xdim8_PdV_kernel_predict_h = xdim8;
ydim8_PdV_kernel_predict = ydim8;
ydim8_PdV_kernel_predict_h = ydim8;
xdim9_PdV_kernel_predict = xdim9;
xdim9_PdV_kernel_predict_h = xdim9;
ydim9_PdV_kernel_predict = ydim9;
ydim9_PdV_kernel_predict_h = ydim9;
xdim10_PdV_kernel_predict = xdim10;
xdim10_PdV_kernel_predict_h = xdim10;
ydim10_PdV_kernel_predict = ydim10;
ydim10_PdV_kernel_predict_h = ydim10;
xdim11_PdV_kernel_predict = xdim11;
xdim11_PdV_kernel_predict_h = xdim11;
ydim11_PdV_kernel_predict = ydim11;
ydim11_PdV_kernel_predict_h = ydim11;
xdim12_PdV_kernel_predict = xdim12;
xdim12_PdV_kernel_predict_h = xdim12;
ydim12_PdV_kernel_predict = ydim12;
ydim12_PdV_kernel_predict_h = ydim12;
xdim13_PdV_kernel_predict = xdim13;
xdim13_PdV_kernel_predict_h = xdim13;
ydim13_PdV_kernel_predict = ydim13;
ydim13_PdV_kernel_predict_h = ydim13;
}
// Halo Exchanges
#ifdef OPS_GPU
ops_H_D_exchanges_device(args, 14);
#else
ops_H_D_exchanges_host(args, 14);
#endif
ops_halo_exchanges(args, 14, range);
#ifdef OPS_GPU
ops_H_D_exchanges_device(args, 14);
#else
ops_H_D_exchanges_host(args, 14);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[101].mpi_time += t2 - t1;
}
PdV_kernel_predict_c_wrapper(p_a0, p_a1, p_a2, p_a3, p_a4, p_a5, p_a6, p_a7,
p_a8, p_a9, p_a10, p_a11, p_a12, p_a13, x_size,
y_size, z_size);
if (OPS_diags > 1) {
ops_timers_core(&c1, &t1);
OPS_kernels[101].time += t1 - t2;
}
#ifdef OPS_GPU
ops_set_dirtybit_device(args, 14);
#else
ops_set_dirtybit_host(args, 14);
#endif
ops_set_halo_dirtybit3(&args[4], range);
ops_set_halo_dirtybit3(&args[8], range);
ops_set_halo_dirtybit3(&args[11], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[101].mpi_time += t2 - t1;
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg6);
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg7);
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg8);
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg9);
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg10);
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg11);
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg12);
OPS_kernels[101].transfer += ops_compute_transfer(dim, start, end, &arg13);
}
}
Example 8: ops_par_loop_poisson_kernel_populate
//......... part of the code omitted here .........
dat3 * args[3].dat->size[0] * (start1 * args[3].stencil->stride[1] -
args[3].dat->base[1] - d_m[1]);
p_a[3] = (char *)args[3].data + base3;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[4].dat->d_m[d];
#endif
int base4 = dat4 * 1 * (start0 * args[4].stencil->stride[0] -
args[4].dat->base[0] - d_m[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start1 * args[4].stencil->stride[1] -
args[4].dat->base[1] - d_m[1]);
p_a[4] = (char *)args[4].data + base4;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[5].dat->d_m[d];
#endif
int base5 = dat5 * 1 * (start0 * args[5].stencil->stride[0] -
args[5].dat->base[0] - d_m[0]);
base5 = base5 +
dat5 * args[5].dat->size[0] * (start1 * args[5].stencil->stride[1] -
args[5].dat->base[1] - d_m[1]);
p_a[5] = (char *)args[5].data + base5;
for (int n_y = start_i; n_y < finish_i; n_y++) {
for (int n_x = start[0]; n_x < start[0] + (end[0] - start[0]) / SIMD_VEC;
n_x++) {
// call kernel function, passing in pointers to data -vectorised
for (int i = 0; i < SIMD_VEC; i++) {
poisson_kernel_populate((int *)p_a[0], (int *)p_a[1], arg_idx,
(double *)p_a[3] + i * 1 * 1,
(double *)p_a[4] + i * 1 * 1,
(double *)p_a[5] + i * 1 * 1);
arg_idx[0]++;
}
// shift pointers to data x direction
p_a[3] = p_a[3] + (dat3 * off3_0) * SIMD_VEC;
p_a[4] = p_a[4] + (dat4 * off4_0) * SIMD_VEC;
p_a[5] = p_a[5] + (dat5 * off5_0) * SIMD_VEC;
}
for (int n_x = start[0] + ((end[0] - start[0]) / SIMD_VEC) * SIMD_VEC;
n_x < end[0]; n_x++) {
// call kernel function, passing in pointers to data - remainder
poisson_kernel_populate((int *)p_a[0], (int *)p_a[1], arg_idx,
(double *)p_a[3], (double *)p_a[4],
(double *)p_a[5]);
// shift pointers to data x direction
p_a[3] = p_a[3] + (dat3 * off3_0);
p_a[4] = p_a[4] + (dat4 * off4_0);
p_a[5] = p_a[5] + (dat5 * off5_0);
arg_idx[0]++;
}
// shift pointers to data y direction
p_a[3] = p_a[3] + (dat3 * off3_1);
p_a[4] = p_a[4] + (dat4 * off4_1);
p_a[5] = p_a[5] + (dat5 * off5_1);
#ifdef OPS_MPI
arg_idx[0] = sb->decomp_disp[0] + start0;
#else
arg_idx[0] = start0;
#endif
arg_idx[1]++;
}
}
if (OPS_diags > 1) {
ops_timers_core(&c1, &t1);
OPS_kernels[0].time += t1 - t2;
}
ops_set_dirtybit_host(args, 6);
ops_set_halo_dirtybit3(&args[3], range);
ops_set_halo_dirtybit3(&args[4], range);
ops_set_halo_dirtybit3(&args[5], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[0].mpi_time += t2 - t1;
OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
Example 9: ops_par_loop_update_halo_kernel1_ba2
//......... part of the code omitted here .........
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d];
#endif //OPS_MPI
int base6 = dat6 * 1 *
(start0 * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start1 * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start2 * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]);
p_a[6] = (char *)args[6].data + base6;
p_a[7] = (char *)args[7].data;
for ( int n_z=start_i; n_z<finish_i; n_z++ ){
for ( int n_y=start[1]; n_y<end[1]; n_y++ ){
for ( int n_x=start[0]; n_x<start[0]+(end[0]-start[0])/SIMD_VEC; n_x++ ){
//call kernel function, passing in pointers to data -vectorised
#pragma simd
for ( int i=0; i<SIMD_VEC; i++ ){
update_halo_kernel1_ba2( (double * )p_a[0]+ i*1, (double * )p_a[1]+ i*1, (double * )p_a[2]+ i*1,
(double * )p_a[3]+ i*1, (double * )p_a[4]+ i*1, (double * )p_a[5]+ i*1, (double * )p_a[6]+ i*1,
(int * )p_a[7] );
}
//shift pointers to data x direction
p_a[0]= p_a[0] + (dat0 * off0_0)*SIMD_VEC;
p_a[1]= p_a[1] + (dat1 * off1_0)*SIMD_VEC;
p_a[2]= p_a[2] + (dat2 * off2_0)*SIMD_VEC;
p_a[3]= p_a[3] + (dat3 * off3_0)*SIMD_VEC;
p_a[4]= p_a[4] + (dat4 * off4_0)*SIMD_VEC;
p_a[5]= p_a[5] + (dat5 * off5_0)*SIMD_VEC;
p_a[6]= p_a[6] + (dat6 * off6_0)*SIMD_VEC;
}
for ( int n_x=start[0]+((end[0]-start[0])/SIMD_VEC)*SIMD_VEC; n_x<end[0]; n_x++ ){
//call kernel function, passing in pointers to data - remainder
update_halo_kernel1_ba2( (double * )p_a[0], (double * )p_a[1], (double * )p_a[2],
(double * )p_a[3], (double * )p_a[4], (double * )p_a[5], (double * )p_a[6],
(int * )p_a[7] );
//shift pointers to data x direction
p_a[0]= p_a[0] + (dat0 * off0_0);
p_a[1]= p_a[1] + (dat1 * off1_0);
p_a[2]= p_a[2] + (dat2 * off2_0);
p_a[3]= p_a[3] + (dat3 * off3_0);
p_a[4]= p_a[4] + (dat4 * off4_0);
p_a[5]= p_a[5] + (dat5 * off5_0);
p_a[6]= p_a[6] + (dat6 * off6_0);
}
//shift pointers to data y direction
p_a[0]= p_a[0] + (dat0 * off0_1);
p_a[1]= p_a[1] + (dat1 * off1_1);
p_a[2]= p_a[2] + (dat2 * off2_1);
p_a[3]= p_a[3] + (dat3 * off3_1);
p_a[4]= p_a[4] + (dat4 * off4_1);
p_a[5]= p_a[5] + (dat5 * off5_1);
p_a[6]= p_a[6] + (dat6 * off6_1);
}
//shift pointers to data z direction
p_a[0]= p_a[0] + (dat0 * off0_2);
p_a[1]= p_a[1] + (dat1 * off1_2);
p_a[2]= p_a[2] + (dat2 * off2_2);
p_a[3]= p_a[3] + (dat3 * off3_2);
p_a[4]= p_a[4] + (dat4 * off4_2);
p_a[5]= p_a[5] + (dat5 * off5_2);
p_a[6]= p_a[6] + (dat6 * off6_2);
}
}
ops_timers_core(&c1,&t1);
OPS_kernels[49].time += t1-t2;
ops_set_dirtybit_host(args, 8);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
ops_set_halo_dirtybit3(&args[6],range);
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[49].mpi_time += t2-t1;
OPS_kernels[49].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[49].transfer += ops_compute_transfer(dim, range, &arg1);
OPS_kernels[49].transfer += ops_compute_transfer(dim, range, &arg2);
OPS_kernels[49].transfer += ops_compute_transfer(dim, range, &arg3);
OPS_kernels[49].transfer += ops_compute_transfer(dim, range, &arg4);
OPS_kernels[49].transfer += ops_compute_transfer(dim, range, &arg5);
OPS_kernels[49].transfer += ops_compute_transfer(dim, range, &arg6);
}
Example 10: ops_par_loop_accelerate_kernel
//......... part of the code omitted here .........
#else
double *p_a11 = (double *)((char *)args[11].data + base11);
#endif
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[12].dat->d_m[d] + OPS_sub_dat_list[args[12].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[12].dat->d_m[d];
#endif //OPS_MPI
int base12 = dat12 * 1 *
(start[0] * args[12].stencil->stride[0] - args[12].dat->base[0] - d_m[0]);
base12 = base12+ dat12 *
args[12].dat->size[0] *
(start[1] * args[12].stencil->stride[1] - args[12].dat->base[1] - d_m[1]);
base12 = base12+ dat12 *
args[12].dat->size[0] *
args[12].dat->size[1] *
(start[2] * args[12].stencil->stride[2] - args[12].dat->base[2] - d_m[2]);
#ifdef OPS_GPU
double *p_a12 = (double *)((char *)args[12].data_d + base12);
#else
double *p_a12 = (double *)((char *)args[12].data + base12);
#endif
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[13].dat->d_m[d] + OPS_sub_dat_list[args[13].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[13].dat->d_m[d];
#endif //OPS_MPI
int base13 = dat13 * 1 *
(start[0] * args[13].stencil->stride[0] - args[13].dat->base[0] - d_m[0]);
base13 = base13+ dat13 *
args[13].dat->size[0] *
(start[1] * args[13].stencil->stride[1] - args[13].dat->base[1] - d_m[1]);
base13 = base13+ dat13 *
args[13].dat->size[0] *
args[13].dat->size[1] *
(start[2] * args[13].stencil->stride[2] - args[13].dat->base[2] - d_m[2]);
#ifdef OPS_GPU
double *p_a13 = (double *)((char *)args[13].data_d + base13);
#else
double *p_a13 = (double *)((char *)args[13].data + base13);
#endif
#ifdef OPS_GPU
ops_H_D_exchanges_device(args, 14);
#else
ops_H_D_exchanges_host(args, 14);
#endif
ops_halo_exchanges(args,14,range);
ops_timers_core(&c1,&t1);
OPS_kernels[1].mpi_time += t1-t2;
accelerate_kernel_c_wrapper(
p_a0,
p_a1,
p_a2,
p_a3,
p_a4,
p_a5,
p_a6,
p_a7,
p_a8,
p_a9,
p_a10,
p_a11,
p_a12,
p_a13,
x_size, y_size, z_size);
ops_timers_core(&c2,&t2);
OPS_kernels[1].time += t2-t1;
#ifdef OPS_GPU
ops_set_dirtybit_device(args, 14);
#else
ops_set_dirtybit_host(args, 14);
#endif
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[8],range);
ops_set_halo_dirtybit3(&args[12],range);
//Update kernel record
OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg1);
OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg2);
OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg3);
OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg4);
OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg5);
OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg6);
OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg7);
OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg8);
OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg9);
OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg10);
OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg11);
OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg12);
OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg13);
}
Example 11: ops_par_loop_mblock_populate_kernel
//......... part of the code omitted here .........
int nthreads = omp_get_max_threads( );
#else
int nthreads = 1;
#endif
xdim0 = args[0].dat->size[0]*args[0].dat->dim;
ops_H_D_exchanges_host(args, 2);
//Halo Exchanges
ops_halo_exchanges(args,2,range);
ops_timers_core(&c2,&t2);
OPS_kernels[0].mpi_time += t2-t1;
#pragma omp parallel for
for ( int thr=0; thr<nthreads; thr++ ){
int y_size = end[1]-start[1];
char *p_a[2];
int start_i = start[1] + ((y_size-1)/nthreads+1)*thr;
int finish_i = start[1] + MIN(((y_size-1)/nthreads+1)*(thr+1),y_size);
//get address per thread
int start0 = start[0];
int start1 = start_i;
int arg_idx[2];
#ifdef OPS_MPI
arg_idx[0] = sb->decomp_disp[0]+start0;
arg_idx[1] = sb->decomp_disp[1]+start1;
#else //OPS_MPI
arg_idx[0] = start0;
arg_idx[1] = start1;
#endif //OPS_MPI
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif //OPS_MPI
int base0 = dat0 * 1 *
(start0 * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start1 * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
p_a[0] = (char *)args[0].data + base0;
p_a[1] = (char *)arg_idx;
for ( int n_y=start_i; n_y<finish_i; n_y++ ){
for ( int n_x=start[0]; n_x<start[0]+(end[0]-start[0])/SIMD_VEC; n_x++ ){
//call kernel function, passing in pointers to data -vectorised
for ( int i=0; i<SIMD_VEC; i++ ){
mblock_populate_kernel( (double * )p_a[0]+ i*1, arg_idx );
arg_idx[0]++;
}
//shift pointers to data x direction
p_a[0]= p_a[0] + (dat0 * off0_0)*SIMD_VEC;
}
for ( int n_x=start[0]+((end[0]-start[0])/SIMD_VEC)*SIMD_VEC; n_x<end[0]; n_x++ ){
//call kernel function, passing in pointers to data - remainder
mblock_populate_kernel( (double * )p_a[0], arg_idx );
//shift pointers to data x direction
p_a[0]= p_a[0] + (dat0 * off0_0);
arg_idx[0]++;
}
//shift pointers to data y direction
p_a[0]= p_a[0] + (dat0 * off0_1);
#ifdef OPS_MPI
arg_idx[0] = sb->decomp_disp[0]+start0;
#else //OPS_MPI
arg_idx[0] = start0;
#endif //OPS_MPI
arg_idx[1]++;
}
}
ops_timers_core(&c1,&t1);
OPS_kernels[0].time += t1-t2;
ops_set_dirtybit_host(args, 2);
ops_set_halo_dirtybit3(&args[0],range);
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[0].mpi_time += t2-t1;
OPS_kernels[0].transfer += ops_compute_transfer(dim, range, &arg0);
}
示例12: ops_par_loop_PdV_kernel_predict
//.........这里部分代码省略.........
}
//shift pointers to data x direction
p_a[0]= p_a[0] + (dat0 * off0_0)*SIMD_VEC;
p_a[1]= p_a[1] + (dat1 * off1_0)*SIMD_VEC;
p_a[2]= p_a[2] + (dat2 * off2_0)*SIMD_VEC;
p_a[3]= p_a[3] + (dat3 * off3_0)*SIMD_VEC;
p_a[4]= p_a[4] + (dat4 * off4_0)*SIMD_VEC;
p_a[5]= p_a[5] + (dat5 * off5_0)*SIMD_VEC;
p_a[6]= p_a[6] + (dat6 * off6_0)*SIMD_VEC;
p_a[7]= p_a[7] + (dat7 * off7_0)*SIMD_VEC;
p_a[8]= p_a[8] + (dat8 * off8_0)*SIMD_VEC;
p_a[9]= p_a[9] + (dat9 * off9_0)*SIMD_VEC;
p_a[10]= p_a[10] + (dat10 * off10_0)*SIMD_VEC;
p_a[11]= p_a[11] + (dat11 * off11_0)*SIMD_VEC;
p_a[12]= p_a[12] + (dat12 * off12_0)*SIMD_VEC;
p_a[13]= p_a[13] + (dat13 * off13_0)*SIMD_VEC;
}
for ( int n_x=start[0]+((end[0]-start[0])/SIMD_VEC)*SIMD_VEC; n_x<end[0]; n_x++ ){
//call kernel function, passing in pointers to data - remainder
PdV_kernel_predict( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2],
(double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6],
(double *)p_a[7], (double *)p_a[8], (double *)p_a[9], (double *)p_a[10],
(double *)p_a[11], (double *)p_a[12], (double *)p_a[13] );
//shift pointers to data x direction
p_a[0]= p_a[0] + (dat0 * off0_0);
p_a[1]= p_a[1] + (dat1 * off1_0);
p_a[2]= p_a[2] + (dat2 * off2_0);
p_a[3]= p_a[3] + (dat3 * off3_0);
p_a[4]= p_a[4] + (dat4 * off4_0);
p_a[5]= p_a[5] + (dat5 * off5_0);
p_a[6]= p_a[6] + (dat6 * off6_0);
p_a[7]= p_a[7] + (dat7 * off7_0);
p_a[8]= p_a[8] + (dat8 * off8_0);
p_a[9]= p_a[9] + (dat9 * off9_0);
p_a[10]= p_a[10] + (dat10 * off10_0);
p_a[11]= p_a[11] + (dat11 * off11_0);
p_a[12]= p_a[12] + (dat12 * off12_0);
p_a[13]= p_a[13] + (dat13 * off13_0);
}
//shift pointers to data y direction
p_a[0]= p_a[0] + (dat0 * off0_1);
p_a[1]= p_a[1] + (dat1 * off1_1);
p_a[2]= p_a[2] + (dat2 * off2_1);
p_a[3]= p_a[3] + (dat3 * off3_1);
p_a[4]= p_a[4] + (dat4 * off4_1);
p_a[5]= p_a[5] + (dat5 * off5_1);
p_a[6]= p_a[6] + (dat6 * off6_1);
p_a[7]= p_a[7] + (dat7 * off7_1);
p_a[8]= p_a[8] + (dat8 * off8_1);
p_a[9]= p_a[9] + (dat9 * off9_1);
p_a[10]= p_a[10] + (dat10 * off10_1);
p_a[11]= p_a[11] + (dat11 * off11_1);
p_a[12]= p_a[12] + (dat12 * off12_1);
p_a[13]= p_a[13] + (dat13 * off13_1);
}
//shift pointers to data z direction
p_a[0]= p_a[0] + (dat0 * off0_2);
p_a[1]= p_a[1] + (dat1 * off1_2);
p_a[2]= p_a[2] + (dat2 * off2_2);
p_a[3]= p_a[3] + (dat3 * off3_2);
p_a[4]= p_a[4] + (dat4 * off4_2);
p_a[5]= p_a[5] + (dat5 * off5_2);
p_a[6]= p_a[6] + (dat6 * off6_2);
p_a[7]= p_a[7] + (dat7 * off7_2);
p_a[8]= p_a[8] + (dat8 * off8_2);
p_a[9]= p_a[9] + (dat9 * off9_2);
p_a[10]= p_a[10] + (dat10 * off10_2);
p_a[11]= p_a[11] + (dat11 * off11_2);
p_a[12]= p_a[12] + (dat12 * off12_2);
p_a[13]= p_a[13] + (dat13 * off13_2);
}
ops_timers_core(&c2,&t2);
OPS_kernels[5].time += t2-t1;
ops_set_dirtybit_host(args, 14);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[8],range);
ops_set_halo_dirtybit3(&args[11],range);
//Update kernel record
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg1);
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg2);
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg3);
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg4);
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg5);
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg6);
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg7);
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg8);
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg9);
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg10);
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg11);
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg12);
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg13);
}
Example 13: ops_par_loop_updateRK3_kernel
//......... part of the code omitted here .........
d_m[d] = args[7].dat->d_m[d];
#endif
int base7 = dat7 * 1 * (start0 * args[7].stencil->stride[0] -
args[7].dat->base[0] - d_m[0]);
p_a[7] = (char *)args[7].data + base7;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[8].dat->d_m[d] + OPS_sub_dat_list[args[8].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[8].dat->d_m[d];
#endif
int base8 = dat8 * 1 * (start0 * args[8].stencil->stride[0] -
args[8].dat->base[0] - d_m[0]);
p_a[8] = (char *)args[8].data + base8;
p_a[9] = (char *)args[9].data;
p_a[10] = (char *)args[10].data;
for (int n_x = start_i; n_x < start_i + (finish_i - start_i) / SIMD_VEC;
n_x++) {
// call kernel function, passing in pointers to data -vectorised
#pragma simd
for (int i = 0; i < SIMD_VEC; i++) {
updateRK3_kernel(
(double *)p_a[0] + i * 1 * 1, (double *)p_a[1] + i * 1 * 1,
(double *)p_a[2] + i * 1 * 1, (double *)p_a[3] + i * 1 * 1,
(double *)p_a[4] + i * 1 * 1, (double *)p_a[5] + i * 1 * 1,
(const double *)p_a[6] + i * 1 * 1,
(const double *)p_a[7] + i * 1 * 1,
(const double *)p_a[8] + i * 1 * 1, (double *)p_a[9],
(double *)p_a[10]);
}
// shift pointers to data x direction
p_a[0] = p_a[0] + (dat0 * off0_0) * SIMD_VEC;
p_a[1] = p_a[1] + (dat1 * off1_0) * SIMD_VEC;
p_a[2] = p_a[2] + (dat2 * off2_0) * SIMD_VEC;
p_a[3] = p_a[3] + (dat3 * off3_0) * SIMD_VEC;
p_a[4] = p_a[4] + (dat4 * off4_0) * SIMD_VEC;
p_a[5] = p_a[5] + (dat5 * off5_0) * SIMD_VEC;
p_a[6] = p_a[6] + (dat6 * off6_0) * SIMD_VEC;
p_a[7] = p_a[7] + (dat7 * off7_0) * SIMD_VEC;
p_a[8] = p_a[8] + (dat8 * off8_0) * SIMD_VEC;
}
for (int n_x = start_i + ((finish_i - start_i) / SIMD_VEC) * SIMD_VEC;
n_x < finish_i; n_x++) {
// call kernel function, passing in pointers to data - remainder
updateRK3_kernel((double *)p_a[0], (double *)p_a[1], (double *)p_a[2],
(double *)p_a[3], (double *)p_a[4], (double *)p_a[5],
(const double *)p_a[6], (const double *)p_a[7],
(const double *)p_a[8], (double *)p_a[9],
(double *)p_a[10]);
// shift pointers to data x direction
p_a[0] = p_a[0] + (dat0 * off0_0);
p_a[1] = p_a[1] + (dat1 * off1_0);
p_a[2] = p_a[2] + (dat2 * off2_0);
p_a[3] = p_a[3] + (dat3 * off3_0);
p_a[4] = p_a[4] + (dat4 * off4_0);
p_a[5] = p_a[5] + (dat5 * off5_0);
p_a[6] = p_a[6] + (dat6 * off6_0);
p_a[7] = p_a[7] + (dat7 * off7_0);
p_a[8] = p_a[8] + (dat8 * off8_0);
}
}
if (OPS_diags > 1) {
ops_timers_core(&c1, &t1);
OPS_kernels[6].time += t1 - t2;
}
ops_set_dirtybit_host(args, 11);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
ops_set_halo_dirtybit3(&args[2], range);
ops_set_halo_dirtybit3(&args[3], range);
ops_set_halo_dirtybit3(&args[4], range);
ops_set_halo_dirtybit3(&args[5], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[6].mpi_time += t2 - t1;
OPS_kernels[6].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[6].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[6].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[6].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[6].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[6].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[6].transfer += ops_compute_transfer(dim, start, end, &arg6);
OPS_kernels[6].transfer += ops_compute_transfer(dim, start, end, &arg7);
OPS_kernels[6].transfer += ops_compute_transfer(dim, start, end, &arg8);
}
}
Example 14: ops_par_loop_left_bndcon
//......... part of the code omitted here .........
}
#else
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
#ifdef OPS_DEBUG
ops_register_args(args, "left_bndcon");
#endif
offs[0][0] = args[0].stencil->stride[0]*1; //unit step in x dimension
offs[0][1] = off2D(1, &start[0],
&end[0],args[0].dat->size, args[0].stencil->stride) - offs[0][0];
int arg_idx[2];
#ifdef OPS_MPI
arg_idx[0] = sb->decomp_disp[0]+start[0];
arg_idx[1] = sb->decomp_disp[1]+start[1];
#else
arg_idx[0] = start[0];
arg_idx[1] = start[1];
#endif
int off0_0 = offs[0][0];
int off0_1 = offs[0][1];
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
//set up initial pointers and exchange halos if necessary
int base0 = args[0].dat->base_offset + (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size) * start[0] * args[0].stencil->stride[0];
base0 = base0+ (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size) *
args[0].dat->size[0] *
start[1] * args[0].stencil->stride[1];
p_a[0] = (char *)args[0].data + base0;
p_a[1] = (char *)arg_idx;
//initialize global variable with the dimension of dats
xdim0 = args[0].dat->size[0];
//Halo Exchanges
ops_H_D_exchanges_host(args, 2);
ops_halo_exchanges(args,2,range);
ops_H_D_exchanges_host(args, 2);
if (OPS_diags > 1) {
ops_timers_core(&c1,&t1);
OPS_kernels[2].mpi_time += t1-t2;
}
int n_x;
for ( int n_y=start[1]; n_y<end[1]; n_y++ ){
#pragma novector
for( n_x=start[0]; n_x<start[0]+((end[0]-start[0])/SIMD_VEC)*SIMD_VEC; n_x+=SIMD_VEC ) {
//call kernel function, passing in pointers to data -vectorised
for ( int i=0; i<SIMD_VEC; i++ ){
left_bndcon( (double *)p_a[0]+ i*1*1, (int *)p_a[1] );
arg_idx[0]++;
}
//shift pointers to data x direction
p_a[0]= p_a[0] + (dat0 * off0_0)*SIMD_VEC;
}
for ( int n_x=start[0]+((end[0]-start[0])/SIMD_VEC)*SIMD_VEC; n_x<end[0]; n_x++ ){
//call kernel function, passing in pointers to data - remainder
left_bndcon( (double *)p_a[0], (int *)p_a[1] );
//shift pointers to data x direction
p_a[0]= p_a[0] + (dat0 * off0_0);
arg_idx[0]++;
}
//shift pointers to data y direction
p_a[0]= p_a[0] + (dat0 * off0_1);
#ifdef OPS_MPI
arg_idx[0] = sb->decomp_disp[0]+start[0];
#else
arg_idx[0] = start[0];
#endif
arg_idx[1]++;
}
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[2].time += t2-t1;
}
ops_set_dirtybit_host(args, 2);
ops_set_halo_dirtybit3(&args[0],range);
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c1,&t1);
OPS_kernels[2].mpi_time += t1-t2;
OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg0);
}
}
Example 15: ops_par_loop_PdV_kernel_predict
//......... part of the code omitted here .........
base11 = base11 + args[11].dat->size[0] *
(start[1] * args[11].stencil->stride[1] - args[11].dat->base[1] - d_m[1]);
base11 = base11 + args[11].dat->size[0] * args[11].dat->size[1] *
(start[2] * args[11].stencil->stride[2] - args[11].dat->base[2] - d_m[2]);
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[12].dat->d_m[d] + OPS_sub_dat_list[args[12].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[12].dat->d_m[d];
#endif //OPS_MPI
int base12 = 1 *
(start[0] * args[12].stencil->stride[0] - args[12].dat->base[0] - d_m[0]);
base12 = base12 + args[12].dat->size[0] *
(start[1] * args[12].stencil->stride[1] - args[12].dat->base[1] - d_m[1]);
base12 = base12 + args[12].dat->size[0] * args[12].dat->size[1] *
(start[2] * args[12].stencil->stride[2] - args[12].dat->base[2] - d_m[2]);
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[13].dat->d_m[d] + OPS_sub_dat_list[args[13].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[13].dat->d_m[d];
#endif //OPS_MPI
int base13 = 1 *
(start[0] * args[13].stencil->stride[0] - args[13].dat->base[0] - d_m[0]);
base13 = base13 + args[13].dat->size[0] *
(start[1] * args[13].stencil->stride[1] - args[13].dat->base[1] - d_m[1]);
base13 = base13 + args[13].dat->size[0] * args[13].dat->size[1] *
(start[2] * args[13].stencil->stride[2] - args[13].dat->base[2] - d_m[2]);
ops_H_D_exchanges_device(args, 14);
ops_halo_exchanges(args,14,range);
ops_H_D_exchanges_device(args, 14);
ops_timers_core(&c1,&t1);
OPS_kernels[5].mpi_time += t1-t2;
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 0, sizeof(cl_mem), (void*) &arg0.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 1, sizeof(cl_mem), (void*) &arg1.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 2, sizeof(cl_mem), (void*) &arg2.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 3, sizeof(cl_mem), (void*) &arg3.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 4, sizeof(cl_mem), (void*) &arg4.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 5, sizeof(cl_mem), (void*) &arg5.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 6, sizeof(cl_mem), (void*) &arg6.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 7, sizeof(cl_mem), (void*) &arg7.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 8, sizeof(cl_mem), (void*) &arg8.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 9, sizeof(cl_mem), (void*) &arg9.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 10, sizeof(cl_mem), (void*) &arg10.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 11, sizeof(cl_mem), (void*) &arg11.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 12, sizeof(cl_mem), (void*) &arg12.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 13, sizeof(cl_mem), (void*) &arg13.data_d ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 14, sizeof(cl_double), (void*) &dt ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 15, sizeof(cl_int), (void*) &base0 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 16, sizeof(cl_int), (void*) &base1 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 17, sizeof(cl_int), (void*) &base2 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 18, sizeof(cl_int), (void*) &base3 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 19, sizeof(cl_int), (void*) &base4 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 20, sizeof(cl_int), (void*) &base5 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 21, sizeof(cl_int), (void*) &base6 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 22, sizeof(cl_int), (void*) &base7 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 23, sizeof(cl_int), (void*) &base8 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 24, sizeof(cl_int), (void*) &base9 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 25, sizeof(cl_int), (void*) &base10 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 26, sizeof(cl_int), (void*) &base11 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 27, sizeof(cl_int), (void*) &base12 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 28, sizeof(cl_int), (void*) &base13 ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 29, sizeof(cl_int), (void*) &x_size ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 30, sizeof(cl_int), (void*) &y_size ));
clSafeCall( clSetKernelArg(OPS_opencl_core.kernel[5], 31, sizeof(cl_int), (void*) &z_size ));
//call/enqueue OpenCL kernel wrapper function
clSafeCall( clEnqueueNDRangeKernel(OPS_opencl_core.command_queue, OPS_opencl_core.kernel[5], 3, NULL, globalWorkSize, localWorkSize, 0, NULL, NULL) );
if (OPS_diags>1) {
clSafeCall( clFinish(OPS_opencl_core.command_queue) );
}
ops_set_dirtybit_device(args, 14);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[8],range);
ops_set_halo_dirtybit3(&args[11],range);
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[5].time += t2-t1;
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg1);
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg2);
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg3);
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg4);
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg5);
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg6);
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg7);
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg8);
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg9);
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg10);
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg11);
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg12);
OPS_kernels[5].transfer += ops_compute_transfer(dim, range, &arg13);
}