This article collects typical usage examples of the C++ method Force::compute. If you have been wondering how Force::compute is used in C++, how to call it, or what real code that calls it looks like, the hand-picked examples below may help. You can also browse further usage examples of the enclosing class Force.
The following shows 2 code examples of the Force::compute method, sorted by popularity by default. You can upvote examples you like or find useful; your votes help the system recommend better C++ code examples.
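Both listings below come from the miniMD molecular-dynamics mini-app and share one call pattern. As a quick orientation, here is a minimal hedged sketch of that pattern, distilled from the examples; the object names (atom, neighbor, comm, force) and the argument list of Force::compute are taken from the listings and may differ in other code bases.
// Minimal sketch (not a complete program): the typical ordering around Force::compute.
comm.exchange(atom);                      // migrate atoms that left the local sub-domain
comm.borders(atom);                       // communicate ghost atoms from neighboring ranks
neighbor.build(atom);                     // (re)build the neighbor lists
force.compute(atom, neighbor, comm.me);   // evaluate forces on the local atoms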
Example 1: main
//......... part of the code omitted here .........
  force.cutforce = in.force_cut;
  thermo.nstat = in.thermo_nstat;

  if(me == 0)
    printf("# Create System:\n");

  // Build the system either from a LAMMPS data file or from scratch
  // (create the box, the atoms and their initial velocities).
  if(in.datafile) {
    read_lammps_data(atom, comm, neighbor, integrate, thermo, in.datafile, in.units);
    MMD_float volume = atom.box.xprd * atom.box.yprd * atom.box.zprd;
    in.rho = 1.0 * atom.natoms / volume;
    force.setup();
  } else {
    create_box(atom, in.nx, in.ny, in.nz, in.rho);
    comm.setup(neighbor.cutneigh, atom);
    neighbor.setup(atom);
    integrate.setup();
    force.setup();
    create_atoms(atom, in.nx, in.ny, in.nz, in.rho);
    thermo.setup(in.rho, integrate, atom, in.units);
    create_velocity(in.t_request, atom, thermo);
  }
  if(me == 0)
    printf("# Done .... \n");

  if(me == 0) {
    fprintf(stdout, "# " VARIANT_STRING " output ...\n");
    fprintf(stdout, "# Systemparameters: \n");
    fprintf(stdout, "\t# MPI processes: %i\n", neighbor.threads->mpi_num_threads);
    fprintf(stdout, "\t# OpenMP threads: %i\n", neighbor.threads->omp_num_threads);
    fprintf(stdout, "\t# Inputfile: %s\n", input_file == 0 ? "in.lj.miniMD" : input_file);
    fprintf(stdout, "\t# Datafile: %s\n", in.datafile ? in.datafile : "None");
    fprintf(stdout, "\t# ForceStyle: %s\n", in.forcetype == FORCELJ ? "LJ" : "EAM");
    fprintf(stdout, "\t# Units: %s\n", in.units == 0 ? "LJ" : "METAL");
    fprintf(stdout, "\t# Atoms: %i\n", atom.natoms);
    fprintf(stdout, "\t# System size: %2.2lf %2.2lf %2.2lf (unit cells: %i %i %i)\n", atom.box.xprd, atom.box.yprd, atom.box.zprd, in.nx, in.ny, in.nz);
    fprintf(stdout, "\t# Density: %lf\n", in.rho);
    fprintf(stdout, "\t# Force cutoff: %lf\n", force.cutforce);
    fprintf(stdout, "\t# Neigh cutoff: %lf\n", neighbor.cutneigh);
    fprintf(stdout, "\t# Half neighborlists: %i\n", neighbor.halfneigh);
    fprintf(stdout, "\t# Neighbor bins: %i %i %i\n", neighbor.nbinx, neighbor.nbiny, neighbor.nbinz);
    fprintf(stdout, "\t# Neighbor frequency: %i\n", neighbor.every);
    fprintf(stdout, "\t# Timestep size: %lf\n", integrate.dt);
    fprintf(stdout, "\t# Thermo frequency: %i\n", thermo.nstat);
    fprintf(stdout, "\t# Ghost Newton: %i\n", ghost_newton);
    fprintf(stdout, "\t# Use SSE intrinsics: %i\n", force.use_sse);
    fprintf(stdout, "\t# Do safe exchange: %i\n", comm.do_safeexchange);
    fprintf(stdout, "\t# Size of float: %i\n\n", (int) sizeof(MMD_float));
  }
  // Initial communication, neighbor-list build and force evaluation
  // before the timestepping loop starts.
  comm.exchange(atom);
  comm.borders(atom);

  atom.d_x->upload();
  atom.d_v->upload();
  //atom.d_vold->upload();

  neighbor.build(atom);

  if(me == 0) printf("# Starting dynamics ...\n");

  if(me == 0) printf("# Timestep T U P Time\n");

  thermo.compute(0, atom, neighbor, force, timer, comm);
  force.compute(atom, neighbor, comm.me);

  timer.barrier_start(TIME_TOTAL);
  integrate.run(atom, force, neighbor, comm, thermo, timer);
  timer.barrier_stop(TIME_TOTAL);
  int natoms;
  MPI_Allreduce(&atom.nlocal, &natoms, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

  thermo.compute(-1, atom, neighbor, force, timer, comm);

  if(me == 0) {
    double time_other = timer.array[TIME_TOTAL] - timer.array[TIME_FORCE] - timer.array[TIME_NEIGH] - timer.array[TIME_COMM];
    printf("\n\n");
    printf("# Performance Summary:\n");
    printf("# MPI_proc OMP_threads nsteps natoms t_total t_force t_neigh t_comm t_other performance perf/thread grep_string t_extra\n");
    printf("%i %i %i %i %lf %lf %lf %lf %lf %lf %lf PERF_SUMMARY %lf\n\n\n",
           nprocs, num_threads, integrate.ntimes, natoms,
           timer.array[TIME_TOTAL], timer.array[TIME_FORCE], timer.array[TIME_NEIGH], timer.array[TIME_COMM], time_other,
           1.0 * natoms * integrate.ntimes / timer.array[TIME_TOTAL], 1.0 * natoms * integrate.ntimes / timer.array[TIME_TOTAL] / nprocs / num_threads, timer.array[TIME_TEST]);
  }

  if(yaml_output)
    output(in, atom, force, neighbor, comm, thermo, integrate, timer, screen_yaml);

  MPI_Barrier(MPI_COMM_WORLD);
  MPI_Finalize();

  delete opencl;

  return 0;
}
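Example 1 calls compute on a concrete force object, while Example 2 (below) constructs the object through a Force* base-class pointer and configures it before any computation. The standalone toy program below is not miniMD source; the Atom and Neighbor types, member names and constants are hypothetical stand-ins. It only mirrors the dispatch pattern the two excerpts rely on: a virtual Force::compute overridden by a pair style, selected at run time and invoked once per timestep.
// Standalone illustration (not miniMD code): a minimal Force hierarchy with a
// virtual compute() selected at run time. Atom and Neighbor are hypothetical stand-ins.
#include <cstdio>
#include <vector>

struct Atom {
  std::vector<double> x, f;   // positions and forces (flattened, 3 values per atom)
};

struct Neighbor { };          // a real code would keep neighbor lists here

struct Force {
  double cutforce = 2.5;
  virtual ~Force() { }
  virtual void setup() { }
  virtual void compute(Atom &atom, Neighbor &neighbor, int me) = 0;
};

struct ForceLJ : Force {
  double epsilon = 1.0, sigma = 1.0;
  void compute(Atom &atom, Neighbor &neighbor, int me) override {
    (void) neighbor; (void) me;
    // A real pair style would loop over neighbor pairs and accumulate
    // Lennard-Jones forces; here the force array is only cleared.
    for(double &fi : atom.f) fi = 0.0;
  }
};

int main() {
  Atom atom;
  atom.x.assign(3, 0.0);
  atom.f.assign(3, 0.0);
  Neighbor neighbor;

  Force *force = new ForceLJ();          // run-time selection, as in Example 2
  force->setup();

  for(int step = 0; step < 10; ++step)
    force->compute(atom, neighbor, 0);   // per-timestep call, as in Example 1

  std::printf("done, f[0] = %g\n", atom.f[0]);
  delete force;
  return 0;
}
In miniMD itself the call additionally passes the MPI rank, as force.compute(atom, neighbor, comm.me) in Example 1 shows, so each process works only on its local atoms.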
Example 2: main
//......... part of the code omitted here .........
      ghost_newton = 0;
    }
  }
  // Select the concrete pair style at run time; compute() is later invoked
  // through the Force* base-class pointer.
  if(in.forcetype == FORCELJ) force = (Force*) new ForceLJ();

  threads.mpi_me = me;
  threads.mpi_num_threads = nprocs;
  threads.omp_me = 0;
  threads.omp_num_threads = num_threads;

  atom.threads = &threads;
  comm.threads = &threads;
  force->threads = &threads;
  integrate.threads = &threads;
  neighbor.threads = &threads;
  thermo.threads = &threads;

  force->epsilon = in.epsilon;
  force->sigma = in.sigma;
  force->sigma6 = in.sigma * in.sigma * in.sigma * in.sigma * in.sigma * in.sigma;

  neighbor.ghost_newton = ghost_newton;

  omp_set_num_threads(num_threads);

  neighbor.timer = &timer;
  force->timer = &timer;
  comm.check_safeexchange = check_safeexchange;
  comm.do_safeexchange = do_safeexchange;
  force->use_sse = use_sse;
  neighbor.halfneigh = halfneigh;

  if(halfneigh < 0) force->use_oldcompute = 1;
  if(use_sse) {
#ifdef VARIANT_REFERENCE
    if(me == 0) printf("ERROR: Trying to run with -sse with miniMD reference version. Use SSE variant instead. Exiting.\n");

    MPI_Finalize();
    exit(0);
#endif
  }
  if(num_steps > 0) in.ntimes = num_steps;

  if(system_size > 0) {
    in.nx = system_size;
    in.ny = system_size;
    in.nz = system_size;
  }

  if(nx > 0) {
    in.nx = nx;

    if(ny > 0)
      in.ny = ny;
    else if(system_size < 0)
      in.ny = nx;

    if(nz > 0)
      in.nz = nz;
    else if(system_size < 0)
      in.nz = nx;
  }
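  // Illustrative note, not part of the excerpt: the rest of this main() proceeds as in
  // Example 1 -- after setup the chosen pair style is invoked through the base-class
  // pointer, e.g. force->setup(); ... force->compute(atom, neighbor, comm.me);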