This article collects typical usage examples of the C++ mpi::communicator class (Boost.MPI): what the class is for, how it is called, and what real calling code looks like.
The 15 code examples below are shown sorted by popularity.
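All of the snippets below assume an already-initialized MPI runtime and an mpi::communicator object (the alias mpi = boost::mpi is used throughout). As a point of reference, a minimal self-contained setup might look like the following sketch; it is not taken from any of the quoted projects:

#include <boost/mpi.hpp>
#include <iostream>

namespace mpi = boost::mpi;

int main(int argc, char* argv[]) {
    mpi::environment env(argc, argv);   // initializes MPI, finalizes it on destruction
    mpi::communicator world;            // wraps MPI_COMM_WORLD
    std::cout << "process " << world.rank() << " of " << world.size() << std::endl;
    return 0;
}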
Example 1:
void Simulation3DInitializer::setOffsets(const mpi::communicator & xLine,
                                         const mpi::communicator & yLine,
                                         const mpi::communicator & zLine) {
    x_offset = (dx*blockSize*xLine.rank());
    y_offset = (dy*blockSize*yLine.rank());
    z_offset = (dz*blockSize*zLine.rank());
}
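The three per-axis communicators passed in here are produced elsewhere by splitting the world communicator; Example 14 below shows the actual split calls. A condensed, illustrative sketch of how setOffsets ends up being called, with an assumed 2×2 process grid and an assumed initializer pointer named init:

// Illustrative only: procs_x/procs_y and the `init` pointer are assumptions;
// the split colours match the ones used in Example 14 below.
mpi::communicator world;
unsigned int procs_x = 2, procs_y = 2;
mpi::communicator xLine = world.split(world.rank() / procs_x);
mpi::communicator yLine = world.split(world.rank() % procs_x
                                      + (world.rank() / (procs_x * procs_y)) * procs_x);
mpi::communicator zLine = world.split(world.rank() % (procs_x * procs_y));
init->setOffsets(xLine, yLine, zLine);  // each rank gets offsets matching its grid position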
Example 2:
MPISlave::MPISlave(const mpi::communicator& comm, const Experiment& exp)
    : m_comm(comm)
    , m_exp(exp)
    , m_resonanceField(m_exp)
{
    if (comm.rank() == MASTER_RANK) {
        cerr << "MPISlave created in master rank!" << endl;
        comm.abort(1);
        return;
    }
}
Example 3: mpi_broadcast
template <typename A> REQUIRES_IS_ARRAY mpi_broadcast(A &a, mpi::communicator c = {}, int root = 0) {
  if (!has_contiguous_data(a)) TRIQS_RUNTIME_ERROR << "Non contiguous view in mpi_broadcast";
  auto sh = a.shape();
  MPI_Bcast(&sh[0], sh.size(), mpi::mpi_datatype<typename decltype(sh)::value_type>(), root, c.get());
  if (c.rank() != root) resize_or_check_if_view(a, sh);
  MPI_Bcast(a.data_start(), a.domain().number_of_elements(), mpi::mpi_datatype<typename A::value_type>(), root, c.get());
}
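This helper uses raw MPI_Bcast calls because it must first broadcast the array shape and then the contiguous data block. For simpler cases, Boost.MPI's own broadcast wrapper is enough; the following is a small standalone sketch of those standard calls (generic Boost.MPI usage, not TRIQS code):

#include <boost/mpi.hpp>
#include <vector>
namespace mpi = boost::mpi;

void broadcast_demo(const mpi::communicator& world) {
    // Serializing overload: the vector is recreated on non-root ranks automatically.
    std::vector<double> data;
    if (world.rank() == 0) data.assign(1000, 1.0);
    mpi::broadcast(world, data, 0);

    // Pointer overload: no serialization, but every rank must already own n elements.
    double buf[16] = {};
    mpi::broadcast(world, buf, 16, 0);
}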
Example 4: test_heuristic_strategy_2
/**
 * Heuristic strategy 2:
 * Randomly select the first 4 numbers, then derive the remaining
 * cells as a consequence of that selection.
 * There are 8 equations and 9 variables, but only 5 variables are
 * actually needed; the others can be calculated from them.
 * @strategy
 */
void test_heuristic_strategy_2(mpi::communicator world, int limit) {
    // prime numbers data structure
    ms_vector primes;
    // vector collecting all generated matrices
    vector<ms_matrix> list;
    // my rank
    int rank = world.rank();
    if (rank == 0) {
        cout << "Test the heuristic strategy 2...\n";
    }
    // generate prime numbers
    find_prime_numbers(world, limit, &primes);
    // broadcast the prime numbers to all processes
    mpi::broadcast(world, primes, 0);
    int length = 3;
    ms_matrix matrix(length, ms_vector(length));
    fill_in_heuristic_mode_2(&primes, &matrix, rank);
    // gather all generated matrices
    mpi::gather(world, matrix, list, 0);
    if (rank == 0) {
        // print all generated matrices
        cout << "Print all generated matrices:\n";
        print_list_matrix(list);
    }
}
Example 5: product_mpi
void product_mpi (mpi::communicator world,
                  real2D* matrix,   /* to multiply by */
                  real1D* vector,   /* to be multiplied */
                  real1D* result,   /* result of multiply */
                  int nr,           /* row size */
                  int nc)           /* column size */
{
    int lo, hi;   /* work controls */
    int r, c;     /* loop indices */
    int rank;
    // work
    if (get_block_rows_mpi (world, 0, nr, &lo, &hi)) {
        for (r = lo; r < hi; r++) {
            result[r] = matrix[r][0] * vector[0];
            for (c = 1; c < nc; c++) {
                result[r] += matrix[r][c] * vector[c];
            }
        }
    }
    // broadcast result
    for (rank = 0; rank < world.size (); rank++) {
        if (get_block_rows_mpi (world, 0, nr, &lo, &hi, rank)) {
            broadcast (world, &result[lo], hi - lo, rank);
        }
    }
}
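get_block_rows_mpi is a project-local helper that hands each rank a contiguous block of rows [lo, hi) and returns false when a rank gets no rows; its source is not shown on this page. A hypothetical implementation consistent with how it is used here (and again in Example 13) could look like this:

#include <boost/mpi.hpp>
#include <algorithm>
namespace mpi = boost::mpi;

// Hypothetical sketch: rows [base, nr) are split into near-equal contiguous blocks,
// the first (n % size) ranks getting one extra row. The five-argument call seen
// above presumably defaults `rank` to the calling rank.
bool get_block_rows_sketch(const mpi::communicator& world, int base, int nr,
                           int* lo, int* hi, int rank) {
    int n = nr - base;
    int size = world.size();
    int chunk = n / size;
    int extra = n % size;
    *lo = base + rank * chunk + std::min(rank, extra);
    *hi = *lo + chunk + (rank < extra ? 1 : 0);
    return *hi > *lo;
}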
Example 6: determineBoundary
BoundaryLocation determineBoundary(mpi::communicator& world) {
    if (world.rank() == 0 && world.rank() == world.size() - 1)
        return DOUBLE_BDY;
    else if (world.rank() == 0)
        return LOWER_BDY;
    else if (world.rank() == world.size() - 1)
        return UPPER_BDY;
    return NO_BDY;
}
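A quick illustrative check of the edge case (assumed test code, not from the original project): when the communicator holds a single process, that rank is both the first and the last, so it owns both boundaries:

mpi::communicator world;
if (world.size() == 1) {
    assert(determineBoundary(world) == DOUBLE_BDY);  // rank 0 is also the last rank
}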
Example 7: runPartialBatch
std::string runPartialBatch(mpi::communicator world, boost::shared_ptr< MatcherInterface > &matcher, ReadSet &_contigs, std::string _contigFile, ReadSet & changedContigs,
        ReadSet & finalContigs, int batchIdx, int maxContigsPerBatch, SequenceLengthType minKmerSize,
        double minimumCoverage, SequenceLengthType maxKmerSize,
        SequenceLengthType maxExtend, SequenceLengthType kmerStep) {
    LOG_DEBUG(1, "Starting runPartialBatch(" << batchIdx << " of " << _contigs.getSize() << "): " << MemoryUtils::getMemoryUsage());
    ReadSet contigs; // new global contig set, a subset of the original
    std::string extendLog;
    for(int i = batchIdx; i < (int) _contigs.getSize() && i < batchIdx + maxContigsPerBatch; i++)
        contigs.append(_contigs.getRead(i));
    setGlobalReadSetConstants(world, contigs);
    if (contigs.getGlobalSize() == 0)
        return extendLog;
    std::string contigFile = DistributedOfstreamMap::writeGlobalReadSet(world, contigs, UniqueName::generateUniqueGlobalName(".tmp-batch" + UniqueName::getOurUniqueHandle() + "-", batchIdx), ".fasta", FormatOutput::Fasta());
    MatcherInterface::MatchReadResults contigReadSet = matcher->match(contigs, contigFile);
    assert(contigs.getSize() == contigReadSet.size());
    LOG_VERBOSE_OPTIONAL(1, world.rank() == 0, " batch " << contigs.getSize() << ". Matches made");
    int numThreads = omp_get_max_threads();
    std::string extendLogs[numThreads];
    if (!Cap3Options::getOptions().getCap3Path().empty()) {
        Cap3 cap3Instances[numThreads];
        #pragma omp parallel for
        for(int i = 0; i < numThreads; i++) {
            extendLogs[i] = cap3Instances[i].extendContigs(contigs, contigReadSet, changedContigs, finalContigs, minimumCoverage, i, numThreads);
        }
    } else if (!NewblerOptions::getOptions().getNewblerPath().empty()) {
        Newbler newblerInstances[numThreads];
        #pragma omp parallel for
        for(int i = 0; i < numThreads; i++) {
            extendLogs[i] = newblerInstances[i].extendContigs(contigs, contigReadSet, changedContigs, finalContigs, minimumCoverage, i, numThreads);
        }
    } else {
        extendLog = extendContigsWithContigExtender(contigs, contigReadSet,
                changedContigs, finalContigs,
                minKmerSize, minimumCoverage, maxKmerSize, maxExtend, kmerStep);
    }
    for(int i = 0; i < numThreads; i++)
        extendLog += extendLogs[i];
    unlink(contigFile.c_str());
    return extendLog;
}
Example 8: part_vertices
ParallelBFS::ParallelBFS(const mpi::communicator &comm,
                         const NodeList &vertices,
                         const NodeList &edges) :
        comm(comm) {
    NodeId part = (NodeId)vertices.size() / comm.size(),
           left_vertices = (NodeId)vertices.size() % comm.size(),
           first_vertex = 0, first_edge = 0;
    NodeList part_vertices((size_t)comm.size());
    NodeList first_vertices((size_t)comm.size());
    NodeList part_edges((size_t)comm.size());
    NodeList first_edges((size_t)comm.size());
    NodeList all_description((size_t)(comm.size() << 2));
    for (int i = 0; i < comm.size(); ++i) {
        NodeId this_part = part + (i < left_vertices);
        NodeId last_edge = first_vertex + this_part == vertices.size() ?
                           (NodeId)edges.size() :
                           vertices[first_vertex + this_part];
        all_description[(i<<2)] = (NodeId)vertices.size();
        all_description[(i<<2) + 1] = first_vertices[i] = first_vertex;
        all_description[(i<<2) + 2] = part_vertices[i] = this_part;
        all_description[(i<<2) + 3] = part_edges[i] = last_edge - first_edge;
        first_edges[i] = first_edge;
        first_edge = last_edge;
        first_vertex += this_part;
    }
    NodeList description(4);
    mpi::scatter(comm, all_description.data(), description.data(), 4, 0);
    this->vertex_total_count = description[0];
    this->first_vertex = description[1];
    this->vertices.resize((size_t)description[2]);
    mpi::scatterv(comm, vertices, part_vertices, first_vertices,
                  this->vertices, 0);
    this->edges.resize((size_t)description[3]);
    mpi::scatterv(comm, edges, part_edges, first_edges,
                  this->edges, 0);
    prepare();
}
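The loop above packs four numbers per rank (total vertex count, first vertex, local vertex count, local edge count) into all_description, using the usual near-equal split. A tiny standalone illustration of that split with made-up numbers — 10 vertices over 3 ranks come out as blocks of 4, 3 and 3:

// Illustrative arithmetic only (the vertex count and rank count are assumptions).
int n = 10, size = 3;
for (int i = 0; i < size; ++i) {
    int this_part = n / size + (i < n % size);  // 4, 3, 3
    std::cout << "rank " << i << " owns " << this_part << " vertices\n";
}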
Example 9: execute
void Manager::execute(mpi::communicator slave, std::string masterComputer, std::string cheminPere) {
    mpi::communicator world;
    // for each floor
    for (std::vector<std::vector<Rule *> >::iterator it1 = building.begin(); it1 != building.end(); ++it1) {
        // for each rule
        for (std::vector<Rule *>::iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
            // execute the rule if it's our turn
            if ((currentRank % (world.size()-1))+1 == world.rank()) {
                std::cout << printCurrentThread() << "executing " << (*it2)->get_name() << std::endl;
                (*it2)->execute(dictionary, masterComputer, cheminPere);
                std::cout << printCurrentThread() << "finished " << (*it2)->get_name() << std::endl;
            }
            currentRank++;
        }
        slave.barrier();
    }
    // send a message to tell the master that we are done
    std::stringstream messageSend;
    messageSend << world.rank() << ";done";
    world.send(0, 0, messageSend.str());
}
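The final send implies that rank 0 collects one "<rank>;done" string per worker. That master-side loop is not part of this example; a hypothetical counterpart (names assumed) could be:

// Hypothetical master-side counterpart, not from the original project.
void wait_for_workers(const mpi::communicator& world) {
    for (int i = 1; i < world.size(); ++i) {
        std::string msg;
        world.recv(mpi::any_source, 0, msg);   // e.g. "3;done"
        std::cout << "master received: " << msg << std::endl;
    }
}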
Example 10: find_prime_numbers
/**
 * Find the prime numbers in the range [2, limit] using the sieve of Atkin:
 * http://en.wikipedia.org/wiki/Sieve_of_Atkin
 * Internally an array of limit+1 flags is kept, where is_prime[n] == true
 * means that n is currently marked as prime.
 *
 * @param limit  upper limit of the range
 * @param primes output vector collecting the primes found
 */
void find_prime_numbers(mpi::communicator world, int limit, ms_vector *primes) {
    int sqrt_limit = ceil(sqrt(limit));
    vector<bool> is_prime(limit + 1, false);
    vector<vector<bool> > matrix_is_prime(world.size());
    is_prime[2] = true;
    is_prime[3] = true;
    int size = world.size();
    // if the number of processes > sqrt_limit
    if (size > sqrt_limit)
        // pretend we only have sqrt_limit processes
        size = sqrt_limit;
    // compute how many numbers each process has to scan
    int howmuch = sqrt_limit / size;
    // compute where this process starts looking
    int start = 1 + (howmuch * world.rank());
    // compute where this process stops looking
    int stop = howmuch * (world.rank() + 1);
    // if stop is out of range, clamp it to limit
    if (stop > limit)
        stop = limit;
    // execute the algorithm
    for (int x = start; x <= stop; x++) {
        # pragma omp parallel for default(none) shared(sqrt_limit, limit, is_prime, x)
        for (int y = 1; y <= sqrt_limit; y++) {
            int n = 4 * x * x + y * y;
            if (n <= limit && ((n % 12) == 1 || (n % 12) == 5)) {
                # pragma omp critical
                {
                    is_prime[n] = !is_prime[n];
                }
            }
            n = 3 * x * x + y * y;
            if (n <= limit && (n % 12) == 7) {
                # pragma omp critical
                {
                    is_prime[n] = !is_prime[n];
                }
            }
            n = 3 * x * x - y * y;
            if (x > y && n <= limit && (n % 12) == 11) {
                # pragma omp critical
                {
                    is_prime[n] = !is_prime[n];
                }
            }
        }
    }
    // gather: collect every process's is_prime flags on rank 0
    mpi::gather(world, is_prime, matrix_is_prime, 0);
    // the root process finalizes the algorithm
    if (world.rank() == 0) {
        // fold each process's toggles into the next row (the last row ends up with the combined result)
        for (unsigned int i = 1; i < matrix_is_prime.size(); i++) {
            # pragma omp parallel for default(none) shared(matrix_is_prime, limit, i)
            for (int j = 1; j <= limit; j++) {
                if (matrix_is_prime[i - 1][j]) {
                    # pragma omp critical
                    {
                        matrix_is_prime[i][j] = !matrix_is_prime[i][j];
                    }
                }
            }
        }
        // remove the remaining non-prime numbers
        int index = matrix_is_prime.size() - 1;
        # pragma omp parallel for default(none) shared(sqrt_limit, matrix_is_prime, limit, index)
        for (int n = 5; n <= sqrt_limit; n++) {
            if (matrix_is_prime[index][n]) {
                int k = n * n;
                for (int i = k; i <= limit; i += k) {
                    # pragma omp critical
                    {
                        matrix_is_prime[index][i] = false;
                    }
                }
            }
        }
        // add the numbers 2 and 3
// ... the rest of this example is omitted ...
Example 11: mpi_scatter
/// Scatter a mesh over the communicator c
friend gf_mesh mpi_scatter(gf_mesh m, mpi::communicator c, int root) {
  auto m2 = gf_mesh{m.domain(), m.size(), m.positive_only()};
  std::tie(m2._first_index_window, m2._last_index_window) = mpi::slice_range(m2._first_index, m2._last_index, c.size(), c.rank());
  return m2;
}
Example 12: simrun_slave
void simrun_slave( const sim_parameters& par, const mpi::communicator& mpicomm )
{
  Replica* rep = new Replica(par);
  if ( rep->prepare( par.init ) == false ) {
    delete rep;
  }
  // perform dry runs to reach thermal equilibrium
  rep->mcstep_dry( par.drysweeps );
  unsigned int completed_bins_thisslave = 0;
  bool master_out_of_work = false;
  unsigned int scheduled_bins_thisslave;
  mpicomm.send( 0, MSGTAG_S_M_REQUEST_BINS );
  mpicomm.recv( 0, MSGTAG_M_S_DISPATCHED_BINS, scheduled_bins_thisslave );
  master_out_of_work = ( scheduled_bins_thisslave == 0 );
  std::vector<double> q2_binmeans;
  std::vector<double> q4_binmeans;
  while ( scheduled_bins_thisslave > 0 ) {
    unsigned int new_scheduled_bins_thisslave;
    mpi::request master_answer;
    if ( !master_out_of_work ) {
      // ask the master for more work
      mpicomm.send( 0, MSGTAG_S_M_REQUEST_BINS );
      master_answer = mpicomm.irecv(
        0, MSGTAG_M_S_DISPATCHED_BINS,
        new_scheduled_bins_thisslave
      );
    }
    // initialize binning array
    vector<double> q2_currentbin;
    vector<double> q4_currentbin;
    try {
      // try to allocate enough memory ...
      q2_currentbin.reserve( par.binwidth );
      q4_currentbin.reserve( par.binwidth );
    } catch ( bad_alloc ) {
      delete rep;
    }
    for ( unsigned int mcs = 0; mcs < par.binwidth; ++mcs ) {
      // perform a Monte Carlo step
      rep->mcs();
      // measure observables
      double q2 = 0, q4 = 0;
      double thissample_q = rep->Q();
      // remember the sample's properties to calculate their mean value
      q2 = thissample_q * thissample_q;
      q4 = thissample_q * thissample_q * thissample_q * thissample_q;
      q2_currentbin.push_back(q2);
      q4_currentbin.push_back(q4);
    }
    q2_binmeans.push_back(
      accumulate( q2_currentbin.begin(), q2_currentbin.end(), 0.0 ) /
      static_cast<double>( q2_currentbin.size() )
    );
    q2_currentbin.clear();
    // report completion of the work
    mpicomm.send( 0, 2 );
    ++completed_bins_thisslave;
    --scheduled_bins_thisslave;
    if ( !master_out_of_work ) {
      // wait for answer from master concerning the next bin
      master_answer.wait();
      if ( new_scheduled_bins_thisslave == 1 ) {
        ++scheduled_bins_thisslave;
      } else {
        master_out_of_work = true;
      }
    }
  }
  assert( mpicomm.rank() != 0 );
  mpi::gather( mpicomm, q2_binmeans, 0 );
  return;
}
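The closing mpi::gather uses the non-root overload, which only sends this slave's bin means. On rank 0 the matching call has to be the root overload so the data is actually collected; a hypothetical sketch of that side (variable names assumed, not from this project):

// Hypothetical root-side counterpart of the final gather.
std::vector<double> q2_binmeans_master;               // the master's own bin means
std::vector<std::vector<double> > q2_binmeans_all;    // one vector per rank after the call
mpi::gather( mpicomm, q2_binmeans_master, q2_binmeans_all, 0 );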
Example 13: outer_mpi
void
outer_mpi(mpi::communicator world,
          pt1D* ptVec,      /* vector of points */
          real2D* matrix,   /* matrix to fill */
          real1D* realVec,  /* vector to fill */
          int n             /* size */
){
    int lo, hi;       /* work controls */
    int r, c;         /* loop indices */
    real d;           /* distance */
    real d_max_local = -1.0;  // maximum distance
    real d_max;               // maximum distance
    bool work;        /* do useful work? */
    int i, j;
    /* all elements except matrix diagonal */
    work = get_block_rows_mpi (world, 0, n, &lo, &hi);
    if (work) {
        for (r = lo; r < hi; r++) {
            realVec[r] = ptMag(&(ptVec[r]));
            for (c = 0; c < r; c++) {
                d = ptDist (&(ptVec[r]), &(ptVec[c]));
                if (d > d_max_local) {
                    d_max_local = d;
                }
                // fill columns 0 to r only
                matrix[r][c] = d;
            }
        }
    }
    // reduce to maximum d's
    all_reduce (world, d_max_local, d_max, mpi::maximum<real>());
    /* matrix diagonal */
    d = d_max * n;
    if (work) {
        for (r = lo; r < hi; r++) {
            matrix[r][r] = d;
        }
    }
    // broadcast matrix, realVec
    for (i = 0; i < world.size (); i++) {
        if (get_block_rows_mpi (world, 0, n, &lo, &hi, i)) {
            broadcast (world, &realVec[lo], hi - lo, i);
            // broadcast row by row since n may be smaller than MAXEXT
            for (j = lo; j < hi; j++) {
                broadcast (world, matrix[j], n, i);
            }
        }
    }
    // fill in the rest to make symmetric matrix
    for (r = 0; r < n; r++) {
        for (c = 0; c < r; c++) {
            matrix[c][r] = matrix[r][c];
        }
    }
    /* return */
}
Example 14: world
Simulation3D::Simulation3D(double L_x, double L_y, double L_z,
                           double T,
                           unsigned int n_cells, unsigned int n_steps,
                           unsigned int procs_x, unsigned int procs_y, unsigned int procs_z,
                           unsigned int block_size,
                           std::string& dump_dir,
                           Simulation3DInitializer* init, mpi::communicator & world) :
  world(world),
  xLine(world.split(world.rank() / procs_x)),
  yLine(world.split(world.rank() % procs_x + (world.rank() / (procs_x*procs_y)) * procs_x)),
  zLine(world.split(world.rank() % (procs_x*procs_y))),
  nSteps(n_steps),
  currentStep(0),
  dx(L_x/n_cells),
  dy(L_y/n_cells),
  dz(L_z/n_cells),
  dt(T/n_steps),
  blockSize(block_size),
  preFactorX(LIGHTSPEED*dt/(2*dx)),
  preFactorY(LIGHTSPEED*dt/(2*dy)),
  preFactorZ(LIGHTSPEED*dt/(2*dz)),
  E(new double[3*blockSize*blockSize*blockSize]),
  B(new double[3*blockSize*blockSize*blockSize]),
  tmp_field(new double[3*blockSize*blockSize*blockSize]),
  rhsx(new double[blockSize*blockSize*blockSize]),
  rhsy(new double[blockSize*blockSize*blockSize]),
  rhsz(new double[blockSize*blockSize*blockSize]),
  rhs_ptrs_x(new double*[blockSize*blockSize]),
  rhs_ptrs_y(new double*[blockSize*blockSize]),
  rhs_ptrs_z(new double*[blockSize*blockSize]),
  dumpDir(dump_dir)
{
  procsX = xLine.size();
  procsY = yLine.size();
  procsZ = zLine.size();
  VacuumMatrixInitializer mat_init_x = VacuumMatrixInitializer(dx, dt, blockSize, determineBoundary(xLine));
  VacuumMatrixInitializer mat_init_y = VacuumMatrixInitializer(dy, dt, blockSize, determineBoundary(yLine));
  VacuumMatrixInitializer mat_init_z = VacuumMatrixInitializer(dz, dt, blockSize, determineBoundary(zLine));
  VacuumCouplingInitializer coupling_init_x = VacuumCouplingInitializer(& mat_init_x, blockSize, xLine);
  VacuumCouplingInitializer coupling_init_y = VacuumCouplingInitializer(& mat_init_y, blockSize, yLine);
  VacuumCouplingInitializer coupling_init_z = VacuumCouplingInitializer(& mat_init_z, blockSize, zLine);
  std::vector<AbstractMatrixInitializer*> mat_inits_x(blockSize*blockSize, & mat_init_x);
  std::vector<AbstractMatrixInitializer*> mat_inits_y(blockSize*blockSize, & mat_init_y);
  std::vector<AbstractMatrixInitializer*> mat_inits_z(blockSize*blockSize, & mat_init_z);
  std::vector<AbstractCouplingInitializer*> coupling_inits_x(blockSize*blockSize, & coupling_init_x);
  std::vector<AbstractCouplingInitializer*> coupling_inits_y(blockSize*blockSize, & coupling_init_y);
  std::vector<AbstractCouplingInitializer*> coupling_inits_z(blockSize*blockSize, & coupling_init_z);
  guardB = allocateGuardStorage();
  guardE = allocateGuardStorage();
  init->setOffsets(xLine, yLine, zLine);
  initFields(init);
  xUpdateRHSs = init->initCollection(mat_inits_x, coupling_inits_x, blockSize, xLine);
  yUpdateRHSs = init->initCollection(mat_inits_y, coupling_inits_y, blockSize, yLine);
  zUpdateRHSs = init->initCollection(mat_inits_z, coupling_inits_z, blockSize, zLine);
  guardSendbuf = new double[3*blockSize*blockSize];
}
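The three split calls in the initializer list carve the world communicator into x-, y- and z-lines of a procs_x × procs_y × procs_z grid. Assuming ranks are laid out row-major in x, then y, then z (which is what those split colours encode), each rank's grid coordinates would come out as in this sketch:

// Assumed rank-to-coordinate mapping implied by the split colours above.
int rank = world.rank();
unsigned int ix = rank % procs_x;                  // position along the xLine
unsigned int iy = (rank / procs_x) % procs_y;      // position along the yLine
unsigned int iz = rank / (procs_x * procs_y);      // position along the zLine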
Example 15: initialize_new_objects
void initialize_new_objects(mpi::communicator& world,
                            parameter_t const& P, directory_structure_t const& ds,
                            geometric_info_t const& gi, object_info_t& oi,
                            vector<std::vector<std::string> > const &seq, int tt,
                            vector<CImg<unsigned char> > const& images,
                            vector<matrix<float> > const& grd,
                            vector<matrix<float> >& detected_rects)
{
    int Ncam = seq.size();
    vector<object_trj_t> & trlet_list = oi.trlet_list;
    int nobj = trlet_list.size();
    int num_new_obj = detected_rects(0).size1();
    int T = seq[0].size();
    int np = oi.model.size();
    int num_scales = P.scales.size();
    //std::cout<<"detected_rects="<<detected_rects<<std::endl;
    for(int oo=0; oo<num_new_obj; ++oo)
    {
        int nn = oi.curr_num_obj + oo;
        trlet_list(nn).startt = tt;
        trlet_list(nn).endt = tt;
        trlet_list(nn).state = 1;
        trlet_list(nn).trj = vector<matrix<float> >(Ncam);
        for(int cam=0; cam<Ncam; ++cam)
        {
            trlet_list(nn).trj(cam) = scalar_matrix<float>(T, 4, 0);
        }
        trlet_list(nn).trj_3d = scalar_matrix<float>(T, 2, 0);
        trlet_list(nn).hist_p = vector<matrix<float> >(Ncam);
        trlet_list(nn).hist_q = vector<matrix<float> >(Ncam);
        trlet_list(nn).fscores = vector<matrix<float> >(Ncam);
        trlet_list(nn).scores = scalar_matrix<float>(Ncam, T, 0);
        vector<candidate_array<Float> > cand_array(Ncam);
        for(int cam=0; cam<Ncam; ++cam)
        {
            trlet_list(nn).fscores(cam) = scalar_matrix<float>(np*2, T, 0);
            float w = detected_rects(cam)(oo, 2)-detected_rects(cam)(oo, 0);
            float h = detected_rects(cam)(oo, 3)-detected_rects(cam)(oo, 1);
            row(trlet_list(nn).trj(cam), tt) = row(detected_rects(cam), oo);
            matrix<float> rects;
            compute_part_rects(detected_rects(cam)(oo, 0), detected_rects(cam)(oo, 1),
                               w, h, oi.model, rects);
            pmodel_t pmodel;
            vector<float> br(row(detected_rects(cam), oo));
            rects_to_pmodel_geom(br, gi.horiz_mean, pmodel);
            oi.pmodel_list(cam, nn) = pmodel;
            //collect_sift(grd(cam), );
            matrix<float> hist_p, hist_q;
            collect_hist(images(cam), rects, hist_p, hist_q);
            trlet_list(nn).hist_p(cam) = hist_p;
            trlet_list(nn).hist_q(cam) = hist_q;
            matrix<Float> cand_rects;
            vector<Float> cand_scale;
            matrix<int> cand_ijs;
            if(0==world.rank())
            {
                std::vector<float> sxr, syr;
                for(float v=-P.xrange/2; v<=P.xrange/2; v+=P.xstep)
                {
                    sxr.push_back(v);
                }
                for(float v=-P.yrange/2; v<=P.yrange/2; v+=P.ystep)
                {
                    syr.push_back(v);
                }
                vector<float> xr(sxr.size()), yr(syr.size());
                std::copy(sxr.begin(), sxr.end(), xr.begin());
                std::copy(syr.begin(), syr.end(), yr.begin());
                float feetx = (trlet_list(nn).trj(cam)(tt, 0)
                               +trlet_list(nn).trj(cam)(tt, 2))/2;
                float feety = trlet_list(nn).trj(cam)(tt, 3);
                enumerate_rects_inpoly(images(cam), oi.pmodel_list(cam, nn),
                                       feetx, feety,
                                       xr, yr, P.scales, gi.horiz_mean, gi.horiz_sig,
                                       gi.polys_im(tt, cam),
                                       cand_rects, cand_scale,
                                       cand_ijs, cand_array(cam));
            }
            mpi::broadcast(world, cand_rects, 0);
// ... the rest of this example is omitted ...