本文整理汇总了C++中boost::mpi::communicator类的典型用法代码示例。如果您正苦于以下问题:C++ communicator类的具体用法?C++ communicator怎么用?C++ communicator使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了communicator类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: generator
// Verifies boost::mpi::all_gatherv: rank r contributes (r+1) copies of
// generator(r); every rank must end up with the full concatenation.
void
all_gatherv_test(const mpi::communicator& comm, Generator generator,
                 std::string kind)
{
  typedef typename Generator::result_type value_type;
  using boost::mpi::all_gatherv;

  // Build the per-rank sizes, the globally expected result, and this
  // rank's own contribution in a single pass over all ranks.
  std::vector<value_type> myvalues, expected, values;
  std::vector<int> sizes;
  const int nproc = comm.size();
  for (int r = 0; r < nproc; ++r) {
    value_type v = generator(r);
    sizes.push_back(r + 1);
    for (int copy = 0; copy <= r; ++copy) {
      expected.push_back(v);
      if (comm.rank() == r)
        myvalues.push_back(v);
    }
  }

  if (comm.rank() == 0) {
    std::cout << "Gathering " << kind << "...";
    std::cout.flush();
  }

  mpi::all_gatherv(comm, myvalues, values, sizes);

  BOOST_CHECK(values == expected);
  if (comm.rank() == 0 && values == expected)
    std::cout << "OK." << std::endl;

  (comm.barrier)();
}
示例2: run_model
// Runs the Zombie model simulation: rank 0 logs wall-clock start/end times
// and the elapsed simulation time; any exception (e.g. unreadable data
// files) is reported to stderr rather than propagated.
// Returns the configured output property (OUTPUT_KEY) from props.
std::string run_model(Properties& props, boost::mpi::communicator& world) {
  try {
    repast::relogo::SimulationRunner runner(&world);

    if (world.rank() == 0) {
      std::string startTime;
      repast::timestamp(startTime);
      std::cout << "Start Time: " << startTime << std::endl;
    }

    repast::Timer timer;
    timer.start();
    runner.run<ZombieObserver, repast::relogo::Patch>(props);

    if (world.rank() == 0) {
      std::string endTime;
      repast::timestamp(endTime);
      std::cout << "End Time: " << endTime << "\nElapsed Time: "
                << timer.stop() << std::endl;
    }
  } catch (std::exception& exp) {
    // catch any exception (e.g. if data files couldn't be opened) and
    // print out the errors.
    std::cerr << "ERROR: " << exp.what() << std::endl;
  }
  return props.getProperty(OUTPUT_KEY);
}
示例3: compute
// Runs the (possibly MPI-distributed) computation of the two-particle GF.
// clear: if true, per-part data is discarded after accumulation, so the
//        post-run term broadcast is skipped entirely.
// comm:  communicator over which the per-part jobs are distributed.
// Throws exStatusMismatch unless Status >= Prepared; no-op once Computed.
void TwoParticleGF::compute(bool clear, const boost::mpi::communicator & comm)
{
    if (Status < Prepared) throw (exStatusMismatch());
    if (Status >= Computed) return;
    if (!Vanishing) {
        // Create a "skeleton" class with pointers to part that can call a compute method
        pMPI::mpi_skel<ComputeAndClearWrap> skel;
        bool fill_container = m_data_.NBosonic() > 0 && m_data_.NFermionic() > 0;
        skel.parts.reserve(parts.size());
        for (size_t i=0; i<parts.size(); i++) {
            skel.parts.push_back(ComputeAndClearWrap(&m_data_, parts[i], clear, fill_container, 1));
        };
        std::map<pMPI::JobId, pMPI::WorkerId> job_map = skel.run(comm, true); // actual running - very costly
        int rank = comm.rank();
        // (Removed an unused local that cached comm.size().)
        // Start distributing data
        //DEBUG(comm.rank() << getIndex(0) << getIndex(1) << getIndex(2) << getIndex(3) << " Start distributing data");
        comm.barrier();
        if (!clear) {
            // Each part was computed on exactly one worker (job_map[p]);
            // broadcast its terms from that owner so every rank ends up
            // with the complete result.
            for (size_t p = 0; p<parts.size(); p++) {
                boost::mpi::broadcast(comm, parts[p]->NonResonantTerms, job_map[p]);
                boost::mpi::broadcast(comm, parts[p]->ResonantTerms, job_map[p]);
                if (rank == job_map[p]) {
                    parts[p]->Status = TwoParticleGFPart::Computed;
                };
            };
            comm.barrier();
        }
    };
    Status = Computed;
}
示例4: runMaster
// Computes A*A for a size x size matrix filled with a deterministic pattern.
// grid_dimension == 0: compute sequentially on the master.
// Otherwise: split A into grid_dimension^2 cross-sections, send one to each
// slave (ranks 1..), then receive and reassemble the partial products.
// Prints the elapsed wall-clock time (seconds) to stdout.
void runMaster(mpi::communicator world, int size, int grid_dimension)
{
  // Start timer and go.
  boost::chrono::system_clock::time_point start = boost::chrono::system_clock::now();

  // Build the input matrix with a deterministic pattern.
  Matrix A(Size(size, size));
  Matrix result(Size(size, size));
  for(int row = 0; row < A.size.rows; ++row){
    for(int col = 0; col < A.size.cols; ++col){
      A.data[row][col] = (row % 11) + (col % 11);
    }
  }
  //cout << A << endl;
  //cout << "\nProduct:\n" << A*A << endl;

  // Do sequential
  if (grid_dimension == 0)
    A.square(result);

  // Else parallel
  else{
    // Split matrix up and send to slaves.
    // NOTE(review): assumes grid_dimension evenly divides size and that at
    // least grid_dimension^2 slave ranks exist — confirm at call site.
    int slave_id = 1;
    int sub_matrix_sizes = size / grid_dimension;

    for(int i = 0; i < size; i += sub_matrix_sizes){
      for(int j = 0; j < size; j += sub_matrix_sizes){
        MatrixCrossSection cs = getCrossSection( A, i, j, sub_matrix_sizes);
        world.send(slave_id, 0, cs);
        slave_id ++;
      }
    }

    // Receive a partial result from every slave and stitch it into place.
    // (Removed an unused std::vector<Matrix> 'saved' that was never read.)
    int num_slaves = world.size() -1;
    for(int i = 1; i <= num_slaves; ++i){
      Matrix r;
      world.recv(i, 0, r);
      result.insertSubMatrix(r);
    }
  }

  // Done
  boost::chrono::duration<double> sec = boost::chrono::system_clock::now() - start;
  cout << sec.count() << endl;

  // Print Result
  //cout << "\nResult:\n" << result << endl;
  //assert ( result == A*A);
}
开发者ID:gareth-ferneyhough,项目名称:Parallel-Matrix-Multiplication,代码行数:55,代码来源:matrix_multiplication_parallel.cpp
示例5: runSlave
// Worker side of the parallel matrix squaring: receive one cross-section
// from the master (rank 0), compute its product contribution, and send the
// resulting sub-matrix back.
void runSlave(mpi::communicator world)
{
  MatrixCrossSection section;
  world.recv(0, 0, section);

  Matrix partial(Size(section.row_data.size(), section.row_data.size()));
  section.calculateVectorProduct(partial);

  world.send(0, 0, partial);
}
开发者ID:gareth-ferneyhough,项目名称:Parallel-Matrix-Multiplication,代码行数:11,代码来源:matrix_multiplication_parallel.cpp
示例6: printit
// Prints each rank's local 'things' in rank order (with an optional caption
// emitted once by rank 0), then reduces and reports the global element count.
void
printit(const boost::mpi::communicator& comm,
        const std::vector<T>& things,
        const std::string& caption)
{
  // Rank 0 announces the caption first.
  if (!caption.empty() && comm.rank() == 0) {
    std::cout << caption << std::endl;
    std::cout.flush();
  }

  // Ranks take turns printing, separated by barriers so the lines come
  // out in rank order.
  for (int turn = 0; turn < comm.size(); ++turn) {
    if (comm.rank() == turn) {
      std::cout << turn << ": ";
      std::copy(things.begin(), things.end(),
                std::ostream_iterator<T>(std::cout, ","));
      std::cout << std::endl;
      std::cout.flush();
    }
    comm.barrier();
  }
  comm.barrier();

  // Sum the per-rank element counts; only rank 0 reports the total.
  size_t global_size;
  boost::mpi::reduce(comm, things.size(), global_size, std::plus<size_t>(), 0);
  if (comm.rank() == 0) {
    if (!caption.empty()) {
      std::cout << caption;
    }
    std::cout << "Number of things: " << global_size << std::endl;
  }
  comm.barrier();

  std::cout << comm.rank() << ": leaving printit()" << std::endl;
}
示例7: slave
// Worker loop: receive work items from any source until a message tagged
// DIETAG arrives; for each item, compute a result and return it to rank 0.
static void slave(mpi::communicator world) {
  for (;;) {
    int task;
    mpi::status status = world.recv(mpi::any_source, mpi::any_tag, task);
    if (status.tag() == DIETAG) {
      return;
    }
    int answer;
    do_work(task, answer);
    world.send(0, 0, answer);
  }
}
示例8: Z
// Absorbs a batch of adapter samples sent by a child process and, once the
// adapter signals it has seen enough, pushes an updated proposal to every
// child.
// child:  communicator connected to the sending child.
// status: probed message status; supplies source, tag and element count.
void bi::MarginalSISHandler<B,A,S>::handleAdapterSamples(
    boost::mpi::communicator child, boost::mpi::status status) {
  typedef typename temp_host_matrix<real>::type matrix_type;
  static const int N = B::NP;

  /* add samples */
  // Each column of Z packs one sample: N parameter values followed by T
  // further values -- presumably weights/auxiliary terms; the layout is
  // fixed by the sender, verify against the child-side send code.
  boost::optional<int> n = status.template count<real>();
  if (n) {
    matrix_type Z(N + T, *n / (N + T));
    child.recv(status.source(), status.tag(), Z.buf(), *n);
    for (int j = 0; j < Z.size2(); ++j) {
      adapter.add(subrange(column(Z,j), 0, N), subrange(column(Z,j), N, T));
    }
  }

  /* send new proposal if necessary */
  if (adapter.stop(t)) {
    adapter.adapt(t);
    BOOST_AUTO(q, adapter.get(t));
    // Fire off a non-blocking proposal to every child; the pending requests
    // are tracked in node.requests.
    BOOST_AUTO(iter, node.children.begin());
    for (; iter != node.children.end(); ++iter) {
      node.requests.push_front(iter->isend(0, MPI_TAG_ADAPTER_PROPOSAL, q));
    }
    ///@todo Serialize q into archive just once, then send to all. This may
    ///be how broadcast is already implemented in Boost.MPI.
  }
}
示例9:
// Exercise the single-root broadcast test once with every rank as the root.
void
broadcast_test(const mpi::communicator& comm, const T& bc_value,
               std::string const& kind)
{
  const int nproc = comm.size();
  for (int root = 0; root != nproc; ++root)
    broadcast_test(comm, bc_value, kind, root);
}
示例10: init
// Creates the global PETSc vector x of length 5*comm.size() and fills it
// with one complex entry per index: x[i] = (i, 5*P - i - 1), P = comm.size().
// Each rank sets only the entries it owns.
void init(const mpi::communicator& comm)
{
  PetscErrorCode ierr;
  PetscInt lo, hi;
  ierr = VecCreate(comm,&x); // CHKERRQ(ierr);
  ierr = VecSetSizes(x, PETSC_DECIDE, 5*comm.size()); // CHKERRQ(ierr);
  ierr = VecSetFromOptions(x); // CHKERRQ(ierr);
  // VecGetOwnershipRange yields a half-open range [lo, hi): 'hi' is one
  // PAST the last locally owned index.
  ierr = VecGetOwnershipRange(x, &lo, &hi);
  // BUG FIX: was 'i <= hi', which wrote one element beyond the local range
  // (out of bounds on the last rank, double-set on interior boundaries).
  for (PetscInt i = lo; i < hi; ++i) {
    std::complex<double> v(i, 5*comm.size() - i - 1);
    ierr = VecSetValue(x, i, v, INSERT_VALUES);
  }
  ierr = VecAssemblyBegin(x); // CHKERRQ(ierr);
  ierr = VecAssemblyEnd(x); // CHKERRQ(ierr);
}
示例11: manageTranslators
// Master loop of the document decoder: hands each test-set document to a
// translator process, collects finished translations (TAG_COLLECT), and
// terminates once every translator has reported TAG_STOP_COLLECTING.
// comm:    communicator containing this master and the translators.
// testset: corpus of documents to translate; receives the translations.
void DocumentDecoder::manageTranslators(
    boost::mpi::communicator comm,
    NistXmlCorpus &testset
) {
  namespace mpi = boost::mpi;

  mpi::request reqs[2];
  int stopped = 0;

  NumberedOutputDocument translation;
  reqs[0] = comm.irecv(mpi::any_source, TAG_COLLECT, translation);
  reqs[1] = comm.irecv(mpi::any_source, TAG_STOP_COLLECTING);

  // Seed every translator with an initial document.
  NistXmlCorpus::const_iterator it = testset.begin();
  uint docno = 0;
  for(int i = 0; i < comm.size() && it != testset.end(); ++i, ++docno, ++it) {
    LOG(logger_, debug, "S: Sending document " << docno << " to translator " << i);
    comm.send(i, TAG_TRANSLATE, std::make_pair(docno, *(*it)->asMMAXDocument()));
  }

  for(;;) {
    std::pair<mpi::status, mpi::request *> wstat = mpi::wait_any(reqs, reqs + 2);
    if(wstat.first.tag() == TAG_STOP_COLLECTING) {
      stopped++;
      LOG(logger_, debug, "C: Received STOP_COLLECTING from translator "
          << wstat.first.source() << ", now " << stopped << " stopped translators.");
      if(stopped == comm.size()) {
        reqs[0].cancel();
        return;
      }
      *wstat.second = comm.irecv(mpi::any_source, TAG_STOP_COLLECTING);
    } else {
      LOG(logger_, debug, "C: Received translation of document " <<
          translation.first << " from translator " << wstat.first.source());
      // BUG FIX: consume the received translation BEFORE re-posting the
      // irecv that writes into the same 'translation' buffer. The original
      // re-posted first, so the asynchronous receive could overwrite the
      // buffer while it was still being read below.
      testset[translation.first]->setTranslation(translation.second);
      reqs[0] = comm.irecv(mpi::any_source, TAG_COLLECT, translation);
      if(it != testset.end()) {
        LOG(logger_, debug, "S: Sending document " << docno <<
            " to translator " << wstat.first.source());
        comm.send(wstat.first.source(), TAG_TRANSLATE,
                  std::make_pair(docno, *(*it)->asMMAXDocument()));
        ++docno; ++it;
      } else {
        LOG(logger_, debug,
            "S: Sending STOP_TRANSLATING to translator " << wstat.first.source());
        comm.send(wstat.first.source(), TAG_STOP_TRANSLATING);
      }
    }
  }
}
示例12: reduce_and_check
// Logically-ANDs 'local_value' across all ranks; rank 0 receives the result
// and asserts it with BOOST_CHECK. Non-root ranks use the reduce overload
// without an output argument, as Boost.MPI requires.
void reduce_and_check(const boost::mpi::communicator &comm, bool local_value) {
  const bool is_root = (comm.rank() == 0);
  if (is_root) {
    bool all_ok;
    boost::mpi::reduce(comm, local_value, all_ok, std::logical_and<bool>(), 0);
    BOOST_CHECK(all_ok);
  } else {
    boost::mpi::reduce(comm, local_value, std::logical_and<bool>(), 0);
  }
}
示例13: random_scattered_vector
// Rank 0 assigns each index in [0, global_size) to a uniformly random rank,
// then scatters each rank its assigned indices into 'local_values'.
// Uses the file-scope random generator 'gen' (root only).
void
random_scattered_vector(const boost::mpi::communicator& comm,
                        const int& global_size,
                        std::vector<I>& local_values)
{
  int me(comm.rank());
  int nproc(comm.size());
  std::vector< std::vector<I> > toscatter(nproc);
  if (me == 0) {
    // Hoisted out of the loop: the distribution's parameters never change,
    // so constructing it per iteration was pure waste.
    boost::random::uniform_int_distribution<> dist(0, nproc-1);
    for (int i = 0; i < global_size; ++i) {
      int p(dist(gen));
      toscatter[p].push_back(i);
    }
  }
  scatter(comm, toscatter, local_values, 0);
}
示例14: mpi_send_workaround
void mpi_send_workaround(int dest, int tag, const T& value,
boost::mpi::communicator & comm)
{
// serialize T into a string
std::ostringstream oss;
boost::archive::text_oarchive oa(oss);
oa << value;
// send the string
comm.send(dest, tag, oss.str());
}
示例15: sendRequest
// Sends a work request to a randomly chosen process other than ourselves.
// (Original comment, Czech: "Posle pozadavek o praci nahodne vybranemu
// procesu".)
void sendRequest() {
    // BUG FIX: the original called srand(time(NULL)) AFTER the first rand()
    // and reseeded on every invocation, so calls within the same second all
    // drew the same target. Seeding belongs in program startup; it is
    // removed here entirely.
    int pn = rand() % com.size();
    // Re-draw until we pick someone other than ourselves. The original
    // retried only once, so it could still target its own rank. Guard on
    // com.size() > 1 to avoid an infinite loop in the single-process case.
    while (pn == com.rank() && com.size() > 1) {
        pn = rand() % com.size();
    }
    //cout << "Sending WORK_REQUEST to " << pn << endl;
    com.send(pn, WORK_REQUEST);
}