This article collects typical usage examples of the C++ method boost::mpi::communicator::irecv. If you are wondering how communicator::irecv is used in C++, or are looking for concrete examples, the selected code samples below may help. You can also browse further usage examples of the containing class, boost::mpi::communicator.
Three code examples of communicator::irecv are shown below, sorted by popularity by default.
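Before the examples, here is a minimal, self-contained sketch of the basic irecv pattern (not taken from the projects below; the tag value 0 and the message text are arbitrary): post a non-blocking receive, optionally overlap it with other work, then wait on the returned request.

#include <boost/mpi.hpp>
#include <boost/serialization/string.hpp>
#include <iostream>
#include <string>

namespace mpi = boost::mpi;

int main(int argc, char* argv[]) {
    mpi::environment env(argc, argv);
    mpi::communicator world;

    if (world.rank() == 0) {
        std::string msg;
        // irecv returns immediately with a request handle for the pending receive.
        mpi::request req = world.irecv(mpi::any_source, 0, msg);
        // ... other work could overlap with the communication here ...
        mpi::status st = req.wait();   // block until the message has arrived
        std::cout << "received from rank " << st.source() << ": " << msg << std::endl;
    } else if (world.rank() == 1) {
        world.send(0, 0, std::string("hello from rank 1"));
    }
    return 0;
}

Run with at least two processes (for example, mpirun -np 2), rank 0 prints the greeting sent by rank 1.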
Example 1: manageTranslators
void DocumentDecoder::manageTranslators(
    boost::mpi::communicator comm,
    NistXmlCorpus &testset
) {
    namespace mpi = boost::mpi;

    mpi::request reqs[2];
    int stopped = 0;

    // Post non-blocking receives for finished translations and for
    // STOP_COLLECTING notifications from the translator processes.
    NumberedOutputDocument translation;
    reqs[0] = comm.irecv(mpi::any_source, TAG_COLLECT, translation);
    reqs[1] = comm.irecv(mpi::any_source, TAG_STOP_COLLECTING);

    // Hand out one document per translator to get the pipeline started.
    NistXmlCorpus::const_iterator it = testset.begin();
    uint docno = 0;
    for(int i = 0; i < comm.size() && it != testset.end(); ++i, ++docno, ++it) {
        LOG(logger_, debug, "S: Sending document " << docno << " to translator " << i);
        comm.send(i, TAG_TRANSLATE, std::make_pair(docno, *(*it)->asMMAXDocument()));
    }

    for(;;) {
        // Wait until either of the two pending receives completes.
        std::pair<mpi::status, mpi::request *> wstat = mpi::wait_any(reqs, reqs + 2);
        if(wstat.first.tag() == TAG_STOP_COLLECTING) {
            stopped++;
            LOG(logger_, debug, "C: Received STOP_COLLECTING from translator "
                << wstat.first.source() << ", now " << stopped << " stopped translators.");
            if(stopped == comm.size()) {
                // All translators have reported STOP_COLLECTING; cancel the
                // remaining TAG_COLLECT receive before returning.
                reqs[0].cancel();
                return;
            }
            // Re-post the receive for the next STOP_COLLECTING message.
            *wstat.second = comm.irecv(mpi::any_source, TAG_STOP_COLLECTING);
        } else {
            LOG(logger_, debug, "C: Received translation of document " <<
                translation.first << " from translator " << wstat.first.source());
            // Store the finished translation before re-posting the receive
            // that reuses the same buffer.
            testset[translation.first]->setTranslation(translation.second);
            reqs[0] = comm.irecv(mpi::any_source, TAG_COLLECT, translation);
            if(it != testset.end()) {
                // More input available: send the next document to the
                // translator that just delivered a result.
                LOG(logger_, debug, "S: Sending document " << docno <<
                    " to translator " << wstat.first.source());
                comm.send(wstat.first.source(), TAG_TRANSLATE,
                    std::make_pair(docno, *(*it)->asMMAXDocument()));
                ++docno; ++it;
            } else {
                // No input left: tell this translator to shut down.
                LOG(logger_, debug,
                    "S: Sending STOP_TRANSLATING to translator " << wstat.first.source());
                comm.send(wstat.first.source(), TAG_STOP_TRANSLATING);
            }
        }
    }
}
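Note on Example 1: the manager keeps two non-blocking receives pending at the same time, one for finished translations (TAG_COLLECT) and one for shutdown notifications (TAG_STOP_COLLECTING), and uses mpi::wait_any to react to whichever completes first. Each completed receive is immediately re-posted so that a matching irecv is always outstanding, and the last pending receive is cancelled with request::cancel() before the function returns.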
Example 2: translate
void DocumentDecoder::translate() {
    namespace mpi = boost::mpi;

    mpi::request reqs[2];
    // Keep one non-blocking receive pending for the STOP_TRANSLATING signal.
    reqs[1] = communicator_.irecv(0, TAG_STOP_TRANSLATING);
    NumberedInputDocument input;
    for(;;) {
        // Post a receive for the next input document and wait until either
        // a document or the stop signal arrives.
        reqs[0] = communicator_.irecv(0, TAG_TRANSLATE, input);
        std::pair<mpi::status, mpi::request *> wstat = mpi::wait_any(reqs, reqs + 2);
        if(wstat.first.tag() == TAG_STOP_TRANSLATING) {
            LOG(logger_, debug, "T: Received STOP_TRANSLATING.");
            // Cancel the still-pending receive for input documents and tell
            // the master that this translator has stopped collecting work.
            reqs[0].cancel();
            communicator_.send(0, TAG_STOP_COLLECTING);
            return;
        } else {
            NumberedOutputDocument output;
            LOG(logger_, debug, "T: Received document " << input.first << " for translation.");
            output.first = input.first;
            output.second = runDecoder(input);
            LOG(logger_, debug, "T: Sending translation of document " << input.first << " to collector.");
            communicator_.send(0, TAG_COLLECT, output);
        }
    }
}
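Note on Example 2: the worker side mirrors the same pattern. A single irecv for the STOP_TRANSLATING signal stays pending across iterations, while the irecv for the next input document is re-posted at the top of every loop iteration; when the stop signal wins the wait_any, the still-pending data receive is cancelled before the worker reports back with TAG_STOP_COLLECTING.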
Example 3: mccrun_slave
void mccrun_slave(
  const Options& opts, const Eigen::VectorXd& vpar,
  const set<observables_t>& obs, const mpi::communicator& mpicomm )
{
  // prepare the simulation
  HubbardModelVMC model = prepare_model( opts, vpar, mpicomm );
  vector< unique_ptr<Observable> > obscalc = prepare_obscalcs( obs, opts );
  ObservableCache obscache;

  // equilibrate the system
  for (
    unsigned int mcs = 0;
    mcs < opts["calc.num-mcs-equil"].as<unsigned int>();
    ++mcs )
  {
    model.mcs();
  }

  // run this slave's part of the Monte Carlo cycle
  unsigned int completed_bins_thisslave = 0;
  bool master_out_of_work = false;
  unsigned int scheduled_bins_thisslave;
  mpicomm.send( 0, MSGTAG_S_M_REQUEST_BINS );
  mpicomm.recv( 0, MSGTAG_M_S_DISPATCHED_BINS, scheduled_bins_thisslave );
  master_out_of_work = ( scheduled_bins_thisslave == 0 );

  while ( scheduled_bins_thisslave > 0 ) {

    unsigned int new_scheduled_bins_thisslave;
    mpi::request master_answer;
    if ( !master_out_of_work ) {
      // ask the master for more work
      mpicomm.send( 0, MSGTAG_S_M_REQUEST_BINS );
      master_answer = mpicomm.irecv(
        0, MSGTAG_M_S_DISPATCHED_BINS,
        new_scheduled_bins_thisslave
      );
    }

    for (
      unsigned int mcs = 0;
      mcs < opts["calc.num-binmcs"].as<unsigned int>();
      ++mcs )
    {
      // perform a Monte Carlo step
      model.mcs();

      // measure observables
      for ( const unique_ptr<Observable>& o : obscalc ) {
        o->measure( model, obscache );
      }
      obscache.clear();
    }

    // tell the observables that a bin has been completed
    for ( const unique_ptr<Observable>& o : obscalc ) {
      o->completebin();
    }

    // report completion of the work
    mpicomm.send( 0, 2 );
    ++completed_bins_thisslave;
    --scheduled_bins_thisslave;

    if ( !master_out_of_work ) {
      // wait for the answer from the master concerning the next bin
      master_answer.wait();
      if ( new_scheduled_bins_thisslave == 1 ) {
        ++scheduled_bins_thisslave;
      } else {
        master_out_of_work = true;
      }
    }
  }

  // send floating point precision control data to master
  mpi::gather( mpicomm, model.get_W_devstat(), 0 );
  mpi::gather( mpicomm, model.get_T_devstat(), 0 );

  // send observables to master
  for ( const unique_ptr<Observable>& o : obscalc ) {
    o->send_results_to_master( mpicomm );
  }
}
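Note on Example 3: here the request returned by irecv is not waited on immediately; the slave first runs a full bin of Monte Carlo steps and only then calls master_answer.wait(), so the variable receiving the answer (new_scheduled_bins_thisslave) must stay alive until the wait completes. As a related sketch (not part of the example; the function name, tag parameter, and variable names are made up for illustration), request::test() can be used to poll a pending irecv without blocking:

#include <boost/mpi.hpp>
#include <boost/optional.hpp>

namespace mpi = boost::mpi;

// Poll a pending receive from rank 0 while doing local work in between.
unsigned int poll_for_bins(const mpi::communicator& comm, int tag) {
    unsigned int dispatched_bins = 0;
    mpi::request req = comm.irecv(0, tag, dispatched_bins);
    for (;;) {
        if (boost::optional<mpi::status> st = req.test()) {
            // The receive has completed; dispatched_bins now holds the answer.
            return dispatched_bins;
        }
        // ... useful local work between polls could go here ...
    }
}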