本文整理汇总了C++中boost::mpi::communicator::rank方法的典型用法代码示例。如果您正苦于以下问题:C++ communicator::rank方法的具体用法?C++ communicator::rank怎么用?C++ communicator::rank使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类boost::mpi::communicator
的用法示例。
在下文中一共展示了communicator::rank方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: run_model
std::string run_model(Properties& props, boost::mpi::communicator& world) {
	// Runs the simulation described by `props` on the given MPI communicator
	// and returns the value of the OUTPUT_KEY property afterwards.
	const bool isRoot = world.rank() == 0;
	try {
		repast::relogo::SimulationRunner runner(&world);
		if (isRoot) {
			std::string startTime;
			repast::timestamp(startTime);
			std::cout << "Start Time: " << startTime << std::endl;
		}
		repast::Timer timer;
		timer.start();
		runner.run<ZombieObserver, repast::relogo::Patch>(props);
		if (isRoot) {
			std::string endTime;
			repast::timestamp(endTime);
			std::cout << "End Time: " << endTime << "\nElapsed Time: "
					<< timer.stop() << std::endl;
		}
	} catch (std::exception& exp) {
		// Surface any failure (e.g. data files that couldn't be opened) on stderr.
		std::cerr << "ERROR: " << exp.what() << std::endl;
	}
	return props.getProperty(OUTPUT_KEY);
}
示例2: generate_data
void generate_data(mpi::communicator local, mpi::communicator world)
{
	// Generator side of the example: push random data blocks to the master
	// collector, which sits in "world" immediately after the generator group.
	const int master_collector = local.size();
	// Seed per-rank so every generator produces a different stream.
	std::srand(time(0) + world.rank());
	// Send between one and three blocks of random samples.
	const int num_data_blocks = std::rand() % 3 + 1;
	for (int block = 0; block < num_data_blocks; ++block) {
		// Generate some random data
		const int num_samples = std::rand() % 1000;
		std::vector<int> data;
		data.reserve(num_samples);
		for (int i = 0; i < num_samples; ++i)
			data.push_back(std::rand());
		// Hand the block to the master collector process.
		std::cout << "Generator #" << local.rank() << " sends some data..."
				<< std::endl;
		world.send(master_collector, msg_data_packet, data);
	}
	// Rendezvous with the other generators before signalling completion.
	(local.barrier)();
	// Exactly one generator (local rank 0) tells the master collector
	// that all generators are done.
	if (local.rank() == 0)
		world.send(master_collector, msg_finished);
}
示例3: printit
void
printit(const boost::mpi::communicator& comm,
	const std::vector<T>& things,
	const std::string& caption)
{
	// Prints each process's local `things` in rank order, then the global
	// element count on the root; the in-loop barrier serialises the output.
	const int rank = comm.rank();
	if (rank == 0 && !caption.empty()) {
		std::cout << caption << std::endl;
		std::cout.flush();
	}
	for (int p = 0; p < comm.size(); ++p) {
		if (p == rank) {
			std::cout << p << ": ";
			std::copy(things.begin(), things.end(),
					std::ostream_iterator<T>(std::cout, ","));
			std::cout << std::endl;
			std::cout.flush();
		}
		comm.barrier();
	}
	comm.barrier();
	// Sum the per-rank sizes onto the root and report the total there.
	size_t global_size;
	boost::mpi::reduce(comm, things.size(), global_size, std::plus<size_t>(), 0);
	if (rank == 0) {
		if (!caption.empty())
			std::cout << caption;
		std::cout << "Number of things: " << global_size << std::endl;
	}
	comm.barrier();
	std::cout << rank << ": leaving printit()" << std::endl;
}
示例4: generator
void
all_gatherv_test(const mpi::communicator& comm, Generator generator,
		 std::string kind)
{
	typedef typename Generator::result_type value_type;
	using boost::mpi::all_gatherv;
	// Rank r contributes r+1 copies of generator(r); "expected" is the
	// concatenation of every rank's contribution in rank order.
	std::vector<value_type> myvalues, expected, values;
	std::vector<int> sizes;
	const int my_rank = comm.rank();
	for (int r = 0; r < comm.size(); ++r) {
		const value_type value = generator(r);
		sizes.push_back(r + 1);
		for (int k = 0; k <= r; ++k) {
			expected.push_back(value);
			if (my_rank == r)
				myvalues.push_back(value);
		}
	}
	if (my_rank == 0) {
		std::cout << "Gathering " << kind << "...";
		std::cout.flush();
	}
	mpi::all_gatherv(comm, myvalues, values, sizes);
	BOOST_CHECK(values == expected);
	if (my_rank == 0 && values == expected)
		std::cout << "OK." << std::endl;
	(comm.barrier)();
}
示例5: collect_data
// Collector side of the generator/collector example.  The master collector
// (the lowest-ranked collector within "world") receives data packets from the
// generators and re-broadcasts them over the collectors' "local"
// communicator; every other collector consumes those broadcasts until the
// finished message arrives.
// NOTE(review): the broadcast root 0 assumes the master collector is rank 0
// of "local" — consistent with master_collector being the first collector.
void collect_data(mpi::communicator local, mpi::communicator world)
{
// The rank of the collector within the world communicator
int master_collector = world.size() - local.size();
if (world.rank() == master_collector) {
while (true) {
// Wait for a message
mpi::status msg = world.probe();
if (msg.tag() == msg_data_packet) {
// Receive the packet of data
std::vector<int> data;
world.recv(msg.source(), msg.tag(), data);
// Tell each of the collectors that we'll be broadcasting some data
for (int dest = 1; dest < local.size(); ++dest)
local.send(dest, msg_broadcast_data, msg.source());
// Broadcast the actual data.
broadcast(local, data, 0);
} else if (msg.tag() == msg_finished) {
// Receive the message
world.recv(msg.source(), msg.tag());
// Tell each of the collectors that we're finished
for (int dest = 1; dest < local.size(); ++dest)
local.send(dest, msg_finished);
break;
}
}
} else {
// Non-master collectors: react to the master's control messages.
while (true) {
// Wait for a message from the master collector
mpi::status msg = local.probe();
if (msg.tag() == msg_broadcast_data) {
// Receive the broadcast message
int originator;
local.recv(msg.source(), msg.tag(), originator);
// Receive the data broadcasted from the master collector
std::vector<int> data;
broadcast(local, data, 0);
std::cout << "Collector #" << local.rank()
<< " is processing data from generator #" << originator
<< "." << std::endl;
} else if (msg.tag() == msg_finished) {
// Receive the message
local.recv(msg.source(), msg.tag());
break;
}
}
}
}
示例6: compute
// Computes the two-particle Green's function in parallel over `comm`.
// One job per part is scheduled through pMPI::mpi_skel; afterwards, unless
// `clear` is set, each part's term lists are broadcast from the worker that
// computed them (job_map[p]) so every process ends up with the full data.
// Throws exStatusMismatch unless the object has at least reached the
// Prepared status; returns immediately if already Computed.
void TwoParticleGF::compute(bool clear, const boost::mpi::communicator & comm)
{
if (Status < Prepared) throw (exStatusMismatch());
if (Status >= Computed) return;
if (!Vanishing) {
// Create a "skeleton" class with pointers to part that can call a compute method
pMPI::mpi_skel<ComputeAndClearWrap> skel;
// Only fill the container when there is at least one bosonic AND one fermionic index.
bool fill_container = m_data_.NBosonic() > 0 && m_data_.NFermionic() > 0;
skel.parts.reserve(parts.size());
for (size_t i=0; i<parts.size(); i++) {
skel.parts.push_back(ComputeAndClearWrap(&m_data_, parts[i], clear, fill_container, 1));
};
std::map<pMPI::JobId, pMPI::WorkerId> job_map = skel.run(comm, true); // actual running - very costly
int rank = comm.rank();
int comm_size = comm.size(); // NOTE(review): unused below; presumably kept for debugging
// Start distributing data
//DEBUG(comm.rank() << getIndex(0) << getIndex(1) << getIndex(2) << getIndex(3) << " Start distributing data");
comm.barrier();
if (!clear) {
// Share each part's terms from the rank that owns it; that rank also
// flips the part's status to Computed.
for (size_t p = 0; p<parts.size(); p++) {
boost::mpi::broadcast(comm, parts[p]->NonResonantTerms, job_map[p]);
boost::mpi::broadcast(comm, parts[p]->ResonantTerms, job_map[p]);
if (rank == job_map[p]) {
parts[p]->Status = TwoParticleGFPart::Computed;
};
};
comm.barrier();
}
};
Status = Computed;
}
示例7: print_section
void print_section (const std::string& str)
{
	// Root-only: print the heading framed by '=' rules of matching width.
	if (comm.rank() == 0) {
		const std::string rule(str.size(), '=');
		std::cout << rule << std::endl;
		std::cout << str << std::endl;
		std::cout << rule << std::endl;
	};
}
示例8: reduce_and_check
void reduce_and_check(const boost::mpi::communicator &comm, bool local_value) {
	// Logical-AND every rank's flag onto rank 0 and assert the aggregate
	// there; non-root ranks use the rootless reduce overload (no output).
	const bool is_root = comm.rank() == 0;
	if (!is_root) {
		boost::mpi::reduce(comm, local_value, std::logical_and<bool>(), 0);
		return;
	}
	bool total;
	boost::mpi::reduce(comm, local_value, total, std::logical_and<bool>(), 0);
	BOOST_CHECK(total);
}
示例9: gather_resample_weight
void gather_resample_weight() const
{
	// Read the local weights into weight_, then gather every rank's vector
	// onto rank 0 (only the root supplies the output container).
	weight_.resize(this->size());
	this->read_weight(weight_.data());
	const bool at_root = world_.rank() == 0;
	if (at_root) {
		::boost::mpi::gather(world_, weight_, weight_all_, 0);
	} else {
		::boost::mpi::gather(world_, weight_, 0);
	}
}
示例10: defined
void
report_features(mpi::communicator const& comm) {
	// Rank 0 reports whether Boost.MPI was built assuming MPI_Improbe works.
	if (comm.rank() == 0) {
#if defined(BOOST_MPI_USE_IMPROBE)
		const char* improbe_status = "yes";
#else
		const char* improbe_status = "no";
#endif
		std::cout << "Assuming working MPI_Improbe:" << improbe_status << '\n';
	}
}
示例11: computeAll_split
void TwoParticleGFContainer::computeAll_split(bool clearTerms, const boost::mpi::communicator & comm)
{
    // Computes all nontrivial 2PGF components in parallel: split `comm` into
    // ncolors sub-communicators ("colors"), assign each component to a color,
    // compute each component inside its color, then broadcast the resulting
    // terms from each color's root so every process has the full container.
    size_t ncomponents = NonTrivialElements.size();
    size_t ncolors = std::min(int(comm.size()), int(NonTrivialElements.size()));
    RealType color_size = 1.0*comm.size()/ncolors;
    std::map<int,int> proc_colors;  // world rank  -> color
    std::map<int,int> elem_colors;  // component # -> color
    std::map<int,int> color_roots;  // color       -> broadcast root
    // (removed: an unused `bool calc = false;` that was shadowed in the loop below)
    for (int p = 0; p < comm.size(); p++) {  // int loop: avoids signed/unsigned compare
        int color = int(1.0*p / color_size);
        proc_colors[p] = color;
        color_roots[color] = p; // last assignment wins: root is the top rank of the color
    }
    for (size_t i=0; i<ncomponents; i++) {
        elem_colors[i] = int(i*ncolors/ncomponents);
    };
    if (!comm.rank()) {
        INFO("Splitting " << ncomponents << " components in " << ncolors << " communicators");
        for (size_t i=0; i<ncomponents; i++)
            INFO("2pgf " << i << " color: " << elem_colors[i] << " color_root: " << color_roots[elem_colors[i]]);
    };
    comm.barrier();
    int comp = 0;
    boost::mpi::communicator comm_split = comm.split(proc_colors[comm.rank()]);
    for(std::map<IndexCombination4, boost::shared_ptr<TwoParticleGF> >::iterator iter = NonTrivialElements.begin(); iter != NonTrivialElements.end(); iter++, comp++) {
        // Only processes whose color owns this component take part in its computation.
        if (elem_colors[comp] == proc_colors[comm.rank()]) {
            INFO("C" << elem_colors[comp] << "p" << comm.rank() << ": computing 2PGF for " << iter->first);
            static_cast<TwoParticleGF&>(*(iter->second)).compute(clearTerms, comm_split);
        };
    };
    comm.barrier();
    // Distribute the computed data: each color root broadcasts its parts' terms.
    if (!comm.rank()) INFO_NONEWLINE("Distributing 2PGF container...");
    comp = 0;
    for(std::map<IndexCombination4, boost::shared_ptr<TwoParticleGF> >::iterator iter = NonTrivialElements.begin(); iter != NonTrivialElements.end(); iter++, comp++) {
        int sender = color_roots[elem_colors[comp]];
        TwoParticleGF& chi = *((iter)->second);
        for (size_t p = 0; p<chi.parts.size(); p++) {
            boost::mpi::broadcast(comm, chi.parts[p]->NonResonantTerms, sender);
            boost::mpi::broadcast(comm, chi.parts[p]->ResonantTerms, sender);
            // Receivers mark the component Computed; the sender did so already.
            if (comm.rank() != sender) {
                chi.setStatus(TwoParticleGF::Computed);
            };
        };
    }
    comm.barrier();
    if (!comm.rank()) INFO("done.");
}
示例12: sendRequest
// Sends a work request to a randomly chosen peer process.
void sendRequest() {
	// BUG FIX: the original called rand() BEFORE srand(time(NULL)), so the
	// first draw used a stale/default seed.  Seed first, then draw.
	srand(time(NULL));
	int pn = rand() % com.size();
	// Never request work from ourselves; redraw until we hit another process
	// (the original redrew only once, which could still pick our own rank).
	while (pn == com.rank() && com.size() > 1) {
		pn = rand() % com.size();
	}
	//cout << "Sending WORK_REQUEST to " << pn << endl;
	com.send(pn, WORK_REQUEST);
}
示例13: collect_results
/// Reduce the results of the measures, and reports some statistics
/// @param c  communicator over which the measures are reduced
void collect_results(boost::mpi::communicator const & c) {
uint64_t nmeasures_tot;
MCSignType sum_sign_tot;
// Sum the per-node measure counts and sign accumulators onto rank 0.
boost::mpi::reduce(c, nmeasures, nmeasures_tot, std::plus<uint64_t>(), 0);
boost::mpi::reduce(c, sum_sign, sum_sign_tot, std::plus<MCSignType>(), 0);
// Per-node statistics (report verbosity level 3).
report(3) << "[Node "<<c.rank()<<"] Acceptance rate for all moves:\n" << AllMoves.get_statistics(c);
report(3) << "[Node "<<c.rank()<<"] Simulation lasted: " << double(Timer) << " seconds" << std::endl;
report(3) << "[Node "<<c.rank()<<"] Number of measures: " << nmeasures << std::endl;
report(3) << "[Node "<<c.rank()<<"] Average sign: " << sum_sign / double(nmeasures) << std::endl << std::endl << std::flush;
// Rank 0 computes the global average sign from the reduced totals...
if (c.rank()==0) {
sign_av = sum_sign_tot / double(nmeasures_tot);
report(2) << "Total number of measures: " << nmeasures_tot << std::endl;
report(2) << "Average sign: " << sign_av << std::endl << std::endl << std::flush;
}
// ...and shares it with every node before collecting the measures.
boost::mpi::broadcast(c, sign_av, 0);
AllMeasures.collect_results(c);
}
示例14: main
int main()
{
	// The last `numClients` MPI ranks act as front-end clients; all remaining
	// ranks are replica managers (RMs).
	int numClients = 2;
	// client settings
	float minClientActionDelay = 0.2f; // min wait time for a client before starting new actions
	float maxClientActionDelay = 0.4f; // max wait time for a client before it has to complete a new action
	float clientQueryWeight = 30.0f; // possibility of a query to happen
	float clientReplyWeight = 30.0f; // possibility of a reply to happen
	float clientPostWeight = 40.0f; // possibility of a new post (update) to happen
	// set the global MPI variables
	gRank = gWorld.rank();
	gNumFE = numClients;
	gNumRM = gWorld.size() - numClients;
	// Early out if there are not enough nodes for at least one RM.
	// BUG FIX: every rank must exit here, not only rank 0 (the original
	// condition `... && gWorld.rank() == 0` let all other ranks keep running
	// with an invalid configuration); only rank 0 prints the error.
	if (numClients + 1 > gWorld.size()) {
		if (gWorld.rank() == 0) {
			std::cout << "ERROR: there are not enough nodes for at least 1 RM, please increase the number of nodes" << std::endl;
		}
		exit(-1);
	}
	if (gWorld.rank() == 0) {
		std::cout << " num RM: " << gNumRM << " num FE: " << gNumFE << std::endl;
	}
	Log::singleton().open("Bulletin_" + std::to_string(gRank) + ".log"); // set log file
	Log::singleton().setVerbosity(LV_Normal);
	// the last 'numClients' ranks are front ends
	if (gWorld.rank() >= gWorld.size() - numClients) {
		std::cout << "P" << gWorld.rank() << ": assigned as a client" << std::endl;
		// create client instance
		Client client;
		// set client variables as defined above
		client.setMinActionDelay(minClientActionDelay);
		client.setMaxActionDelay(maxClientActionDelay);
		client.setQueryWeight(clientQueryWeight);
		client.setReplyWeight(clientReplyWeight);
		client.setPostWeight(clientPostWeight);
		// run the client
		// the client will now call the Frontend classes specific functions
		// whenever it wants to complete an action.
		client.run();
	}
	else
	{
		std::cout << "P" << gWorld.rank() << ": assigned as a replicator" << std::endl;
		ReplicaManager RM;
		RM.run();
	}
	return 0;
}
示例15: broadcast
void
broadcast_test(const mpi::communicator& comm, const T& bc_value,
	       std::string const& kind, int root) {
	using boost::mpi::broadcast;
	// Every rank starts with a default-constructed value; the root seeds it
	// with bc_value and announces what is about to be broadcast.
	T value;
	const bool is_root = comm.rank() == root;
	if (is_root) {
		value = bc_value;
		std::cout << "Broadcasting " << kind << " from root " << root << "...";
		std::cout.flush();
	}
	broadcast(comm, value, root);
	// Every rank must now hold the broadcast value.
	BOOST_CHECK(value == bc_value);
	if (is_root) {
		std::cout << (value == bc_value ? "OK." : "FAIL.") << std::endl;
	}
	comm.barrier();
}