This article collects and summarizes typical usage examples of the C++ method boost::mpi::communicator::size. If you have been wondering what communicator::size does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore further usage examples of the enclosing class, boost::mpi::communicator.
A total of 15 code examples of the communicator::size method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
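All of the examples share the same basic setup: construct a boost::mpi::environment, wrap a communicator, and query size() for the number of processes and rank() for the calling process's index. A minimal, self-contained sketch of that pattern (not taken from any of the projects below) looks like this:
#include <boost/mpi/environment.hpp>
#include <boost/mpi/communicator.hpp>
#include <iostream>

namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
    mpi::environment env(argc, argv);  // initializes MPI, finalizes it on destruction
    mpi::communicator world;           // wraps MPI_COMM_WORLD
    // size() = number of processes in the communicator, rank() = index of this process
    std::cout << "Process " << world.rank() << " of " << world.size() << std::endl;
    return 0;
}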
Example 1: main
int main()
{
int numClients = 2;
// client settings
float minClientActionDelay = 0.2f; // min wait time for a client before starting new actions
float maxClientActionDelay = 0.4f; // max wait time for a client before it has to complete a new action
float clientQueryWeight = 30.0f; // possibility of a query to happen
float clientReplyWeight = 30.0f; // possibility of a reply to happen
float clientPostWeight = 40.0f; // possibility of a new post (update) to happen
// set the global MPI variables
gRank = gWorld.rank();
gNumFE = numClients;
gNumRM = gWorld.size() - numClients;
// early out if there are not enough nodes for at least one RM
// (every rank must exit; only rank 0 prints the error)
if (numClients + 1 > gWorld.size()) {
if (gWorld.rank() == 0) {
std::cout << "ERROR: there are not enough nodes for at least 1 RM, please increase the number of nodes" << std::endl;
}
exit(-1);
}
if (gWorld.rank() == 0) {
std::cout << " num RM: " << gNumRM << " num FE: " << gNumFE << std::endl;
}
Log::singleton().open("Bulletin_" + std::to_string(gRank) + ".log"); // set log file
Log::singleton().setVerbosity(LV_Normal);
//the last 'numClients' ranks are front ends
if (gWorld.rank() >= gWorld.size() - numClients) {
std::cout << "P" << gWorld.rank() << ": assigned as a client" << std::endl;
// create client instance
Client client;
// set client variables as defined above
client.setMinActionDelay(minClientActionDelay);
client.setMaxActionDelay(maxClientActionDelay);
client.setQueryWeight(clientQueryWeight);
client.setReplyWeight(clientReplyWeight);
client.setPostWeight(clientPostWeight);
// run the client
// the client will now call the Frontend classes specific functions
// whenever it wants to complete an action.
client.run();
}
else
{
std::cout << "P" << gWorld.rank() << ": assigned as a replicator" << std::endl;
ReplicaManager RM;
RM.run();
}
return 0;
}
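Example 1 relies on globals and classes that are not shown here (gWorld, gRank, gNumFE, gNumRM, Client, ReplicaManager, Log). A hypothetical reconstruction of the MPI-related globals, assuming gWorld is simply the world communicator, might look like the following; the original project's declarations may differ:
#include <boost/mpi.hpp>

// Hypothetical globals assumed by Example 1 (not from the original source):
boost::mpi::environment gEnv;     // constructed before gWorld within the same translation unit
boost::mpi::communicator gWorld;  // world communicator, queried via size()/rank()
int gRank  = 0;                   // rank of this process
int gNumFE = 0;                   // number of front-end (client) processes
int gNumRM = 0;                   // number of replica-manager processes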
Example 2: collect_data
void collect_data(mpi::communicator local, mpi::communicator world)
{
// The rank of the collector within the world communicator
int master_collector = world.size() - local.size();
if (world.rank() == master_collector) {
while (true) {
// Wait for a message
mpi::status msg = world.probe();
if (msg.tag() == msg_data_packet) {
// Receive the packet of data
std::vector<int> data;
world.recv(msg.source(), msg.tag(), data);
// Tell each of the collectors that we'll be broadcasting some data
for (int dest = 1; dest < local.size(); ++dest)
local.send(dest, msg_broadcast_data, msg.source());
// Broadcast the actual data.
broadcast(local, data, 0);
} else if (msg.tag() == msg_finished) {
// Receive the message
world.recv(msg.source(), msg.tag());
// Tell each of the collectors that we're finished
for (int dest = 1; dest < local.size(); ++dest)
local.send(dest, msg_finished);
break;
}
}
} else {
while (true) {
// Wait for a message from the master collector
mpi::status msg = local.probe();
if (msg.tag() == msg_broadcast_data) {
// Receive the broadcast message
int originator;
local.recv(msg.source(), msg.tag(), originator);
// Receive the data broadcasted from the master collector
std::vector<int> data;
broadcast(local, data, 0);
std::cout << "Collector #" << local.rank()
<< " is processing data from generator #" << originator
<< "." << std::endl;
} else if (msg.tag() == msg_finished) {
// Receive the message
local.recv(msg.source(), msg.tag());
break;
}
}
}
}
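This function appears to be the collector half of the Boost.MPI group-communicator tutorial; its generator counterpart is shown as Example 6 below, and a sketch of a driver that wires the two together follows that example.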
Example 3: computeAll_split
void TwoParticleGFContainer::computeAll_split(bool clearTerms, const boost::mpi::communicator & comm)
{
// split communicator
size_t ncomponents = NonTrivialElements.size();
size_t ncolors = std::min(int(comm.size()), int(NonTrivialElements.size()));
RealType color_size = 1.0*comm.size()/ncolors;
std::map<int,int> proc_colors;
std::map<int,int> elem_colors;
std::map<int,int> color_roots;
for (size_t p=0; p<comm.size(); p++) {
int color = int (1.0*p / color_size);
proc_colors[p] = color;
color_roots[color]=p;
}
for (size_t i=0; i<ncomponents; i++) {
int color = i*ncolors/ncomponents;
elem_colors[i] = color;
};
if (!comm.rank()) {
INFO("Splitting " << ncomponents << " components in " << ncolors << " communicators");
for (size_t i=0; i<ncomponents; i++)
INFO("2pgf " << i << " color: " << elem_colors[i] << " color_root: " << color_roots[elem_colors[i]]);
};
comm.barrier();
int comp = 0;
boost::mpi::communicator comm_split = comm.split(proc_colors[comm.rank()]);
for(std::map<IndexCombination4, boost::shared_ptr<TwoParticleGF> >::iterator iter = NonTrivialElements.begin(); iter != NonTrivialElements.end(); iter++, comp++) {
bool calc = (elem_colors[comp] == proc_colors[comm.rank()]);
if (calc) {
INFO("C" << elem_colors[comp] << "p" << comm.rank() << ": computing 2PGF for " << iter->first);
static_cast<TwoParticleGF&>(*(iter->second)).compute(clearTerms, comm_split);
};
};
comm.barrier();
// distribute data
if (!comm.rank()) INFO_NONEWLINE("Distributing 2PGF container...");
comp = 0;
for(std::map<IndexCombination4, boost::shared_ptr<TwoParticleGF> >::iterator iter = NonTrivialElements.begin(); iter != NonTrivialElements.end(); iter++, comp++) {
int sender = color_roots[elem_colors[comp]];
TwoParticleGF& chi = *((iter)->second);
for (size_t p = 0; p<chi.parts.size(); p++) {
// if (comm.rank() == sender) INFO("P" << comm.rank() << " 2pgf " << p << " " << chi.parts[p]->NonResonantTerms.size());
boost::mpi::broadcast(comm, chi.parts[p]->NonResonantTerms, sender);
boost::mpi::broadcast(comm, chi.parts[p]->ResonantTerms, sender);
if (comm.rank() != sender) {
chi.setStatus(TwoParticleGF::Computed);
};
};
}
comm.barrier();
if (!comm.rank()) INFO("done.");
}
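Example 3 hinges on communicator::split(color): every process passing the same color ends up in the same sub-communicator, and rank()/size() on the result refer to that subgroup only. A minimal illustration of the idea, independent of the 2PGF machinery above:
#include <boost/mpi.hpp>
#include <iostream>

namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
    mpi::environment env(argc, argv);
    mpi::communicator world;
    // Processes with the same color land in the same sub-communicator.
    int color = world.rank() % 2;
    mpi::communicator sub = world.split(color);
    std::cout << "world " << world.rank() << "/" << world.size()
              << " -> color " << color
              << ", sub " << sub.rank() << "/" << sub.size() << std::endl;
    return 0;
}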
Example 4: sendRequest
// Send a work request to a randomly chosen process
void sendRequest() {
// Seed before drawing the random number (ideally this would be done once at startup)
srand(time(NULL));
int pn = rand() % com.size();
// If the random number hits my own rank, draw again (assumes more than one process)
while (pn == com.rank()) {
pn = rand() % com.size();
}
//cout << "Sending WORK_REQUEST to " << pn << endl;
com.send(pn, WORK_REQUEST);
}
Example 5: manageTranslators
void DocumentDecoder::manageTranslators(
boost::mpi::communicator comm,
NistXmlCorpus &testset
) {
namespace mpi = boost::mpi;
mpi::request reqs[2];
int stopped = 0;
NumberedOutputDocument translation;
reqs[0] = comm.irecv(mpi::any_source, TAG_COLLECT, translation);
reqs[1] = comm.irecv(mpi::any_source, TAG_STOP_COLLECTING);
NistXmlCorpus::const_iterator it = testset.begin();
uint docno = 0;
for(int i = 0; i < comm.size() && it != testset.end(); ++i, ++docno, ++it) {
LOG(logger_, debug, "S: Sending document " << docno << " to translator " << i);
comm.send(i, TAG_TRANSLATE, std::make_pair(docno, *(*it)->asMMAXDocument()));
}
for(;;) {
std::pair<mpi::status, mpi::request *> wstat = mpi::wait_any(reqs, reqs + 2);
if(wstat.first.tag() == TAG_STOP_COLLECTING) {
stopped++;
LOG(logger_, debug, "C: Received STOP_COLLECTING from translator "
<< wstat.first.source() << ", now " << stopped << " stopped translators.");
if(stopped == comm.size()) {
reqs[0].cancel();
return;
}
*wstat.second = comm.irecv(mpi::any_source, TAG_STOP_COLLECTING);
} else {
LOG(logger_, debug, "C: Received translation of document " <<
translation.first << " from translator " << wstat.first.source());
reqs[0] = comm.irecv(mpi::any_source, TAG_COLLECT, translation);
if(it != testset.end()) {
LOG(logger_, debug, "S: Sending document " << docno <<
" to translator " << wstat.first.source());
comm.send(wstat.first.source(), TAG_TRANSLATE,
std::make_pair(docno, *(*it)->asMMAXDocument()));
++docno; ++it;
} else {
LOG(logger_, debug,
"S: Sending STOP_TRANSLATING to translator " << wstat.first.source());
comm.send(wstat.first.source(), TAG_STOP_TRANSLATING);
}
testset[translation.first]->setTranslation(translation.second);
}
}
}
Example 6: generate_data
void generate_data(mpi::communicator local, mpi::communicator world)
{
using std::srand;
using std::rand;
// The rank of the collector within the world communicator
int master_collector = local.size();
srand(time(0) + world.rank());
// Send out several blocks of random data to the collectors.
int num_data_blocks = rand() % 3 + 1;
for (int block = 0; block < num_data_blocks; ++block) {
// Generate some random data
int num_samples = rand() % 1000;
std::vector<int> data;
for (int i = 0; i < num_samples; ++i) {
data.push_back(rand());
}
// Send our data to the master collector process.
std::cout << "Generator #" << local.rank() << " sends some data..."
<< std::endl;
world.send(master_collector, msg_data_packet, data);
}
// Wait for all of the generators to complete
(local.barrier)();
// The first generator will send the message to the master collector
// indicating that we're done.
if (local.rank() == 0)
world.send(master_collector, msg_finished);
}
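Examples 2 and 6 are the two halves of the generator/collector pattern from what appears to be the Boost.MPI group-communicator tutorial. A sketch of a driver that splits the world communicator into the two groups (the two-thirds/one-third split is an assumption modeled on that tutorial, not code from this page):
#include <boost/mpi.hpp>

namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
    mpi::environment env(argc, argv);
    mpi::communicator world;
    // Assumption: the first two thirds of the ranks generate data, the rest collect it.
    bool is_generator = world.rank() < 2 * world.size() / 3;
    mpi::communicator local = world.split(is_generator ? 0 : 1);
    if (is_generator)
        generate_data(local, world);   // Example 6
    else
        collect_data(local, world);    // Example 2
    return 0;
}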
Example 7: printit
void
printit(const boost::mpi::communicator& comm,
const std::vector<T>& things,
const std::string& caption)
{
if (!caption.empty() && comm.rank() == 0) {
std::cout << caption << std::endl;
std::cout.flush();
}
for (int p = 0; p < comm.size(); ++p) {
if (comm.rank() == p) {
std::cout << p << ": ";
std::copy(things.begin(), things.end(),
std::ostream_iterator<T>(std::cout, ","));
std::cout << std::endl;
std::cout.flush();
}
comm.barrier();
}
comm.barrier();
size_t global_size;
boost::mpi::reduce(comm, things.size(), global_size, std::plus<size_t>(), 0);
if (comm.rank() == 0) {
if (!caption.empty()) {
std::cout << caption;
}
std::cout << "Number of things: " << global_size << std::endl;
}
comm.barrier();
std::cout << comm.rank() << ": leaving printit()" << std::endl;
}
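A hypothetical call site for printit, just to show the intent (not from the original test suite): each rank contributes its own small vector and rank 0 prints the captioned global count.
#include <boost/mpi.hpp>
#include <vector>

int main(int argc, char* argv[])
{
    boost::mpi::environment env(argc, argv);
    boost::mpi::communicator comm;
    // Every rank owns (rank + 1) copies of its own rank number.
    std::vector<int> mine(comm.rank() + 1, comm.rank());
    printit(comm, mine, "Per-rank contents:");
    return 0;
}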
Example 8: compute
void TwoParticleGF::compute(bool clear, const boost::mpi::communicator & comm)
{
if (Status < Prepared) throw (exStatusMismatch());
if (Status >= Computed) return;
if (!Vanishing) {
// Create a "skeleton" class with pointers to part that can call a compute method
pMPI::mpi_skel<ComputeAndClearWrap> skel;
bool fill_container = m_data_.NBosonic() > 0 && m_data_.NFermionic() > 0;
skel.parts.reserve(parts.size());
for (size_t i=0; i<parts.size(); i++) {
skel.parts.push_back(ComputeAndClearWrap(&m_data_, parts[i], clear, fill_container, 1));
};
std::map<pMPI::JobId, pMPI::WorkerId> job_map = skel.run(comm, true); // actual running - very costly
int rank = comm.rank();
int comm_size = comm.size();
// Start distributing data
//DEBUG(comm.rank() << getIndex(0) << getIndex(1) << getIndex(2) << getIndex(3) << " Start distributing data");
comm.barrier();
if (!clear) {
for (size_t p = 0; p<parts.size(); p++) {
boost::mpi::broadcast(comm, parts[p]->NonResonantTerms, job_map[p]);
boost::mpi::broadcast(comm, parts[p]->ResonantTerms, job_map[p]);
if (rank == job_map[p]) {
parts[p]->Status = TwoParticleGFPart::Computed;
};
};
comm.barrier();
}
};
Status = Computed;
}
Example 9: generator
void
all_gatherv_test(const mpi::communicator& comm, Generator generator,
std::string kind)
{
typedef typename Generator::result_type value_type;
using boost::mpi::all_gatherv;
std::vector<value_type> myvalues, expected, values;
std::vector<int> sizes;
for(int r = 0; r < comm.size(); ++r) {
value_type value = generator(r);
sizes.push_back(r+1);
for (int k=0; k < r+1; ++k) {
expected.push_back(value);
if(comm.rank() == r) {
myvalues.push_back(value);
}
}
}
if (comm.rank() == 0) {
std::cout << "Gathering " << kind << "...";
std::cout.flush();
}
mpi::all_gatherv(comm, myvalues, values, sizes);
BOOST_CHECK(values == expected);
if (comm.rank() == 0 && values == expected)
std::cout << "OK." << std::endl;
(comm.barrier)();
}
Example 10: broadcast_test
void
broadcast_test(const mpi::communicator& comm, const T& bc_value,
std::string const& kind)
{
for (int root = 0; root < comm.size(); ++root) {
broadcast_test(comm, bc_value, kind, root);
}
}
Example 11: init
void init(const mpi::communicator& comm)
{
PetscErrorCode ierr;
PetscInt lo, hi;
ierr = VecCreate(comm,&x); // CHKERRQ(ierr);
ierr = VecSetSizes(x, PETSC_DECIDE, 5*comm.size()); // CHKERRQ(ierr);
ierr = VecSetFromOptions(x); // CHKERRQ(ierr);
ierr = VecGetOwnershipRange(x, &lo, &hi);
for (PetscInt i = lo; i < hi; ++i) { // hi is one past the last locally owned index
std::complex<double> v(i, 5*comm.size() - i - 1);
ierr = VecSetValue(x, i, v, INSERT_VALUES);
}
ierr = VecAssemblyBegin(x); // CHKERRQ(ierr);
ierr = VecAssemblyEnd(x); // CHKERRQ(ierr);
}
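Passing the boost::mpi::communicator directly to VecCreate works because the class converts implicitly to the underlying MPI_Comm, so any C API that takes an MPI_Comm can consume it. A minimal illustration, independent of PETSc:
#include <boost/mpi.hpp>
#include <mpi.h>
#include <iostream>

int main(int argc, char* argv[])
{
    boost::mpi::environment env(argc, argv);
    boost::mpi::communicator comm;
    MPI_Comm raw = comm;          // implicit operator MPI_Comm() const
    int nprocs = 0;
    MPI_Comm_size(raw, &nprocs);  // same value that comm.size() reports
    std::cout << nprocs << " == " << comm.size() << std::endl;
    return 0;
}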
Example 12: runMaster
void runMaster(mpi::communicator world, int size, int grid_dimension)
{
// Start timer and go.
boost::chrono::system_clock::time_point start = boost::chrono::system_clock::now();
// Send
Matrix A(Size(size, size));
Matrix result(Size(size, size));
for(int row = 0; row < A.size.rows; ++row){
for(int col = 0; col < A.size.cols; ++col){
A.data[row][col] = (row % 11) + (col % 11);
}
}
//cout << A << endl;
//cout << "\nProduct:\n" << A*A << endl;
// Do sequential
if (grid_dimension == 0)
A.square(result);
// Else parallel
else{
// Split matrix up and send to slaves
int slave_id = 1;
int sub_matrix_sizes = size / grid_dimension;
for(int i = 0; i < size; i += sub_matrix_sizes){
for(int j = 0; j < size; j += sub_matrix_sizes){
MatrixCrossSection cs = getCrossSection( A, i, j, sub_matrix_sizes);
world.send(slave_id, 0, cs);
slave_id ++;
}
}
// Receive
std::vector<Matrix> saved;
int num_slaves = world.size() -1;
for(int i = 1; i <= num_slaves; ++i){
Matrix r;
world.recv(i, 0, r);
result.insertSubMatrix(r);
}
}
// Done
boost::chrono::duration<double> sec = boost::chrono::system_clock::now() - start;
cout << sec.count() << endl;
// Print Result
//cout << "\nResult:\n" << result << endl;
//assert ( result == A*A);
}
Developer: gareth-ferneyhough, Project: Parallel-Matrix-Multiplication, Lines: 55, Source: matrix_multiplication_parallel.cpp
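The matching worker for Example 12 is not shown on this page; presumably it receives a MatrixCrossSection with tag 0, multiplies its row block by its column block, and sends the resulting sub-matrix back with tag 0, which is what the world.recv(i, 0, r) loop above expects.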
Example 13: broadcastMessage
// Send a message to all other processes
void broadcastMessage(int msgType) {
for (int i = 0; i < com.size(); i++) {
// Don't send anything to myself
if (i == com.rank()) { continue; }
// If I have found a result, send it along
if (msgType == FOUND || msgType == FOUND_BEST) {
//cout << "Sending (BEST)FOUND to " << i << endl;
com.send(i, msgType, myLongest);
}
// When announcing the end of the computation, no data needs to be sent
else if (msgType == END) {
//cout << "Sending end to " << i << endl;
com.send(i, msgType);
}
}
}
Example 14: me
void
random_scattered_vector(const boost::mpi::communicator& comm,
const int& global_size,
std::vector<I>& local_values)
{
int me(comm.rank());
int nproc(comm.size());
std::vector< std::vector<I> > toscatter(nproc);
if (me == 0) {
for (int i = 0; i < global_size; ++i) {
boost::random::uniform_int_distribution<> dist(0, nproc-1);
int p(dist(gen));
toscatter[p].push_back(i);
}
}
scatter(comm, toscatter, local_values, 0);
}
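A hypothetical driver combining this helper with printit from Example 7 (gen is assumed to be a file-scope random generator, as the dist(gen) call above suggests):
#include <boost/mpi.hpp>
#include <boost/random/mersenne_twister.hpp>
#include <vector>

boost::random::mt19937 gen;  // assumed global generator used by random_scattered_vector

int main(int argc, char* argv[])
{
    boost::mpi::environment env(argc, argv);
    boost::mpi::communicator comm;
    std::vector<int> local;
    random_scattered_vector(comm, 100, local);   // 100 indices dealt out at random from rank 0
    printit(comm, local, "Scattered indices:");  // Example 7's helper
    return 0;
}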
Example 15: master
static void master(mpi::communicator world){
int ntasks, rank;
vector<int> data;
int work;
int result;
for(int i = 0; i< 10; i++){
data.push_back(i);
}
const int size_work = (int)data.size();
rank = world.rank(); //int rank(ID) of processor
ntasks = world.size();//int total number of processors
for (rank = 1; rank < ntasks; ++rank) {
get_next_work_item(work,size_work,data);
world.send(rank,WORKTAG,work);
}
int ret = get_next_work_item(work,size_work,data);
while (ret == 0){
mpi::status status = world.recv(mpi::any_source,mpi::any_tag,result);
world.send(status.source(),WORKTAG,work);
ret = get_next_work_item(work,size_work,data);
}
for (rank = 1; rank < ntasks; ++rank) {
world.recv( mpi::any_source, mpi::any_tag,result);
}
for (rank = 1; rank < ntasks; ++rank) {
world.send(rank,DIETAG,0);
}
}
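Example 15 is the master half of the classic master/worker pattern; the matching worker loop is not shown on this page, but it would look roughly like the sketch below. do_work is an assumed per-item computation, and WORKTAG/DIETAG are the same tag constants the master uses.
// Hypothetical worker loop matching Example 15 (not from the original project):
static void slave(mpi::communicator world)
{
    int work;
    int result;
    while (true) {
        // Receive either a unit of work (WORKTAG) or the shutdown signal (DIETAG).
        mpi::status status = world.recv(0, mpi::any_tag, work);
        if (status.tag() == DIETAG)
            break;
        result = do_work(work);    // assumed per-item computation
        world.send(0, 0, result);  // report the result back to the master
    }
}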