本文整理汇总了C++中mpi::Status类的典型用法代码示例。如果您正苦于以下问题:C++ Status类的具体用法?C++ Status怎么用?C++ Status使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Status类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: receive_from_master
// Receives the parameters of a job from the master process (rank 0):
// the k value, the block length, and the input/output paths, then starts
// the hash computation. Blocking; intended to be called on worker ranks.
void Module_DCREATE::receive_from_master() const {
	size_t length;
	MPI::Status status;
	int k;
	int blockLength;
	MPI::COMM_WORLD.Recv(&k,1,MPI::INT,0,COMMUNICATION_CHANNEL);
	MPI::COMM_WORLD.Recv(&blockLength,1,MPI::INT,0,COMMUNICATION_CHANNEL);
	// Probe first so the buffer can be sized to the incoming message.
	MPI::COMM_WORLD.Probe(0,COMMUNICATION_CHANNEL,status);
	length = status.Get_count(MPI::CHAR);
	// std::vector replaces the variable-length array (a non-standard C++
	// extension); the extra '\0' slot guarantees the buffer is terminated
	// even if the sender omitted the terminator, making strlen() safe.
	std::vector<char> input(length + 1, '\0');
	MPI::COMM_WORLD.Recv(input.data(),length,MPI::CHAR,0,COMMUNICATION_CHANNEL);
	MPI::COMM_WORLD.Probe(0,COMMUNICATION_CHANNEL,status);
	length = status.Get_count(MPI::CHAR);
	std::vector<char> output(length + 1, '\0');
	MPI::COMM_WORLD.Recv(output.data(),length,MPI::CHAR,0,COMMUNICATION_CHANNEL);
	DEFAULT_CHANNEL << "Informations from master received by node " << my_rank << endl;
	// Only start when both paths are non-empty.
	if (strlen(input.data()) != 0 and strlen(output.data()) != 0)
		compute_hash(k,blockLength,input.data(),output.data(),false); //TODO handle methyl_hash
}
示例2: recvData
// Receives one vector of doubles (tag 100) from any rank over the
// intra-communicator into receivedData.
// Returns true when data was received, false when intraComm is null.
bool recvData(std::vector<double>& receivedData)
{
	bool isDataReceived = false;
	if ( intraComm != MPI::COMM_NULL)
	{
		MPI::Status status;
		// Probe first so the buffer can be sized to the actual message;
		// the previous fixed 100-double stack buffer made MPI raise a
		// truncation error for any message longer than 100 elements.
		intraComm.Probe(MPI::ANY_SOURCE, /*tag*/ 100, status);
		const int count = status.Get_count(MPI::DOUBLE);
		receivedData.resize(count);
		// Receive from the probed source so we consume exactly the
		// message that was probed.
		intraComm.Recv(receivedData.data(), count,
		               MPI::DOUBLE,
		               status.Get_source(),
		               /*tag*/ 100,
		               status);
		log.Info() << "RECV [ " << getRank()
		           << " <-- "
		           << status.Get_source()
		           << " ] data : "
		           << receivedData
		           << std::endl;
		isDataReceived = true;
	}else
	{
		log.Err() << "PID " << getProcessId()
		          << " failed to RECV"
		          << std::endl;
	}
	return isDataReceived;
}
示例3: lMsg
/*!
 * \brief Complete a pending non-blocking reception.
 *
 * Waits on the request's size message, then receives the payload string
 * (of exactly that size) from the same source on the "<tag>_str" channel
 * and stores it in the request's message field, decompressing it first
 * when zlib support is compiled in and compression is enabled.
 * Returns early, leaving the request's message untouched, if the size
 * request was cancelled.
 */
void HPC::MPICommunication::waitReception(Request::Handle ioRequest) const
{
Beagle_StackTraceBeginM();
Beagle_NonNullPointerAssertM(ioRequest);
MPI::Status lStatus;
// Block until the companion size message has arrived.
ioRequest->mSizeRequest.Wait(lStatus);
if(lStatus.Is_cancelled()) return;
// The payload must come from the same rank that sent the size.
int lRank = lStatus.Get_source();
int lMsgSize = ioRequest->mSize;
std::string lStringTag = ioRequest->mTag + "_str";
// Cross-check the announced size against the actual pending message.
MPI::COMM_WORLD.Probe(lRank,hashTag(lStringTag),lStatus);
Beagle_AssertM(lStatus.Get_count(MPI::CHAR) == lMsgSize);
//constructing a string of the right size.
std::string lMsg(lMsgSize, ' ');
// Receive directly into the string's buffer.
MPI::COMM_WORLD.Recv(&lMsg[0], lMsgSize, MPI::CHAR, lRank, hashTag(lStringTag));
#ifdef BEAGLE_HAVE_LIBZ
// A positive compression level means the sender deflated the payload;
// inflate it into the request's message. Otherwise store it verbatim.
if(mCompressionLevel->getWrappedValue() > 0){
ioRequest->mMessage = new Beagle::String;
decompressString(lMsg, ioRequest->mMessage->getWrappedValue());
} else {
ioRequest->mMessage = new Beagle::String(lMsg);
}
#else
ioRequest->mMessage = new Beagle::String(lMsg);
#endif
Beagle_HPC_StackTraceEndM("void HPC::MPICommunication::waitReception(Request::Handle) const");
}
示例4:
/*!
 * \brief Complete a pending non-blocking send.
 *
 * Waits on the request's size message first; if it was cancelled the
 * payload send is not waited on. Otherwise blocks until the payload
 * message request has also completed.
 */
void HPC::MPICommunication::waitSending(Request::Handle ioRequest) const
{
Beagle_StackTraceBeginM();
Beagle_NonNullPointerAssertM(ioRequest);
MPI::Status lStatus;
ioRequest->mSizeRequest.Wait(lStatus);
if(lStatus.Is_cancelled()) return;
ioRequest->mMsgRequest.Wait();
// Fixed: the stack-trace label previously said "waitReception"
// (copy-paste from the sibling method), which made traces misleading.
Beagle_HPC_StackTraceEndM("void HPC::MPICommunication::waitSending(Request::Handle) const");
}
示例5:
void
ParaCommMpiWorld::probe(
int* source,
int* tag
)
{
MPI::Status mpiStatus;
MPI::COMM_WORLD.Probe(MPI::ANY_SOURCE, MPI::ANY_TAG, mpiStatus);
*source = mpiStatus.Get_source();
*tag = mpiStatus.Get_tag();
TAG_TRACE (Probe, From, *source, *tag);
}
示例6: buffer
//#####################################################################
// Function Recv_Columns
//#####################################################################
// Receives one packed "columns" message described by probe_status, then
// unpacks it: first a horizontal direction vector identifying which
// neighbor sent it, then one column per cell of that neighbor's region.
// The `position` cursor is advanced by each Unpack call, so the unpack
// order must mirror the sender's pack order exactly.
template<class T_GRID> template<class T_ARRAYS_HORIZONTAL_COLUMN> void MPI_RLE_GRID<T_GRID>::
Recv_Columns(T_ARRAYS_HORIZONTAL_COLUMN& columns,const ARRAY<T_BOX_HORIZONTAL_INT>& regions,const int tag,const MPI::Status& probe_status) const
{
// Size the receive buffer from the probed message length (MPI::PACKED bytes).
ARRAY<char> buffer(probe_status.Get_count(MPI::PACKED));
int position=0;
comm->Recv(&buffer(1),buffer.m,MPI::PACKED,probe_status.Get_source(),tag);
TV_HORIZONTAL_INT direction;
MPI_UTILITIES::Unpack(direction,buffer,position,*comm);
// The sender encodes its direction relative to us, so look up the
// neighbor index under the negated direction.
int neighbor=0;
all_neighbor_directions.Find(-direction,neighbor);
// Unpack one column per cell in the region associated with that neighbor.
for(typename T_HORIZONTAL_GRID::CELL_ITERATOR iterator(local_grid.horizontal_grid,regions(neighbor)); iterator.Valid(); iterator.Next())
MPI_UTILITIES::Unpack(columns(iterator.Cell_Index()),buffer,position,*comm);
}
示例7: readPopulation
/*!
 * Receives a population of genomes from the master (rank 0) over MPI and
 * appends one Phenotype per genome to *p.  For each genome the master
 * sends: id, node count, gene count, the node array, one ftype string per
 * node, and the gene array.
 * \return true if the master signalled to continue (MPI_Cont), false to stop.
 */
bool Neatzsche_MPI::readPopulation(Phenotypes * p, Coevolution * c, TransferFunctions * tfs)
{
  MPI::Status status;
  int genomes,genes,nodes,id;
  MPI::COMM_WORLD.Recv(&genomes,1,MPI::INT,0,0);//Receive the number of genome
  NeuralNodeSmall * nns;
  GeneSmall * gs;
  Genome * genome = NULL;
  for(int g=0;g<genomes;g++){ // renamed from i: the inner node loop shadowed it
    vector<string> * ftypes = new vector<string>();
    // Consistent use of the C++ bindings (the original mixed MPI_INT/MPI_CHAR
    // with MPI::INT/MPI::CHAR).
    MPI::COMM_WORLD.Recv(&id,1,MPI::INT,0,0);
    MPI::COMM_WORLD.Recv(&nodes,1,MPI::INT,0,0);
    MPI::COMM_WORLD.Recv(&genes,1,MPI::INT,0,0);
    nns = new NeuralNodeSmall [nodes];
    gs = new GeneSmall[genes];
    nodetype = Build_neuralnode_type(&nns[0]);
    MPI::COMM_WORLD.Recv(nns,nodes,nodetype,0,0);
    for(int n=0;n<nodes;n++){//blargh, 1 int would be more usefull in this case:P
      MPI::COMM_WORLD.Probe(0, MPI_Cont, status);
      int stringc = status.Get_count(MPI::CHAR);
      // vector<char> instead of malloc/free; sized exactly, no terminator
      // assumed (the original's string(strbuf) was UB without one).
      vector<char> strbuf(stringc);
      MPI::COMM_WORLD.Recv(strbuf.data(),stringc,MPI::CHAR,0,0);//receive the ftype of the node
      string ftype(strbuf.data(), stringc);
      // Trim at the first embedded '\0' to mirror the original C-string
      // semantics when the sender included a terminator.
      string::size_type nul = ftype.find('\0');
      if(nul != string::npos) ftype.erase(nul);
      ftypes->push_back(ftype);
    }
    genetype = Build_gene_type(&gs[0]);
    MPI::COMM_WORLD.Recv(gs,genes,genetype,0,0);
    genome = new Genome(tfs);
    genome->fromSmall(id,nodes,nns,genes,gs,ftypes);
    delete ftypes;
    p->push_back(new Phenotype(genome));
    // new[] ran unconditionally, so delete[] must too: the original's
    // "if(nodes>0)" / "if(genes>0)" guards leaked the zero-length allocations.
    delete[] nns;
    delete[] gs;
  }
  unsigned int cont;
  // NOTE(review): received as MPI::INT into an unsigned int, matching the
  // sender's wire format — kept as-is to preserve the protocol.
  MPI::COMM_WORLD.Recv(&cont,1,MPI::INT,0,0);//continue or stop?
  return cont == MPI_Cont;
}
示例8: hashTag
/*!
* \brief Receive message from a specific node rank via MPI
* \param outMessage Message receive.
* \param inTag Tag associated to the message to be received.
* \param inRank Node rank of the sending node.
*/
void HPC::MPICommunication::receive(std::string& outMessage, const std::string& inTag, int inRank) const
{
Beagle_StackTraceBeginM();
MPI::Status lStatus;
int lSize = 0;
MPI::COMM_WORLD.Recv(&lSize, 1, MPI::INT, inRank, hashTag(inTag+"_size"));
MPI::COMM_WORLD.Probe(inRank,hashTag(inTag+"_str"),lStatus);
Beagle_AssertM(lStatus.Get_count(MPI::CHAR) == lSize);
outMessage.resize(lSize);
MPI::COMM_WORLD.Recv(&outMessage[0], lSize, MPI::CHAR, lStatus.Get_source(), hashTag(inTag+"_str"));
#ifdef BEAGLE_HAVE_LIBZ
if(mCompressionLevel->getWrappedValue() > 0){
std::string lString;
decompressString(outMessage, lString);
outMessage = lString;
}
#endif
Beagle_HPC_StackTraceEndM("void HPC::MPICommunication::receive(std::string&, const std::string&, int) const");
}
示例9: receive
// Checks whether this rank currently holds the token; if not, probes for
// any pending message and consumes it only when it is a token message
// addressed to this rank.  Returns true when the token is (or becomes)
// ours, false when the pending message was something else.  The token
// array is protected by tokenAccessLock throughout.
bool
ParaCommMpiWorld::waitToken(
      int tempRank
      )
{
pthread_mutex_lock(&tokenAccessLock);
if( token[0] == myRank )
{
// We already hold the token.
pthread_mutex_unlock(&tokenAccessLock);
return true;
}
else
{
// Identify the rank expected to hold the token before us; rank 0's
// predecessor wraps around to the last rank once the token is live
// (token[0] != -1).
int previousRank = myRank - 1;
if( previousRank == 0 )
{
if( token[0] != -1 )
{
previousRank = comSize - 1;
}
}
// NOTE(review): previousRank is computed but not used below — the probe
// accepts ANY_SOURCE. Possibly leftover from an earlier targeted probe;
// confirm before removing.
int receivedTag;
MPI::Status mpiStatus;
// Blocking probe: wait for any pending message without consuming it.
MPI::COMM_WORLD.Probe(MPI::ANY_SOURCE, MPI::ANY_TAG, mpiStatus);
receivedTag = mpiStatus.Get_tag();
TAG_TRACE (Probe, From, mpiStatus.Get_source(), receivedTag);
if( receivedTag == TagToken )
{
// Consume the token message; it must name this rank as the holder.
receive(token, 2, ParaINT, 0, TagToken);
assert(token[0] == myRank);
pthread_mutex_unlock(&tokenAccessLock);
return true;
}
else
{
// Some other message is pending; leave it for the caller to handle.
pthread_mutex_unlock(&tokenAccessLock);
return false;
}
}
}
示例10: f
// Worker-side loop of the swarm evaluation: repeatedly receives a particle
// id and a position vector from the master (rank 0), evaluates the fitness
// function on it, and sends the id and fitness back on tag 2.  A message
// carrying tag 0 terminates the loop.
void PSO::Swarm::evaluate_slave() {
  double fitness(log(0.0)); // -inf sentinel until the first evaluation
  int particleId(0);
  Point position(numParams);
  MPI::Status status;
  for (;;) {
    // The id message doubles as the control channel: tag 0 means "stop".
    MPI::COMM_WORLD.Recv(&particleId,1,MPI::INT,0,MPI::ANY_TAG,status);
    if (status.Get_tag() == 0) break;
    // Then the particle's coordinates follow.
    MPI::COMM_WORLD.Recv(position.data(),numParams,MPI::DOUBLE,0,MPI::ANY_TAG,status);
    fitness = p->evalFunc(position,p->evalParams);
    // Report the result back to the master on tag 2: id first, then fitness.
    MPI::COMM_WORLD.Send(&particleId,1,MPI::INT,0,2);
    MPI::COMM_WORLD.Send(&fitness,1,MPI::DOUBLE,0,2);
  }
}
示例11: main
int main ( int argc, char *argv[] )
//****************************************************************************80
//
// Purpose:
//
// MAIN is the main program for DAY1.
//
// Discussion:
//
// DAY1 is exercise 3 for first day of the MPI workshop
//
// The instructions say:
//
// Process 1 computes the squares of the first 200 integers.
// It sends this data to process 3.
//
// Process 3 should divide the integers between 20 and 119 by 53,
// getting a real result, and passes this data back to process 1.
//
// * I presume the first 200 integers are the numbers 0 through 199.
//
// * The instructions literally mean that process 3 should look
// at integers whose VALUES are between 20 and 119. I doubt that
// is what the instructor meant, but it's more interesting than
// simply picking the entries with index between 20 and 119,
// so that's what I'll do.
//
// * It is also not completely clear whether only the selected data
// should be sent back, or the entire array. Again, it is more
// interesting to send back only part of the data.
//
// Licensing:
//
// This code is distributed under the GNU LGPL license.
//
// Author:
//
// John Burkardt
//
// Reference:
//
// William Gropp, Ewing Lusk, Anthony Skjellum,
// Using MPI: Portable Parallel Programming with the
// Message-Passing Interface,
// Second Edition,
// MIT Press, 1999,
// ISBN: 0262571323.
//
// Modified:
//
// 26 October 2011
//
// Author:
//
// John Burkardt
//
{
# define I_DIM 200
# define R_DIM 200
int count;
int count2;
int dest;
int i;
int i_buffer[I_DIM];
int id;
int p;
float r_buffer[R_DIM];
int source;
MPI::Status status;
int tag;
//
// Initialize MPI.
//
MPI::Init ( argc, argv );
//
// Determine this process's rank.
//
id = MPI::COMM_WORLD.Get_rank ( );
//
// Get the number of processes.
//
p = MPI::COMM_WORLD.Get_size ( );
//
// Have Process 0 say hello.
//
if ( id == 0 )
{
timestamp ( );
cout << "\n";
cout << "DAY1:\n";
cout << " C++ version\n";
cout << " An MPI example program.\n";
cout << "\n";
cout << " Compiled on " << __DATE__ << " at " << __TIME__ << "\n";
cout << "\n";
cout << " The number of processes available is " << p << "\n";
}
//
//.........这里部分代码省略.........
示例12: recv_output
Module_DMAP::Transmitting_Result Module_DMAP::recv_output(int node, int id) {
int count;
unsigned long int * positions;
unsigned long int * global_positions;
int * contigs;
//t_alignment * types;
int * NMs;
int * lengths;
int * algn;
unsigned short int * bools;
unsigned int * trim_info;
char * informations;
unsigned short int bool_temp;
Mask * reads;
{
mutex::scoped_lock lock(mpi_mutex);
//DEFAULT_CHANNEL << '[' << my_rank << ',' << id << "] Waiting info from node " << node << " to node " << my_rank << endl;
if (finished)
return Transmitting_Result(NULL,0);
MPI::COMM_WORLD.Recv(&count,1,MPI::INT,my_rank-1,COMMUNICATION_CHANNEL);
//DEFAULT_CHANNEL << '[' << my_rank << ',' << id << "] Receive " << count << " OUTPUTs from node " << node << " to node " << my_rank << endl;
if (count == 0) {
finished = true;
return Transmitting_Result(NULL,0);
}
positions = new unsigned long int[count*2];
global_positions = new unsigned long int[count*2];
contigs = new int[count];
//types = new t_alignment[count];
NMs = new int[count*2];
lengths = new int[count*2];
algn = new int[count];
bools = new unsigned short int[count];
trim_info = new unsigned int[count*2];
size_t sum;
MPI::Status status;
MPI::COMM_WORLD.Probe(node,DATA_CHANNEL,status);
sum = status.Get_count(MPI::CHAR);
informations = new char[sum];
MPI::COMM_WORLD.Recv(informations,sum,MPI::CHAR,node,DATA_CHANNEL);
MPI::COMM_WORLD.Recv(positions,count*2,MPI::UNSIGNED_LONG,node,DATA_CHANNEL);
MPI::COMM_WORLD.Recv(global_positions,count*2,MPI::UNSIGNED_LONG,node,DATA_CHANNEL);
MPI::COMM_WORLD.Recv(contigs,count,MPI::INT,node,DATA_CHANNEL);
//MPI::COMM_WORLD.Recv(types,count*sizeof(t_alignment),MPI::CHAR,node,DATA_CHANNEL);
MPI::COMM_WORLD.Recv(NMs,count*2,MPI::INT,node,DATA_CHANNEL);
MPI::COMM_WORLD.Recv(lengths,count*2,MPI::INT,node,DATA_CHANNEL);
MPI::COMM_WORLD.Recv(algn,count,MPI::INT,node,DATA_CHANNEL);
MPI::COMM_WORLD.Recv(bools,count,MPI::UNSIGNED_SHORT,node,DATA_CHANNEL);
MPI::COMM_WORLD.Recv(trim_info,count*2,MPI::UNSIGNED,node,DATA_CHANNEL);
}
reads = new Mask[count];
char * h = informations;
for (int i = 0; i < count; i++) {
Mask & r = reads[i];
r.id = string(h);
h += r.id.size() + 1;
r.sequence = string(h);
h += r.sequence.size() + 1;
r.quality = string(h);
h += r.sequence.size() + 1;
r.position = positions[i*2];
r.position_gap = positions[i*2+1];
r.globalPosition = global_positions[i*2];
r.globalPosition_gap = global_positions[i*2+1];
r.contig = contigs[i];
r.length1_gap = lengths[i*2];
r.length2_gap = lengths[i*2+1];
//r.type = types[i];
r.NM = NMs[i*2];
r.NM_gap = NMs[i*2+1];
r.algn = algn[i];
r.good_region_start = trim_info[i*2];
r.good_region_stop = trim_info[i*2+1];
bool_temp = bools[i];
r.strand = bool_temp & 0x01;
r.masked = bool_temp & 0x02;
r.low_quality = bool_temp & 0x04;
r.trimmed = bool_temp & 0x08;
r.discarded = bool_temp & 0x10;
r.low_complexity = bool_temp & 0x20;
r.contaminated = bool_temp & 0x40;
r.gapped = bool_temp & 0x80;
}
delete [] positions;
delete [] contigs;
//delete [] types;
delete [] NMs;
delete [] algn;
delete [] bools;
delete [] trim_info;
delete [] lengths;
delete [] global_positions;
delete [] informations;
//.........这里部分代码省略.........
示例13: main
int main(int argc, char * argv[]){
int tag, send_tag;//tag in MPI_Recv
int to,from;//destination and source of MPI send/receive
int st_count, st_source, st_tag;
double start_time = 0.0;//set start and end time for MPI_Wtime()
double end_time = 0.0;
MPI::Status status;
MPI::Init(argc, argv);//start MPI
int rank = MPI::COMM_WORLD.Get_rank();//The rank label of the machines
int size = MPI::COMM_WORLD.Get_size();//The number of tasks to be done
// MPI_Barrier(MPI_COMM_WORLD);
int option;
opterr = 0;
int N = 0;
string directory;
while ((option = getopt(argc, argv, "d:n:"))!= -1)//getopt parses the parameters of commands, -n is the first n words that occur most frequently in files, -d is the directory which contains the files that need to be parsed.
{
switch (option)
{
case 'n':
N = atoi(optarg);//the first N words
break;
case 'd':
directory = string(optarg);// parameter of the directory
// cout << dir <<endl;
break;
case '?'://when the parameter of option n is wrong, show the error information
if (optopt == 'n')
cerr<< "Option -"<<char(optopt)<<" requires an argument." <<endl;
else if (isprint (optopt))
cerr<< "Unknown option `-"<<char(optopt)<<"'.\n"<<endl;
else
cerr<< "Unknown option character `"<<std::hex<<optopt<<"'."<<endl;
}
}
vector<string> filenames;//use this vector to store file names
char buffer[1024];
if(rank == 0)//Machine 0 parses the name of directory and files in the directory.
{
struct dirent *ptr;
DIR *dir;
dir = opendir(directory.c_str());//open the directory
while((ptr = readdir(dir))!=NULL)//read the name of the directory
{
if(ptr->d_name[0]=='.')
continue;
strcpy(buffer,directory.c_str());
strcat(buffer,ptr->d_name);
// cout<<buffer<<endl;
filenames.push_back(string(buffer));//put the file names of the directory in the vector filenames
};
}
if(rank == 0)//machine 0 send messages and assign tasks to all the machines, including itself.
{
start_time = MPI_Wtime();//star time stamp
to = 0;
send_tag = 0;
int round = 0;
while(round * size < filenames.size())
{
for(int i = round * size; i < (round + 1) * size && i < filenames.size(); i++)
{
sprintf(buffer, "%s", filenames[i].c_str());
// cout << rank << ":"<< "sending " << buffer << endl;
MPI::COMM_WORLD.Send(buffer,1024, MPI::CHAR, i%size, send_tag);//send filenames to the other machines and let them parse the files, including itself.
to++;
send_tag++;
}
tag = MPI::ANY_TAG;
from = MPI::ANY_SOURCE;
MPI::COMM_WORLD.Recv(buffer, 1024, MPI::CHAR, from, tag, status);//rank 0 receive parsing result from the rest machines, including itself
st_count = status.Get_count(MPI::CHAR);
st_source = status.Get_source();
st_tag = status.Get_tag();
string result("");
result = parse(buffer, N);
strcpy(buffer,result.c_str());
MPI::COMM_WORLD.Send(buffer,1024, MPI::CHAR, 0, st_tag);//rank 0 send message to itself
for(int i = round * size; i < (round + 1) * size && i < filenames.size(); i++)
{
tag = MPI::ANY_TAG;
from = MPI::ANY_SOURCE;
MPI::COMM_WORLD.Recv(buffer, 1024, MPI::CHAR, from, tag, status);
st_count = status.Get_count(MPI::CHAR);
//.........这里部分代码省略.........
示例14: main
int main ( int argc, char *argv[] )
//****************************************************************************80
//
// Purpose:
//
// MAIN is the main program for MONTE_CARLO.
//
// Discussion:
//
// MONTE_CARLO illustrates the use of MPI with a Monte Carlo algorithm.
//
// Generate N random points in the unit square. Count M, the number
// of points that are in the quarter circle. Then PI is approximately
// equal to the ratio 4 * M / N.
//
// It's important that each processor use DIFFERENT random numbers.
// One way to ensure this is to have a single master processor
// generate all the random numbers, and then divide them up.
//
// (A second way, not explored here, is simply to ensure that each
// processor uses a different seed, either chosen by a master processor,
// or generated from the processor ID.)
//
// Licensing:
//
// This code is distributed under the GNU LGPL license.
//
// Modified:
//
// 26 February 2007
//
// Author:
//
// John Burkardt
//
// Reference:
//
// William Gropp, Ewing Lusk, Anthony Skjellum,
// Using MPI: Portable Parallel Programming with the
// Message-Passing Interface,
// Second Edition,
// MIT Press, 1999,
// ISBN: 0262571323.
//
{
double calculatedPi;
int dest;
int done;
double error;
int i;
int id;
int in;
int max;
MPI::Status mesgStatus;
int num_procs;
int out;
int point_max = 1000000;
int randServer;
int randNums[CHUNKSIZE];
int ranks[1];
int request;
int temp;
double tolerance;
int totalin;
int totalout;
MPI::Group worker_group;
MPI::Intracomm worker_comm;
MPI::Group world_group;
double x;
double y;
//
// Initialize MPI.
//
MPI::Init ( argc, argv );
//
// Get the number of processors.
//
num_procs = MPI::COMM_WORLD.Get_size ( );
//
// Get the rank of this processor.
//
id = MPI::COMM_WORLD.Get_rank ( );
if ( id == 0 )
{
timestamp ( );
cout << "\n";
cout << "MONTE_CARLO - Master process:\n";
cout << " C++ version\n";
cout << " Estimate pi by the Monte Carlo method, using MPI.\n";
cout << "\n";
cout << " Compiled on : " << __DATE__ << " at " << __TIME__ << ".\n";
cout << "\n";
cout << " The number of processes is " << num_procs << ".\n";
cout << "\n";
cout << " Points in the unit square will be tested\n";
cout << " to see if they lie in the unit quarter circle.\n";
}
//
//.........这里部分代码省略.........
示例15: start
void PPS::start(){
//Define parameters struct for mpi
//Refer to this as an example http://lists.mcs.anl.gov/pipermail/mpich-discuss/2009-April/004880.html
MPI::Datatype MPIPPSTRUCT;
int blockcounts[2];
MPI::Aint offsets[2];
MPI::Datatype datatypes[2];
MPI::Aint extent,lb;
blockcounts[0] = 9; //Number of ints
blockcounts[1] = 13; //number of __fpv
datatypes[0] = MPI::INT;
datatypes[1] = MPIFPV;
offsets[0] = 0;
MPI::INT.Get_extent(lb, extent);
offsets[1] = blockcounts[0] * extent;
MPIPPSTRUCT = MPIPPSTRUCT.Create_struct(2,blockcounts,offsets, datatypes);
MPIPPSTRUCT.Commit();
if(PPS::pid == 0){
struct parameters temp;
int start,i,countdown = PPS::comm_size-1;
bool ready = false;
MPI::Status status;
//Logs
std::ofstream logsfile;
logsfile.open("tslogs.txt", std::fstream::out | std::fstream::trunc);
while(true){
if(countdown == 0) break;
//Check first ready-to-compute process
MPI::COMM_WORLD.Recv(&ready, 1, MPI::BOOL, MPI_ANY_SOURCE, 0, status);
//Logs
logsfile << "Remaining sims: " << PPS::plist.size() << " process countdown: " << countdown << std::endl;
//Send a 0 status to all the process to stop
if(ready){
if(PPS::plist.size() == 0 ){
start = EXIT_PROCESS;
MPI::COMM_WORLD.Send(&start, 1, MPI::INT, status.Get_source(), 0);
countdown = countdown - 1;
}else{
//Prepare him to receive the params and start the sim (an int that contains the simulation number (-1 = exit))
start = PPS::plist.size() - 1;
MPI::COMM_WORLD.Send(&start, 1, MPI::INT, status.Get_source(), 0);
temp = PPS::plist.back();
//temp.N = status.Get_source() * 10;
//Deploy the parameterer struct
MPI::COMM_WORLD.Send(&temp, 1, MPIPPSTRUCT, status.Get_source(), 0);
//Pullout the parameter struct from the list
plist.pop_back();
}
}
ready = false;
}
logsfile.close();
}else{
int status;
bool ready = true;
struct parameters recvparams;
while(true){
status == EXIT_PROCESS;
//Send with a point to point that you are free
MPI::COMM_WORLD.Send(&ready, 1, MPI::BOOL, 0, 0);
//receive status value to exit or to receive a new params struct to start new sim
MPI::COMM_WORLD.Recv(&status, 1, MPI::INT, 0, 0);
if(status != EXIT_PROCESS){
//wait to receive parameters
//std::this_thread::sleep_for(std::chrono::seconds(PPS::pid));
MPI::COMM_WORLD.Recv(&recvparams, 1, MPIPPSTRUCT, 0, 0);
//Start sim
//std::cout << "//////////////////////////////////////////////////////////////////////////////////"<< std::endl;
//std::cout << "SAY HI: "<< PPS::pid << std::endl;
//print_params(recvparams);
//std::cout << "STARTING REAL SIM"<< std::endl;
//.........这里部分代码省略.........