This article collects typical usage examples of the C++ MPI_Probe function. If you have been wondering how exactly to use MPI_Probe in C++, what it does, or what real-world calls look like, the curated examples below should help.
The following presents 15 code examples of the MPI_Probe function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
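Before diving in, here is a minimal sketch of the probe-then-receive pattern that most of the examples below share (the tag value and int payload are illustrative assumptions, not taken from any particular example):

// Minimal probe-then-receive sketch. MPI_Probe blocks until a matching
// message is pending and fills `status` without consuming the message,
// so the receiver can size its buffer before calling MPI_Recv.
MPI_Status status;
MPI_Probe(MPI_ANY_SOURCE, 0 /* assumed tag */, MPI_COMM_WORLD, &status);

int count;
MPI_Get_count(&status, MPI_INT, &count);   // number of pending MPI_INT elements

std::vector<int> buffer(count);            // size the buffer exactly
MPI_Recv(buffer.data(), count, MPI_INT,
         status.MPI_SOURCE, status.MPI_TAG,
         MPI_COMM_WORLD, MPI_STATUS_IGNORE);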
Example 1: MPI_Probe
void Communicator::waitForMessage()
{
    if(!initialized) return;
    MPI_Status status;
    // Block until any message from any source is pending; the message itself
    // is left in the queue for a later receive.
    MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
}
Example 2: MPI_Probe
void MPICommunicator::communicate(DataField* field){
    std::string controlName = "CS";
    MPI_Comm csComm = clientNameCommMap[controlName];
    MPI_Status status;
    // Receive the client name to communicate with.
    MPI_Probe(0, MPI_ANY_TAG, csComm, &status);
    int lengthOfClientName;
    MPI_Get_count(&status, MPI_CHAR, &lengthOfClientName);
    char *buf = new char[lengthOfClientName];
    MPI_Recv(buf, lengthOfClientName, MPI_CHAR, 0, MPI_ANY_TAG, csComm, &status);
    std::string clientName(buf, lengthOfClientName);
    delete [] buf;
    // Receive what to do with the client.
    int whatToDo;
    MPI_Recv(&whatToDo, 1, MPI_INT, 0, MPI_ANY_TAG, csComm, &status);
    switch(whatToDo){
    case 1: { // Receive
        MPI_Comm clientToRecvFrom = clientNameCommMap[clientName];
        field->receive(clientToRecvFrom);
        break;
    }
    case 2: { // Send
        MPI_Comm clientToSendTo = clientNameCommMap[clientName];
        field->send(clientToSendTo);
        break;
    }
    }
}
Example 3: MPI_Recv_vector
template <typename T>
int MPI_Recv_vector(int id, std::vector<T> &buf)
{
    MPI_Status status;
    MPI_Datatype mpitype;
    int count;
    // Probe first so the vector can be sized to the incoming message,
    // and so the tag can be used to check the element type.
    MPI_Probe(id, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    if (status.MPI_TAG == TAG_LDOUBLE_COMPLEX_VEC)
    {
        if (typeid(T) != typeid(long double)) throw std::runtime_error("Types don't match");
        mpitype = MPI_LONG_DOUBLE;
    } else if (status.MPI_TAG == TAG_DOUBLE_COMPLEX_VEC) {
        if (typeid(T) != typeid(double)) throw std::runtime_error("Types don't match");
        mpitype = MPI_DOUBLE;
    } else if (status.MPI_TAG == TAG_FLOAT_COMPLEX_VEC) {
        if (typeid(T) != typeid(float)) throw std::runtime_error("Types don't match");
        mpitype = MPI_FLOAT;
    } else {
        throw std::runtime_error("Unrecognized message tag");
    }
    MPI_Get_count(&status, mpitype, &count);
    buf.resize(count);
    MPI_Recv(buf.data(), count, mpitype, id, status.MPI_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    return 0;
}
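The tag-based dispatch above implies a sender that labels each message with the element type. A minimal matching-send sketch, assuming the same TAG_* constants (only the double case is shown) and that `dest` is the receiving rank:

// Hedged sketch of a matching sender; MPI_Recv_vector above infers the
// element type from this tag.
void MPI_Send_vector(int dest, std::vector<double> &v)
{
    MPI_Send(v.data(), (int)v.size(), MPI_DOUBLE, dest,
             TAG_DOUBLE_COMPLEX_VEC, MPI_COMM_WORLD);
}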
Example 4: vsg_packed_msg_recv
/**
 * vsg_packed_msg_recv:
 * @pm: a #VsgPackedMsg.
 * @src: the source task id. Can be %MPI_ANY_SOURCE.
 * @tag: an integer message tag. Can be %MPI_ANY_TAG.
 *
 * Receives a message from source @src with message tag @tag and stores it in
 * @pm. Any previously stored data will be lost.
 */
void vsg_packed_msg_recv (VsgPackedMsg *pm, gint src, gint tag)
{
  MPI_Status status;
  gint ierr;
  gint rsize = 0;

  g_assert (pm->own_buffer == TRUE);

  // Probe so the receive buffer can be grown to the packed message size.
  MPI_Probe (src, tag, pm->communicator, &status);
  MPI_Get_count (&status, MPI_PACKED, &rsize);

  if (rsize > pm->allocated)
    {
      pm->buffer = g_realloc (pm->buffer, rsize);
      pm->allocated = rsize;
    }

  pm->size = rsize;

  ierr = MPI_Recv (pm->buffer, rsize, MPI_PACKED, status.MPI_SOURCE,
                   status.MPI_TAG, pm->communicator, &status);

  _recv_count ++;
  _recv_size += rsize;

  _trace_write_msg_recv (pm, "recv", status.MPI_SOURCE, status.MPI_TAG);

  pm->position = _PM_BEGIN_POS;

  if (ierr != MPI_SUCCESS) vsg_mpi_error_output (ierr);
}
Example 5: main
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(int argc, char** argv){
    MPI_Init(NULL, NULL);
    int world_size;
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);
    int world_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    const int MAX_NUMBERS = 100;
    int numbers[MAX_NUMBERS];
    int number_count;
    if(world_rank == 0){
        // Pick a random message length, then send that many ints to rank 1.
        srand(time(NULL));
        number_count = (rand() / (float)RAND_MAX) * MAX_NUMBERS;
        MPI_Send(numbers, number_count, MPI_INT, 1, 0, MPI_COMM_WORLD);
        printf("Number count: %d\n", number_count);
    }else if(world_rank == 1){
        // Probe first so the receive buffer can be sized dynamically.
        MPI_Status status;
        MPI_Probe(0, 0, MPI_COMM_WORLD, &status);
        MPI_Get_count(&status, MPI_INT, &number_count);
        int* number_buff = (int*)malloc(sizeof(int) * number_count);
        MPI_Recv(number_buff, number_count, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
        printf("Dynamically received: %d from 0.\n", number_count);
        free(number_buff);
    }
    MPI_Finalize();
    return 0;
}
Example 6: main
int main( int argc, char *argv[] )
{
    int rank, size;
    int provided;
    char buffer[100];
    MPI_Status status;

    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (provided != MPI_THREAD_MULTIPLE)
    {
        if (rank == 0)
        {
            printf("MPI_Init_thread must return MPI_THREAD_MULTIPLE in order for this test to run.\n");
            fflush(stdout);
        }
        MPI_Finalize();
        return -1;
    }

    // A second thread sends while the main thread probes and receives.
    MTest_Start_thread(send_thread, NULL);
    MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    MPI_Recv(buffer, sizeof(buffer), MPI_CHAR, rank, 0, MPI_COMM_WORLD, &status);
    MTest_Join_threads();

    MTest_Finalize(0);
    MPI_Finalize();
    return 0;
}
Example 7: MPI_Slave_Recv_buf_from_Scalar_real
template <typename T>
int MPI_Slave_Recv_buf_from_Scalar_real(T *buf, const ptrdiff_t local_n0, const ptrdiff_t local_0_start, const ptrdiff_t N0, const ptrdiff_t N1)
{
    MPI_Status status;
    auto mpitype = ionsim::convert_typeid_to_mpi<T>();
    int count;
    MPI_Send_local(local_n0, local_0_start, N0, N1);
    // Probe and verify that the incoming message has exactly local_n0*N1 elements.
    MPI_Probe(0, TAG_MASTER_SLAVE, MPI_COMM_WORLD, &status);
    MPI_Get_count(&status, mpitype, &count);
    if (count != local_n0*N1) throw std::runtime_error("Receiving incommensurate number of counts!");
    MPI_Recv(buf, local_n0*N1, mpitype, 0, TAG_MASTER_SLAVE, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    JTF_PRINTVAL(buf[0]);
    // Checksum the received block (row-major local_n0 x N1 layout).
    T sum = 0;
    std::vector<ptrdiff_t> x_pts = {local_n0, N1};
    for (ptrdiff_t i=0; i<local_n0; i++)
    {
        for (ptrdiff_t j=0; j<N1; j++)
        {
            sum += buf[ionsim::row_major(x_pts, i, j)];
        }
    }
    JTF_PRINT_NOEND(Sum of received: ) << sum << std::endl;
    return 0;
}
Example 8: MPI_Probe
int edaMpiWrapperControl::polling( int &nodeID )
{
    MPI_Status mpiStat;
    // Block until any node reports in on tag 0, then return its rank.
    MPI_Probe( MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &mpiStat );
    nodeID = mpiStat.MPI_SOURCE;
    return POLLING_FINISHED;
}
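A hedged sketch of a driver loop around polling(): the probed message is still pending when polling() returns, so the caller presumably receives it from nodeID next. Everything except polling() below (control, numWorkers, handleResult, the one-int payload on tag 0) is an assumption for illustration:

// Hypothetical driver loop; only polling() comes from the example above.
int nodeID;
for (int done = 0; done < numWorkers; ++done) {
    control.polling(nodeID);                 // blocks until some node reports in
    int result;                              // assumed payload: one int on tag 0
    MPI_Recv(&result, 1, MPI_INT, nodeID, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    handleResult(nodeID, result);
}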
Example 9: mpiMapReduce
/**
 * Performs map reduce using mpi message passing.
 * Assumes numprocs is a power of 2. The result will
 * be stored in the map on the process with rank 0.
 * @param map local map to reduce
 * @param rank mpi process rank
 * @param numprocs number of mpi processes
 */
void mpiMapReduce(HashMap* map, int rank, int numprocs) {
    int s;
    int count;
    MPI_Status status;
    char* buffer;
    //printf("starting map reduce\n");
    // Tree reduction: in each round, the upper half of the active ranks
    // sends its serialized map to the lower half, which merges it in.
    for (s = numprocs / 2; s > 0; s = s/2) {
        if (rank < s) {
            // Get the number of bytes being sent to us.
            //printf("getting next reduce for rank %d\n", rank);
            MPI_Probe(rank + s, 0, MPI_COMM_WORLD, &status);
            MPI_Get_count(&status, MPI_CHAR, &count);
            // Receive from the higher process and merge its data.
            buffer = (char*)malloc(sizeof(char) * count);
            //printf("received %d bytes at %d from %d\n", count, rank, rank + s);
            MPI_Recv(buffer, count, MPI_CHAR, rank + s, 0,
                     MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            addSerializedToMap(map, (unsigned char*) buffer, count);
            //printf("finished reduce at %d\n", rank);
            free(buffer);
        } else if (rank < 2 * s) {
            // Serialize the hashmap for sending.
            uint32_t nBytes;
            //printf("sending data from %d to %d\n", rank, rank - s);
            buffer = (char*)serializeMap(map, &nBytes);
            //printf("serialized map for sending in rank %d\n", rank);
            MPI_Send(buffer, nBytes, MPI_CHAR, rank - s, 0, MPI_COMM_WORLD);
            free(buffer);
        }
    }
}
Example 10: mpi_recv_stack
/**
 * MPI receive of a stack and merge with the local stack.
 */
void mpi_recv_stack(const int src) {
    assert(t);
    assert(s);
    MPI_Status mpi_status;
    unsigned int l;
    char *b = NULL;
    // First receive the packed length of the stack.
    MPI_Recv(&l, 1, MPI_UNSIGNED, src, MSG_STACK,
             MPI_COMM_WORLD, &mpi_status);
    srpdebug("mpi", node, "stack received <l=%db, src=%d>",
             l, mpi_status.MPI_SOURCE);
    // Allocate the buffer and start a blocking wait for MSG_STACK_DATA.
    srpdebug("mpi", node, "waiting for message <length=%db>", l);
    MPI_Probe(src, MSG_STACK_DATA, MPI_COMM_WORLD,
              &mpi_status);
    b = (char *)utils_malloc(l * sizeof(char));
    MPI_Recv(b, l, MPI_PACKED, src, MSG_STACK_DATA,
             MPI_COMM_WORLD, &mpi_status);
    stack_t *sn = stack_mpiunpack(b, t, l);
    free(b);
    srpdebug("mpi", node, "stack received <s=%d>", sn->s);
    // Merge the received stack into the local one.
    stack_merge(s, sn);
}
Example 11: BIL_Sched_swap_and_merge_unique_group_names
void BIL_Sched_swap_and_merge_unique_group_names(
    int is_sender, int swapper_rank, char** unique_group_names_ret,
    int* num_unique_groups_ret) {
  char* unique_group_names = *unique_group_names_ret;
  int num_unique_groups = *num_unique_groups_ret;
  int group_name_size = BIL_MAX_FILE_NAME_SIZE + BIL_MAX_VAR_NAME_SIZE;
  if (is_sender == 1) {
    MPI_Send((char*)unique_group_names, group_name_size * num_unique_groups,
             MPI_CHAR, swapper_rank, 0, BIL->world_comm);
  } else {
    // Probe for the message size.
    MPI_Status status;
    MPI_Probe(swapper_rank, 0, BIL->world_comm, &status);
    int group_name_recv_amount;
    MPI_Get_count(&status, MPI_CHAR, &group_name_recv_amount);
    unique_group_names = BIL_Misc_realloc(unique_group_names,
                                          num_unique_groups * group_name_size +
                                          group_name_recv_amount);
    // Receive group names from the outer processes.
    MPI_Recv(unique_group_names + (num_unique_groups * group_name_size),
             group_name_recv_amount, MPI_CHAR,
             swapper_rank, 0, BIL->world_comm, MPI_STATUS_IGNORE);
    num_unique_groups += group_name_recv_amount / group_name_size;
    // Merge the group names into an array of unique names.
    BIL_Sched_merge_unique_group_names(&unique_group_names,
                                       &num_unique_groups);
    *num_unique_groups_ret = num_unique_groups;
    *unique_group_names_ret = unique_group_names;
  }
}
Example 12: MPI_Mprobe
// Unsafe MPI_Mprobe emulation: between this MPI_Probe and the later matched
// receive, another receive may consume the probed message, so this shim is
// only safe for single-threaded use.
inline int MPI_Mprobe(int source, int tag, MPI_Comm comm,
                      MPI_Message *message, MPI_Status *status){
    message->__tag = tag;
    message->__comm = comm;
    message->__source = source;
    return MPI_Probe(source, tag, comm, status);
}
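A companion MPI_Mrecv shim in the same spirit would replay the stored envelope through a plain MPI_Recv. This is a hedged sketch that follows the __source/__tag/__comm convention above and shares the same caveat: it is only correct if no other receive could have consumed the probed message in between:

// Hedged companion sketch (not from the original source): emulate MPI_Mrecv
// by re-matching the envelope recorded by the MPI_Mprobe shim above.
inline int MPI_Mrecv(void *buf, int count, MPI_Datatype datatype,
                     MPI_Message *message, MPI_Status *status){
    return MPI_Recv(buf, count, datatype, message->__source,
                    message->__tag, message->__comm, status);
}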
Example 13: wait_for_signal
int wait_for_signal(MPI_Comm comm, int signal_num) {
    int ret;
    MPI_Status status;
    while(cur_epoch < signal_num) {
        /*
         * MPI_Probe is a blocking call that will unblock when it encounters an
         * error on the indicated peer. Passing MPI_ANY_SOURCE causes
         * MPI_Probe to unblock on the first unrecognized failed process.
         *
         * The error handler will be called before MPI_Probe returns.
         *
         * In this example, MPI_Probe will only return when there is an error,
         * since we do not send messages in this example. But we check the
         * return code anyway, just for good form.
         */
        ret = MPI_Probe(MPI_ANY_SOURCE, TAG_FT_DETECT, comm, &status);
        if( MPI_SUCCESS != ret ) {
            printf("%2d of %2d) MPI_Probe() Error: Some rank failed (error = %3d)\n",
                   mpi_mcw_rank, mpi_mcw_size,
                   status.MPI_ERROR);
            /* Recognize the failure and move on */
            OMPI_Comm_failure_ack(comm);
        }
        sleep(1);
    }
    return 0;
}
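The comment block above presumes an error handler that advances cur_epoch so the loop can exit. The original handler is not shown; a minimal sketch of what such wiring might look like, using the standard MPI error-handler API:

/* Hedged sketch, not from the original source: install a communicator error
 * handler that bumps cur_epoch, so wait_for_signal() eventually returns. */
static void ft_error_handler(MPI_Comm *comm, int *error_code, ...) {
    cur_epoch++;
}

void install_ft_handler(MPI_Comm comm) {
    MPI_Errhandler errh;
    MPI_Comm_create_errhandler(ft_error_handler, &errh);
    MPI_Comm_set_errhandler(comm, errh);
}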
Example 14: scribe_process
void scribe_process() {
    MPI_Status status;
    int array_buffer_size;
    int * work_array;
    int current_largest_size = 0;
    work_item work;
    while (TRUE) {
        // Wait for any message; a TERMINATE tag shuts the scribe down.
        MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
        if (status.MPI_TAG == TERMINATE)
            return;
        MPI_Get_count(&status, MPI_INT, &array_buffer_size);
        work_array = (int *) malloc(sizeof(int) * array_buffer_size);
        MPI_Recv(work_array, array_buffer_size, MPI_INT, status.MPI_SOURCE, SEQUENCE_TERMINATED, MPI_COMM_WORLD, &status);
        work = convert_array_to_work(work_array);
        // Track and report the longest sequence seen so far.
        if (work->size > current_largest_size) {
            current_largest_size = work->size;
            display_work_item(work);
        }
        free(work);
        free(work_array);
    }
}
Example 15: master
void master() {
    MPI_Status stat, stat2;
    int j;
    int intervals = 1000000000;
    double delta, x, pi = 0.0;
    delta = 1.0 / (double)intervals;
    int job = 0;
    // Main loop: keep serving requests while work remains or jobs are outstanding.
    while ( intervals >= 0 || job > 0 ) {
        // Wait for any incoming message.
        MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &stat);
        // Store the rank of the requesting slave in slave_rank.
        int slave_rank = stat.MPI_SOURCE;
        // Decide from the tag which type of message we have got.
        if ( stat.MPI_TAG == TAG_ASK_FOR_JOB ) {
            MPI_Recv(..., slave_rank, TAG_ASK_FOR_JOB, MPI_COMM_WORLD, &stat2);
            if (/* there are unprocessed jobs */) {
                // There are unprocessed jobs; send one job to the slave.
                job++;
                /* pack data of job into the buffer msg_buffer */
                MPI_Send(msg_buffer, 1, ..., slave_rank, TAG_JOB_DATA, MPI_COMM_WORLD);
                /* mark slave with rank slave_rank as working on a job */
            } else {
                // Send a stop message to the slave.
                MPI_Send(..., slave_rank, TAG_STOP, MPI_COMM_WORLD);
                job--;
            }
        } else {