This article collects and organizes typical usage examples of the MPI_Scan function in C++. If you have been wondering what exactly MPI_Scan does, how to call it, and what it looks like in real code, the curated code examples below should help.
A total of 15 MPI_Scan code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
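For reference before the examples: MPI_Scan performs an inclusive prefix reduction, so rank i receives the reduction of the send buffers of ranks 0 through i. Below is a minimal sketch of the standard prototype (as of MPI-3; older MPI versions declare sendbuf without const) and a typical call; the variable names local and prefix are illustrative and do not come from the examples that follow.

int MPI_Scan(const void *sendbuf, void *recvbuf, int count,
             MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);

/* typical call: inclusive prefix sum of one int per rank */
int local = 1;      /* this rank's contribution */
int prefix = 0;
MPI_Scan(&local, &prefix, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
/* prefix now holds the sum of local over ranks 0..my_rank */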
Example 1: main
int main(int argc, char *argv[])
{
    int rank, num_of_processes;
    int i;
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(comm, &num_of_processes);
    MPI_Comm_rank(comm, &rank);
    int localsum = 0;
    int globalsum = 0;
    int expectedsum = 0;
    if (rank == 0) {
        printf("Checking mpi_scan(sum)... (if you see no output then you are good)\n");
    }
    localsum = do_something(rank, 2);
    globalsum = 0;
    MPI_Scan(&localsum, &globalsum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    expectedsum = 0;
    // count up to my rank and verify that the scan returned the same value
    for (i = 0; i < rank + 1; i++) {
        expectedsum = expectedsum + do_something(i, 2);
    }
    if (globalsum != expectedsum) {
        printf("ERROR: Expected %d got %d [rank:%d]\n", expectedsum, globalsum, rank);
    }
    MPI_Finalize();
    return 0;
}
Example 2: main
#include <mpi.h>
#include <stdio.h>

int main(int argc, char** argv) {
    int rank, size;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    int sendbuf[1];
    int recvbuf[1];
    int finalbuf[size];     /* variable-length array (C99 / GNU C++ extension) */
    sendbuf[0] = rank + 1;
    /* inclusive prefix product: rank i receives 1*2*...*(i+1) = (i+1)! */
    MPI_Scan(sendbuf, recvbuf, 1, MPI_INT, MPI_PROD, MPI_COMM_WORLD);
    MPI_Gather(recvbuf, 1, MPI_INT, finalbuf, 1, MPI_INT, 0, MPI_COMM_WORLD);
    if (rank == 0)
    {
        for (int i = 0; i < size; i++)
        {
            printf("%d: Result %d: %d\n", rank, i, finalbuf[i]);
        }
    }
    MPI_Finalize();
    return 0;
}
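For concreteness, with 4 processes the inclusive prefix product of the values 1, 2, 3, 4 gives rank i the value (i+1)!, so rank 0 would print something like the following (a hypothetical run, not captured output):

0: Result 0: 1
0: Result 1: 2
0: Result 2: 6
0: Result 3: 24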
Example 3: Zoltan_Get_Distribution
/*
 * Compute an array that contains the cumulative sum of objects
 * on each processor.
 *
 * Memory for the vtxdist array is allocated here,
 * but must be freed by the calling routine.
 *
 */
int Zoltan_Get_Distribution(ZZ *zz, int **vtxdist)
{
  int ierr = ZOLTAN_OK, num_obj;
  char *yo = "Zoltan_Get_Distribution";

  num_obj = zz->Get_Num_Obj(zz->Get_Num_Obj_Data, &ierr);
  if (ierr != ZOLTAN_OK && ierr != ZOLTAN_WARN){
    /* Return error code */
    ZOLTAN_PRINT_ERROR(zz->Proc, yo, "Error in Get_Num_Obj.");
    return (ierr);
  }

  *vtxdist = (int *) ZOLTAN_MALLOC((zz->Num_Proc+1)*sizeof(int));
  if (num_obj>0){
    if (!(*vtxdist)){
      /* Not enough memory */
      ZOLTAN_PRINT_ERROR(zz->Proc, yo, "Out of memory.");
      return ZOLTAN_MEMERR;
    }
  }

  /* Construct (*vtxdist)[i] = the number of objects on all procs < i. */
  /* Scan to compute partial sums of the number of objs */
  MPI_Scan (&num_obj, *vtxdist, 1, MPI_INT, MPI_SUM, zz->Communicator);
  /* Gather data from all procs */
  MPI_Allgather (&((*vtxdist)[0]), 1, MPI_INT,
                 &((*vtxdist)[1]), 1, MPI_INT, zz->Communicator);
  (*vtxdist)[0] = 0;

  return ZOLTAN_OK;
}
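The scan-then-allgather idiom above can be isolated from the Zoltan types. Here is a minimal standalone sketch (the function name build_distribution and its arguments are hypothetical, not part of Zoltan) that fills dist[] so that dist[i] is the number of objects on all ranks below i and dist[nproc] is the global total:

void build_distribution(MPI_Comm comm, int num_obj, int nproc, int *dist /* length nproc+1 */)
{
    int inclusive;
    /* inclusive prefix sum: objects on ranks 0..me */
    MPI_Scan(&num_obj, &inclusive, 1, MPI_INT, MPI_SUM, comm);
    /* rank r's inclusive sum becomes dist[r+1] on every rank */
    MPI_Allgather(&inclusive, 1, MPI_INT, &dist[1], 1, MPI_INT, comm);
    dist[0] = 0;
}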
Example 4: MPIStream_SetOffset
Bool MPIStream_SetOffset( Stream* stream, SizeT sizeToWrite, MPI_Comm communicator ) {
   MPI_Offset offset = 0;
   int rank;
   int nproc;
   unsigned int localSizeToWrite;
   unsigned int sizePartialSum;

   if ( stream->_file == NULL ) {
      return False;
   }
   if ( stream->_file->type != MPIFile_Type ) {
      return False;
   }

   MPI_Comm_rank( communicator, &rank );
   MPI_Comm_size( communicator, &nproc );

   /* Sum up the individual sizeToWrites for processors lower than this one */
   localSizeToWrite = sizeToWrite;
   MPI_Scan( &localSizeToWrite, &sizePartialSum, 1, MPI_UNSIGNED, MPI_SUM, communicator );
   /* Now, just subtract the sizeToWrite of current processor to get our start point */
   offset = sizePartialSum - localSizeToWrite;

   MPI_File_seek( *(MPI_File*)stream->_file->fileHandle, offset, MPI_SEEK_SET );

   return True;
}
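The same starting offset can also be computed directly with MPI_Exscan (exclusive prefix sum), which leaves the receive buffer undefined on rank 0. A sketch of that variant, reusing the rank, offset and communicator variables from the function above (startOffset is an illustrative name, not from the original code):

unsigned int startOffset = 0;
localSizeToWrite = sizeToWrite;
MPI_Exscan( &localSizeToWrite, &startOffset, 1, MPI_UNSIGNED, MPI_SUM, communicator );
if ( rank == 0 ) {
   startOffset = 0;   /* MPI_Exscan leaves rank 0's result undefined */
}
offset = startOffset;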
Example 5: scan
static void scan(const communicator& comm, const T& in, T& out, const Op&)
{
    MPI_Scan(Datatype::address(const_cast<T&>(in)),
             Datatype::address(out),
             Datatype::count(in),
             Datatype::datatype(),
             detail::mpi_op<Op>::get(),
             comm);
}
Example 6: NULL_USE
int
SAMRAI_MPI::Scan(
   void* sendbuf,
   void* recvbuf,
   int count,
   Datatype datatype,
   Op op) const
{
#ifndef HAVE_MPI
   NULL_USE(sendbuf);
   NULL_USE(recvbuf);
   NULL_USE(count);
   NULL_USE(datatype);
   NULL_USE(op);
#endif
   int rval = MPI_SUCCESS;
   if (!s_mpi_is_initialized) {
      TBOX_ERROR("SAMRAI_MPI::Scan is a no-op without run-time MPI!");
   }
#ifdef HAVE_MPI
   else {
      rval = MPI_Scan(sendbuf, recvbuf, count, datatype, op, d_comm);
   }
#endif
   return rval;
}
Example 7: main
#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    int processCount;
    int currentRank;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &processCount);
    MPI_Comm_rank(MPI_COMM_WORLD, &currentRank);

    int reduce = currentRank;
    int reduce2 = currentRank;
    int reduce3 = 0;

    /* inclusive prefix sum: rank r receives 0 + 1 + ... + r */
    MPI_Scan(&currentRank, &reduce, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    printf("Scan: process %d: reduce = %d\n", currentRank, reduce);

    /* exclusive prefix sum: rank r receives 0 + 1 + ... + (r-1); undefined on rank 0 */
    MPI_Exscan(&currentRank, &reduce2, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    printf("Exscan: process %d: reduce = %d\n", currentRank, reduce2);

    /* full reduction, result delivered to rank 0 only */
    MPI_Reduce(&currentRank, &reduce3, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
    if (currentRank == 0)
        printf("Reduce: process %d: reduce = %d\n", currentRank, reduce3);

    MPI_Finalize();
    return 0;
}
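As an illustration, with 4 processes a run of the program above might produce output like this (line order varies between ranks; the Exscan value printed by rank 0 is simply whatever reduce2 was initialized to, here 0):

Scan: process 0: reduce = 0
Scan: process 1: reduce = 1
Scan: process 2: reduce = 3
Scan: process 3: reduce = 6
Exscan: process 0: reduce = 0
Exscan: process 1: reduce = 0
Exscan: process 2: reduce = 1
Exscan: process 3: reduce = 3
Reduce: process 0: reduce = 6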
Example 8: finishParticlesInitialization
/* Completes particle distribution */
void finishParticlesInitialization(uint64_t n, particle_t *p) {
  double x_coord, y_coord, rel_x, rel_y, cos_theta, cos_phi, r1_sq, r2_sq, base_charge, ID;
  uint64_t x, pi, cumulative_count;

  /* inclusive prefix sum of particle counts; IDs start right after the lower ranks' particles */
  MPI_Scan(&n, &cumulative_count, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD);
  ID = (double) (cumulative_count - n + 1);

  int my_ID;
  MPI_Comm_rank(MPI_COMM_WORLD, &my_ID);

  for (pi=0; pi<n; pi++) {
    x_coord = p[pi].x;
    y_coord = p[pi].y;
    rel_x = fmod(x_coord,1.0);
    rel_y = fmod(y_coord,1.0);
    x = (uint64_t) x_coord;
    r1_sq = rel_y * rel_y + rel_x * rel_x;
    r2_sq = rel_y * rel_y + (1.0-rel_x) * (1.0-rel_x);
    cos_theta = rel_x/sqrt(r1_sq);
    cos_phi = (1.0-rel_x)/sqrt(r2_sq);
    base_charge = 1.0 / ((DT*DT) * Q * (cos_theta/r1_sq + cos_phi/r2_sq));
    p[pi].v_x = 0.0;
    p[pi].v_y = ((double) p[pi].m) / DT;
    /* this particle charge assures movement in positive x-direction */
    p[pi].q = (x%2 == 0) ? (2*p[pi].k+1)*base_charge : -1.0 * (2*p[pi].k+1)*base_charge;
    p[pi].x0 = x_coord;
    p[pi].y0 = y_coord;
    p[pi].ID = ID;
    ID += 1.0;
  }
}
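The ID assignment above is a common pattern: the inclusive scan minus the local count gives the number of particles owned by lower ranks, hence a contiguous, globally unique ID range per rank. A minimal sketch of just that idiom (n as in the function above; n_inclusive and first_id are hypothetical names):

uint64_t n_inclusive, first_id;
MPI_Scan(&n, &n_inclusive, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD);
first_id = n_inclusive - n;   /* items owned by ranks below this one */
/* this rank's IDs are first_id, first_id + 1, ..., first_id + n - 1 */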
Example 9: partial_sum_to_all
int partial_sum_to_all(int in) {
  int out = in;
#ifdef HAVE_MPI
  MPI_Scan(&in, &out, 1, MPI_INT, MPI_SUM, mycomm);
#endif
  return out;
}
Example 10: mpi_scan_
FORT_DLL_SPEC void FORT_CALL mpi_scan_ ( void*v1, void*v2, MPI_Fint *v3, MPI_Fint *v4, MPI_Fint *v5, MPI_Fint *v6, MPI_Fint *ierr ){
#ifndef HAVE_MPI_F_INIT_WORKS_WITH_C
    if (MPIR_F_NeedInit){ mpirinitf_(); MPIR_F_NeedInit = 0; }
#endif
    if (v1 == MPIR_F_MPI_IN_PLACE) v1 = MPI_IN_PLACE;
    *ierr = MPI_Scan( v1, v2, *v3, (MPI_Datatype)(*v4), (MPI_Op)(*v5), (MPI_Comm)(*v6) );
}
Example 11: caml_mpi_scan_int
value caml_mpi_scan_int(value data, value op, value comm)
{
  long d = Long_val(data);
  long r;
  MPI_Scan(&d, &r, 1, MPI_LONG, reduce_intop[Int_val(op)], Comm_val(comm));
  return Val_long(r);
}
Example 12: caml_mpi_scan_float
value caml_mpi_scan_float(value data, value op, value comm)
{
  double d = Double_val(data), r;
  MPI_Scan(&d, &r, 1, MPI_DOUBLE,
           reduce_floatop[Int_val(op)], Comm_val(comm));
  return copy_double(r);
}
Example 13: FC_FUNC
FC_FUNC( mpi_scan , MPI_SCAN)
  ( void *sendbuf, void *recvbuf, int *count,
    int *datatype, int *op, int *comm,
    int *ierror)
{
  *ierror = MPI_Scan( sendbuf, recvbuf, *count,
                      *datatype, *op, *comm);
}
Example 14: MPI_Comm_rank
void dummy_operations::run_collective_dummy_operations() {
    int rank, size;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank);
    MPI_Comm_size( MPI_COMM_WORLD, &size);

    // Run Broadcast
    {
        int x;
        MPI_Comm_rank( MPI_COMM_WORLD, &x);
        MPI_Bcast(&x, 1, MPI_INT, 0, MPI_COMM_WORLD);
    }

    // Run Allgather.
    {
        int x, size;
        MPI_Comm_rank( MPI_COMM_WORLD, &x);
        MPI_Comm_size( MPI_COMM_WORLD, &size);
        std::vector<int> rcv(size);
        MPI_Allgather(&x, 1, MPI_INT, &rcv[0], 1, MPI_INT, MPI_COMM_WORLD);
    }

    // Run Allreduce.
    {
        int x;
        MPI_Comm_rank( MPI_COMM_WORLD, &x);
        int y = 0;
        MPI_Allreduce(&x, &y, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    }

    // Dummy Prefix Sum
    {
        int x = 1;
        int y = 0;
        MPI_Scan(&x, &y, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    }

    // Run Alltoallv.
    {
        std::vector<int> snd(size);
        std::vector<int> rcv(size);
        std::vector<int> scounts(size, 1);
        std::vector<int> rcounts(size, 1);
        std::vector<int> sdispls(size);
        std::vector<int> rdispls(size);
        for (int i = 0, iend = sdispls.size(); i < iend; ++i) {
            sdispls[i] = rdispls[i] = i;
        }
        MPI_Alltoallv(&snd[0], &scounts[0], &sdispls[0], MPI_INT,
                      &rcv[0], &rcounts[0], &rdispls[0], MPI_INT, MPI_COMM_WORLD);
    }
}
Example 15: MyMPI_Scan
/**
 * @brief Wrapper around MPI_Scan
 *
 * We check the error code to detect MPI errors and use the default communicator
 * MPI_COMM_WORLD.
 *
 * @param sendbuf Buffer that is being sent
 * @param recvbuf Buffer to receive in
 * @param count Number of elements to be sent
 * @param datatype MPI datatype of the elements
 * @param op Global reduce operation
 */
inline void MyMPI_Scan(void* sendbuf, void* recvbuf, int count,
                       MPI_Datatype datatype, MPI_Op op) {
    MPIGlobal::commtimer.start();

    int status =
            MPI_Scan(sendbuf, recvbuf, count, datatype, op, MPI_COMM_WORLD);
    if(status != MPI_SUCCESS) {
        std::cerr << "Error during MPI_Scan!" << std::endl;
        my_exit();
    }

    MPIGlobal::commtimer.stop();
}
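A possible call site for this wrapper, assuming an integer prefix sum (the variables local and prefix are illustrative and not taken from the surrounding project):

int local = 1;
int prefix = 0;
MyMPI_Scan(&local, &prefix, 1, MPI_INT, MPI_SUM);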