This article collects typical usage examples of the MPI_Gatherv function in C/C++. If you are wondering what MPI_Gatherv does, how to call it, or what real-world code that uses it looks like, the curated examples below should help.
The following presents 15 code examples of MPI_Gatherv drawn from real projects, sorted by popularity by default.
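For reference, the C prototype of MPI_Gatherv as specified by the MPI standard is shown below (this is the MPI-3 signature; older MPI versions omit the const qualifiers). The receive arguments recvbuf, recvcounts, displs, and recvtype are significant only at the root process.

int MPI_Gatherv(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
                void *recvbuf, const int recvcounts[], const int displs[],
                MPI_Datatype recvtype, int root, MPI_Comm comm);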
Example 1: parallelMatrixTimesVector
void parallelMatrixTimesVector(int local_rows, int cols,
                               double *local_A, double *b, double *y,
                               int root, int my_rank, int p, MPI_Comm comm)
{
    /*
      This function performs parallel matrix-vector multiplication of a
      matrix A times a vector b. The matrix is distributed by rows. Each
      process holds a (local_rows)x(cols) matrix local_A stored as a
      one-dimensional array. The vector b is stored on every process.
      Each process computes its part of the result, and the root process
      collects the results and returns them in y.
        local_rows is the number of rows on my_rank
        cols       is the number of columns on each process
        local_A    is a pointer to the matrix on my_rank
        b          is a pointer to the vector b of size cols
        y          is a pointer to the result on the root process;
                   y is significant only on root.
    */
    double *local_y = (double *) malloc(sizeof(double) * local_rows);

    /* Compute the local matrix times vector */
    compMatrixTimesVector(local_rows, cols, local_A, b, local_y);

    int sendcount = local_rows;  /* number of doubles sent by process my_rank */
    int *reccounts = NULL;       /* reccounts[i] is the number of doubles received from process i */
    int *displs = NULL;          /* displacements for MPI_Gatherv */

    if (my_rank != root)
    {
        /* Send sendcount to the root process. reccounts does not matter here. */
        MPI_Gather(&sendcount, 1, MPI_INT, reccounts, 1, MPI_INT, root, comm);
        /* Send the computed results to the root process. The receive
           buffer, reccounts, and displs do not matter here. */
        MPI_Gatherv(local_y, sendcount, MPI_DOUBLE,
                    y, reccounts, displs, MPI_DOUBLE, root, comm);
    }
    else /* we are on the root process */
    {
        /* Gather the receive counts from each process */
        reccounts = (int *) malloc(sizeof(int) * p);
        MPI_Gather(&sendcount, 1, MPI_INT, reccounts, 1, MPI_INT, root, comm);
        /* Calculate displs for MPI_Gatherv */
        displs = (int *) malloc(sizeof(int) * p);
        int i;
        displs[0] = 0;
        for (i = 1; i < p; i++)
            displs[i] = displs[i-1] + reccounts[i-1];
        /* Gather the results on the root process */
        MPI_Gatherv(local_y, sendcount, MPI_DOUBLE,
                    y, reccounts, displs, MPI_DOUBLE, root, comm);
        free(displs);
        free(reccounts);
    }
    free(local_y);
}
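To make Example 1 easier to try out, here is a minimal driver sketch; it is not part of the original example. It assumes the helper compMatrixTimesVector referenced above is available, that the hypothetical global size n is divisible by the number of processes, and it simply fills the matrix and vector with ones.

#include <mpi.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    int my_rank, p;
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &p);

    int n = 8;                 /* hypothetical global size, assumed divisible by p */
    int local_rows = n / p;
    double *local_A = (double *) malloc(sizeof(double) * local_rows * n);
    double *b = (double *) malloc(sizeof(double) * n);
    double *y = (my_rank == 0) ? (double *) malloc(sizeof(double) * n) : NULL;

    /* fill the local block and the vector with dummy values */
    for (int i = 0; i < local_rows * n; i++) local_A[i] = 1.0;
    for (int j = 0; j < n; j++) b[j] = 1.0;

    parallelMatrixTimesVector(local_rows, n, local_A, b, y, 0, my_rank, p, MPI_COMM_WORLD);

    free(local_A); free(b); free(y);
    MPI_Finalize();
    return 0;
}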
Example 2: main
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(int argc, char **argv)
{
    if (MPI_Init(&argc, &argv) != MPI_SUCCESS) {
        fprintf(stderr, "MPI initialization failed.\n");
        return 1;
    }
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (size < 2) {
        fprintf(stderr, "can't play this game alone.\n");
        return 1;
    }
    srand((unsigned) (rank + MPI_Wtime()));

    int sendcount = rand() % 10 + 1;
    char sendbuf[sendcount];
    for (int i = 0; i < sendcount; i++)
        sendbuf[i] = '0' + rank % 10;
    fprintf(stderr, "[ %d ] sendcount: %d\n", rank, sendcount);

    int recvcounts[size];
    if (MPI_Allgather(&sendcount, 1, MPI_INT, recvcounts, 1, MPI_INT, MPI_COMM_WORLD)) {
        fprintf(stderr, "MPI_Allgather failed\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    int totalcount = 0;
    for (int i = 0; i < size; i++)
        totalcount += recvcounts[i];
    fprintf(stderr, "[ %d ] totalcount: %d\n", rank, totalcount);

    char recvbuf[totalcount + 1];
    memset(recvbuf, 0, sizeof(recvbuf));
    if (rank) {
        /* The receive arguments are ignored on non-root ranks. */
        if (MPI_Gatherv(sendbuf, sendcount, MPI_CHAR, NULL, NULL, NULL, MPI_CHAR, 0, MPI_COMM_WORLD)) {
            fprintf(stderr, "MPI_Gatherv failed\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
    } else {
        int displs[size];
        displs[0] = 0;
        for (int i = 1; i < size; i++)
            displs[i] = displs[i - 1] + recvcounts[i - 1];
        if (MPI_Gatherv(sendbuf, sendcount, MPI_CHAR, recvbuf, recvcounts, displs, MPI_CHAR, 0, MPI_COMM_WORLD)) {
            fprintf(stderr, "MPI_Gatherv failed\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        fprintf(stderr, "[ %d ] received Gatherv \"%s\"\n", rank, recvbuf);
    }
    if (MPI_Bcast(recvbuf, totalcount, MPI_CHAR, 0, MPI_COMM_WORLD)) {
        fprintf(stderr, "MPI_Bcast failed\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    fprintf(stderr, "[ %d ] received Bcast \"%s\"\n", rank, recvbuf);
    MPI_Finalize();
    return 0;
}
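A variation worth knowing about, shown here as a minimal sketch rather than as part of the original example: at the root, the MPI standard allows sendbuf to be MPI_IN_PLACE, in which case sendcount and sendtype are ignored and the root's contribution is assumed to already sit at its displacement in recvbuf. Non-root ranks call MPI_Gatherv as usual, and their receive arguments are ignored. The helper name below is hypothetical.

#include <mpi.h>
#include <string.h>

/* Hypothetical helper illustrating the MPI_IN_PLACE variant of MPI_Gatherv. */
void gather_in_place_sketch(char *sendbuf, int sendcount,
                            char *recvbuf, int *recvcounts, int *displs,
                            int rank, MPI_Comm comm)
{
    if (rank == 0) {
        /* Root copies its own contribution to its displacement first ... */
        memcpy(recvbuf + displs[0], sendbuf, (size_t) recvcounts[0]);
        /* ... then passes MPI_IN_PLACE; sendcount and sendtype are ignored here. */
        MPI_Gatherv(MPI_IN_PLACE, 0, MPI_CHAR,
                    recvbuf, recvcounts, displs, MPI_CHAR, 0, comm);
    } else {
        /* Receive arguments are ignored on non-root ranks. */
        MPI_Gatherv(sendbuf, sendcount, MPI_CHAR,
                    NULL, NULL, NULL, MPI_CHAR, 0, comm);
    }
}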
Example 3: collect_x_old
void collect_x_old(double **x, double *xold)
{
    int i, iunk, loc_inode, nunk_per_proc;
    int *index = NULL;
    double *unk_global, *unk_loc;

    /* allocate temporary arrays */
    nunk_per_proc = Nnodes_per_proc * Nunk_per_node;
    unk_loc = (double *) array_alloc(1, nunk_per_proc, sizeof(double));

    for (loc_inode = 0; loc_inode < Nnodes_per_proc; loc_inode++)
        for (iunk = 0; iunk < Nunk_per_node; iunk++) {
            unk_loc[iunk + Nunk_per_node*loc_inode] = x[iunk][L2B_node[loc_inode]]; /* always use nodal ordering here */
        }

    if (Proc == 0) {
        unk_global = (double *) array_alloc(1, Nunknowns, sizeof(double));
        index = (int *) array_alloc(1, Nnodes, sizeof(int));
    }
    else {
        unk_global = NULL;
        index = NULL;
    }

    /* collect the node numbers from all the processors */
    MPI_Gatherv(L2G_node, Nnodes_per_proc, MPI_INT,
                index, Comm_node_proc, Comm_offset_node,
                MPI_INT, 0, MPI_COMM_WORLD);

    /* collect the unknowns from all the processors */
    MPI_Gatherv(unk_loc, nunk_per_proc, MPI_DOUBLE,
                unk_global, Comm_unk_proc, Comm_offset_unk,
                MPI_DOUBLE, 0, MPI_COMM_WORLD);

    safe_free((void *) &unk_loc);

    if (Proc == 0) {
        for (i = 0; i < Nnodes; i++) {
            for (iunk = 0; iunk < Nunk_per_node; iunk++) {
                xold[index[i]*Nunk_per_node + iunk] = unk_global[i*Nunk_per_node + iunk];
            }
        }
        safe_free((void *) &unk_global);
        safe_free((void *) &index);
    }
    return;
}
Example 4: fprintf
//-----------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------
void Image_Exchanger::exchange_fragment_images(unsigned int* databuf,
                                               int nviewer,
                                               ImageFragment_Tile* ift)
{
//    fprintf(stderr, "**** %s:%s() ****\n", __FILE__, __func__);
#ifdef _DEBUG7
    fprintf(stderr, "**** %s:%s() ****\n", __FILE__, __func__);
#endif
    unsigned int* sendbuf = databuf + m_sbuf_offset;
    unsigned int* recvbuf = databuf + m_rbuf_offset;

    if(nviewer == 1)
    {
        MPI_Gatherv((int*)sendbuf, m_scounts[0], MPI_INT,
                    (int*)recvbuf, m_rcounts, m_rdispls, MPI_INT,
                    0, MPI_COMM_WORLD);
    }
    else
    {
        MPI_Alltoallv((int*)sendbuf, m_scounts, m_sdispls, MPI_INT,
                      (int*)recvbuf, m_rcounts, m_rdispls, MPI_INT,
                      MPI_COMM_WORLD);
    }

    ift->address_fragments(m_rbuf_offset, m_rdispls);
}
Example 5: mpi_gatherv_f
void mpi_gatherv_f(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype,
                   char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *displs,
                   MPI_Fint *recvtype, MPI_Fint *root, MPI_Fint *comm,
                   MPI_Fint *ierr)
{
    MPI_Comm c_comm;
    MPI_Datatype c_sendtype, c_recvtype;
    int size;
    OMPI_ARRAY_NAME_DECL(recvcounts);
    OMPI_ARRAY_NAME_DECL(displs);

    c_comm = MPI_Comm_f2c(*comm);
    c_sendtype = MPI_Type_f2c(*sendtype);
    c_recvtype = MPI_Type_f2c(*recvtype);

    MPI_Comm_size(c_comm, &size);
    OMPI_ARRAY_FINT_2_INT(recvcounts, size);
    OMPI_ARRAY_FINT_2_INT(displs, size);

    sendbuf = (char *) OMPI_F2C_IN_PLACE(sendbuf);
    sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
    recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);

    *ierr = OMPI_INT_2_FINT(MPI_Gatherv(sendbuf, OMPI_FINT_2_INT(*sendcount),
                                        c_sendtype, recvbuf,
                                        OMPI_ARRAY_NAME_CONVERT(recvcounts),
                                        OMPI_ARRAY_NAME_CONVERT(displs),
                                        c_recvtype,
                                        OMPI_FINT_2_INT(*root),
                                        c_comm));
}
Example 6: invoke
void invoke() {
    if (!has_contiguous_data(lhs)) TRIQS_RUNTIME_ERROR << "mpi gather of array into a non contiguous view";
    auto c = laz.c;
    auto recvcounts = std::vector<int>(c.size());
    auto displs = std::vector<int>(c.size() + 1, 0);
    int sendcount = laz.ref.domain().number_of_elements();
    auto D = mpi::mpi_datatype<typename A::value_type>();

    auto d = laz.domain();
    if (laz.all || (laz.c.rank() == laz.root)) resize_or_check_if_view(lhs, d.lengths());

    void *lhs_p = lhs.data_start();
    const void *rhs_p = laz.ref.data_start();

    auto mpi_ty = mpi::mpi_datatype<int>();
    if (!laz.all)
        MPI_Gather(&sendcount, 1, mpi_ty, &recvcounts[0], 1, mpi_ty, laz.root, c.get());
    else
        MPI_Allgather(&sendcount, 1, mpi_ty, &recvcounts[0], 1, mpi_ty, c.get());

    for (int r = 0; r < c.size(); ++r) displs[r + 1] = recvcounts[r] + displs[r];

    if (!laz.all)
        MPI_Gatherv((void *)rhs_p, sendcount, D, lhs_p, &recvcounts[0], &displs[0], D, laz.root, c.get());
    else
        MPI_Allgatherv((void *)rhs_p, sendcount, D, lhs_p, &recvcounts[0], &displs[0], D, c.get());
}
Example 7: gatherVector
void gatherVector(float *localProdVec, int *rowInfo, float *prodVec) {
    int myRank, numProcs;
    int i;
    int *displs, *recvCount;
    int myRowCount;

    MPI_Comm_size(MPI_COMM_WORLD, &numProcs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myRank);

    displs = (int *) malloc(sizeof(int) * numProcs);
    recvCount = (int *) malloc(sizeof(int) * numProcs);

    if (myRank == ROOT) {
        for (i = 0; i < numProcs; i++) {
            // get offset and count of rows to be received from proc i
            displs[i] = rowInfo[i];
            recvCount[i] = rowInfo[i+numProcs] - rowInfo[i] + 1;
        }
    }

    myRowCount = rowInfo[myRank+numProcs] - rowInfo[myRank] + 1;

    // gather this computed vector at root
    MPI_Gatherv(localProdVec, myRowCount, MPI_FLOAT,
                prodVec, recvCount, displs,
                MPI_FLOAT, ROOT, MPI_COMM_WORLD);

    free(displs);
    free(recvCount);
}
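Example 7 relies on a rowInfo array whose layout is only implied by the indexing: entries 0..numProcs-1 appear to hold each process's first row and entries numProcs..2*numProcs-1 its last row (inclusive). Under that assumption, a helper like the following hypothetical buildRowInfo could produce it for an n-row matrix split as evenly as possible.

#include <stdlib.h>

/* Hypothetical helper; the layout it fills is an assumption based on how
   rowInfo is indexed in gatherVector above. */
int *buildRowInfo(int n, int numProcs)
{
    int *rowInfo = (int *) malloc(sizeof(int) * 2 * numProcs);
    int base = n / numProcs, extra = n % numProcs, start = 0;
    for (int i = 0; i < numProcs; i++) {
        int rows = base + (i < extra ? 1 : 0);
        rowInfo[i] = start;                        /* first row owned by proc i */
        rowInfo[i + numProcs] = start + rows - 1;  /* last row owned by proc i (inclusive) */
        start += rows;
    }
    return rowInfo;
}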
Example 8: distributedMatMatProd
// computes A*mat and stores the result on the rank 0 process in matProd (assumes the memory has already been allocated)
void distributedMatMatProd(const double *localRowChunk, const double *mat,
                           double *matProd, const distMatrixInfo *matInfo,
                           const distGatherInfo *eigInfo, scratchMatrices *scratchSpace) {
    multiplyAChunk(localRowChunk, mat, scratchSpace->Scratch3,
                   matInfo->localrows, matInfo->numcols, eigInfo->numeigs);
    if (matInfo->mpi_rank != 0) {
        /* Non-root ranks: the receive arguments are ignored, so NULL is fine. */
        MPI_Gatherv(scratchSpace->Scratch3,
                    matInfo->localrows*eigInfo->numeigs, MPI_DOUBLE, NULL, NULL, NULL,
                    MPI_DOUBLE, 0, *(matInfo->comm));
    } else {
        MPI_Gatherv(scratchSpace->Scratch3,
                    matInfo->localrows*eigInfo->numeigs, MPI_DOUBLE, matProd,
                    eigInfo->elementcounts, eigInfo->elementoffsets, MPI_DOUBLE, 0,
                    *(matInfo->comm));
    }
}
Example 9: time_gatherv
double time_gatherv(struct collParams* p)
{
    int i, size2;
    int disp = 0;
    for (i = 0; i < p->nranks; i++) {
        size2 = i % (p->size+1);
        recvcounts[i] = size2;
        rdispls[i] = disp;
        disp += size2;
    }
    MPI_Barrier(MPI_COMM_WORLD);

    size2 = p->myrank % (p->size+1);
    __TIME_START__;
    for (i = 0; i < p->iter; i++) {
        MPI_Gatherv(sbuffer, size2, p->type, rbuffer, recvcounts, rdispls, p->type, p->root, p->comm);
        __BAR__(p->comm);
    }
    __TIME_END__;

    if (check_buffers) {
        check_sbuffer(p->myrank);
        if (p->myrank == p->root) {
            for (i = 0; i < p->nranks; i++) {
                check_rbuffer(rbuffer, rdispls[i], i, 0, recvcounts[i]);
            }
        }
    }

    return __TIME_USECS__ / (double)p->iter;
}
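The timing and barrier macros used in Example 9 are defined elsewhere in the benchmark; a plausible minimal definition in terms of MPI_Wtime (an assumption, not the benchmark's actual code) might look like this.

/* Hypothetical definitions, assuming wall-clock timing via MPI_Wtime. */
static double __tstart, __tend;
#define __TIME_START__   (__tstart = MPI_Wtime())
#define __TIME_END__     (__tend = MPI_Wtime())
#define __TIME_USECS__   ((__tend - __tstart) * 1.0e6)  /* elapsed time in microseconds */
#define __BAR__(comm)    MPI_Barrier(comm)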
Example 10: MPI_Bcast
ubjson::Value Master::ExportSimulation() {
    // This is a control method, so master 0 sends the order to the other masters
    if (id_ == 0) {
        order_ = Order::EXPORT_SIMULATION;
        MPI_Bcast(&order_, 1, MPI_INT, 0, MasterComm_);
    }
    ubjson::Value local_agents;
    std::vector<ubjson::Value> local_agents_by_types(nb_types_);
    for (AgentHandler &agent_handler : agent_handlers_) {
        agent_handler.GetJsonNodes(local_agents_by_types);
    }
    for (auto &type : agent_type_to_string_) {
        local_agents[type.second] = std::move(local_agents_by_types.at(type.first));
    }
    // Now all the info must be gathered on master 0
    std::ostringstream local_data_stream;
    ubjson::StreamWriter<std::ostringstream> writer(local_data_stream);
    writer.writeValue(local_agents);
    std::string local_data = local_data_stream.str();
    int local_data_size = local_data.size();
    // First, master 0 must know how much data it will receive
    std::vector<int> sizes_to_receive;
    if (id_ == 0) {
        sizes_to_receive.resize(nb_masters_);
    }
    MPI_Gather(&local_data_size, 1, MPI_INT, sizes_to_receive.data(), 1, MPI_INT, 0, MasterComm_);
    // Storing the results in 'results'
    std::vector<std::string> results;
    if (id_ == 0) {
        for (int i = 0; i < nb_masters_; i++) {
            results.emplace_back(std::string(sizes_to_receive.at(i), '0'));
        }
    }
    // Displacements are computed relative to results.data(), so the gather
    // writes each master's payload directly into its pre-sized string
    std::vector<int> displs;
    if (id_ == 0) {
        for (int i = 0; i < nb_masters_; i++) {
            displs.push_back(results.at(i).data() - (char*)results.data());
        }
    }
    MPI_Gatherv((void*)local_data.data(), local_data_size, MPI_UNSIGNED_CHAR,
                (void*)results.data(), sizes_to_receive.data(), displs.data(), MPI_UNSIGNED_CHAR, 0, MasterComm_);
    // Grouping the results
    ubjson::Value agents;
    for (auto &master_agents : results) {
        ubjson::Value masters_value;
        std::istringstream s(master_agents);
        ubjson::StreamReader<std::istringstream> reader(s);
        masters_value = reader.getNextValue();
        for (auto &type : agent_type_to_string_) {
            for (auto &agent : masters_value[type.second]) {
                agents[type.second].push_back(agent);
            }
        }
    }
    ubjson::Value final;
    final["agents"] = agents;
    return final;
}
Example 11: gather
static void gather(const communicator& comm, const std::vector<T>& in, std::vector< std::vector<T> >& out, int root)
{
    std::vector<int> counts(comm.size());
    Collectives<int,void*>::gather(comm, (int) in.size(), counts, root);

    std::vector<int> offsets(comm.size(), 0);
    for (unsigned i = 1; i < offsets.size(); ++i)
        offsets[i] = offsets[i-1] + counts[i-1];

    std::vector<T> buffer(offsets.back() + counts.back());
    MPI_Gatherv(Datatype::address(const_cast<T&>(in[0])),
                in.size(),
                Datatype::datatype(),
                Datatype::address(buffer[0]),
                &counts[0],
                &offsets[0],
                Datatype::datatype(),
                root, comm);

    out.resize(comm.size());
    size_t cur = 0;
    for (unsigned i = 0; i < (unsigned)comm.size(); ++i)
    {
        out[i].reserve(counts[i]);
        for (unsigned j = 0; j < (unsigned)counts[i]; ++j)
            out[i].push_back(buffer[cur++]);
    }
}
Example 12: mpi_gatherv
void mpi_gatherv(void *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype,
                 void *recvbuf, MPI_Fint *recvcounts, MPI_Fint *displs,
                 MPI_Fint *recvtype, MPI_Fint *root, MPI_Fint *comm, MPI_Fint *__ierr)
{
    *__ierr = MPI_Gatherv(sendbuf, *sendcount, MPI_Type_f2c(*sendtype),
                          recvbuf, recvcounts, displs,
                          MPI_Type_f2c(*recvtype), *root, MPI_Comm_f2c(*comm));
}
Example 13: gather_vectors
std::vector<int> gather_vectors(std::vector<int>& local_vec, MPI_Comm comm)
{
    // get MPI parameters
    int rank;
    int p;
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &p);

    // get local size
    int local_size = local_vec.size();

    // init result
    std::vector<int> result;

    // master process: receive results
    if (rank == 0)
    {
        // gather local array sizes; sizes are restricted to `int` by MPI anyway,
        // therefore use int
        std::vector<int> local_sizes(p);
        MPI_Gather(&local_size, 1, MPI_INT, &local_sizes[0], 1, MPI_INT, 0, comm);

        // compute the total size and resize the result buffer
        int total_size = std::accumulate(local_sizes.begin(), local_sizes.end(), 0);
        result.resize(total_size);

        // get receive displacements
        std::vector<int> displs(p, 0);
        for (int i = 1; i < p; ++i)
            displs[i] = displs[i-1] + local_sizes[i-1];

        // gather-v to collect all the elements on the root
        MPI_Gatherv(&local_vec[0], local_size, MPI_INT,
                    &result[0], &local_sizes[0], &displs[0], MPI_INT, 0, comm);
    }
    // else: send results
    else {
        // send the local array size
        MPI_Gather(&local_size, 1, MPI_INT, NULL, 1, MPI_INT, 0, comm);

        // send the actual data
        MPI_Gatherv(&local_vec[0], local_size, MPI_INT,
                    NULL, NULL, NULL, MPI_INT, 0, comm);
    }
    return result;
}
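A small hypothetical driver for Example 13, not part of the original snippet: each rank contributes rank+1 copies of its own rank number, so the gathered vector printed on rank 0 is 0 1 1 2 2 2 and so on. It assumes gather_vectors is defined above (which itself needs <numeric> for std::accumulate).

#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // rank i contributes i+1 copies of the value i
    std::vector<int> local_vec(rank + 1, rank);
    std::vector<int> result = gather_vectors(local_vec, MPI_COMM_WORLD);

    if (rank == 0) {
        for (std::size_t i = 0; i < result.size(); ++i)
            std::printf("%d ", result[i]);
        std::printf("\n");
    }
    MPI_Finalize();
    return 0;
}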
Example 14: FC_FUNC
FC_FUNC( mpi_gatherv , MPI_GATHERV )
    ( void *sendbuf, int *sendcount, int *sendtype,
      void *recvbuf, int *recvcounts, int *displs,
      int *recvtype, int *root, int *comm, int *ierror)
{
    *ierror = MPI_Gatherv( mpi_c_in_place(sendbuf), *sendcount, *sendtype,
                           recvbuf, recvcounts, displs,
                           *recvtype, *root, *comm);
}
Example 15: MPI_Gatherv
// ****************************************************************************
//  Method: avtImgCommunicator::gatherIotaMetaData
//
//  Purpose:
//    Send the metadata needed by the root node to make decisions
//
//  Arguments:
//    arraySize       : the number of elements being sent
//    allIotaMetadata : the metadata being sent
//
//  Programmer: Pascal Grosset
//  Creation:   July 2013
//
//  Modifications:
//
// ****************************************************************************
void avtImgCommunicator::gatherIotaMetaData(int arraySize, float *allIotaMetadata){
#ifdef PARALLEL
    int *recvSizePerProc = NULL;
    float *tempRecvBuffer = NULL;
    int *offsetBuffer = NULL;

    if (my_id == 0){
        tempRecvBuffer = new float[totalPatches*7]; // x7: procId, patchNumber, dims[0], dims[1], screen_ll[0], screen_ll[1], avg_z
        recvSizePerProc = new int[num_procs];
        offsetBuffer = new int[num_procs];
        for (int i=0; i<num_procs; i++){
            if (i == 0)
                offsetBuffer[i] = 0;
            else
                offsetBuffer[i] = offsetBuffer[i-1] + recvSizePerProc[i-1];
            recvSizePerProc[i] = processorPatchesCount[i]*7;
        }
    }

    MPI_Gatherv(allIotaMetadata, arraySize, MPI_FLOAT,
                tempRecvBuffer, recvSizePerProc, offsetBuffer, MPI_FLOAT,
                0, MPI_COMM_WORLD); // all send to proc 0

    if (my_id == 0){
        allRecvIotaMeta = new iotaMeta[totalPatches]; // allocate space to receive the many patches

        iotaMeta tempPatch;
        for (int i=0; i<totalPatches; i++){
            tempPatch.procId       = (int) tempRecvBuffer[i*7 + 0];
            tempPatch.patchNumber  = (int) tempRecvBuffer[i*7 + 1];
            tempPatch.dims[0]      = (int) tempRecvBuffer[i*7 + 2];
            tempPatch.dims[1]      = (int) tempRecvBuffer[i*7 + 3];
            tempPatch.screen_ll[0] = (int) tempRecvBuffer[i*7 + 4];
            tempPatch.screen_ll[1] = (int) tempRecvBuffer[i*7 + 5];
            tempPatch.avg_z        =       tempRecvBuffer[i*7 + 6];

            int patchIndex = getDataPatchID(tempPatch.procId, tempPatch.patchNumber);
            allRecvIotaMeta[patchIndex] = setIota(tempPatch.procId, tempPatch.patchNumber, tempPatch.dims[0], tempPatch.dims[1], tempPatch.screen_ll[0], tempPatch.screen_ll[1], tempPatch.avg_z);
            all_avgZ_proc0.insert(tempPatch.avg_z); // insert avg_z values into the set to keep a count of the total number of avg_z values
        }

        if (recvSizePerProc != NULL)
            delete []recvSizePerProc;
        recvSizePerProc = NULL;
        if (offsetBuffer != NULL)
            delete []offsetBuffer;
        offsetBuffer = NULL;
        if (tempRecvBuffer != NULL)
            delete []tempRecvBuffer;
        tempRecvBuffer = NULL;
    }
#endif
}