This article collects typical usage examples of the MPI_Free_mem function in C++. If you have been wondering what exactly MPI_Free_mem does, how to call it, or how it is used in real code, the hand-picked examples below should help.
The following shows 15 code examples of MPI_Free_mem, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
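Before the individual examples, here is a minimal, self-contained sketch (not taken from any of the examples below) of the basic contract all of these snippets rely on: memory obtained from MPI_Alloc_mem must be returned with MPI_Free_mem rather than free.

#include <mpi.h>
#include <string.h>

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);

    char *buf = NULL;
    /* Ask the MPI library for 1 MiB; implementations may return memory that is
     * registered or pinned for faster communication/RMA. */
    MPI_Alloc_mem(1 << 20, MPI_INFO_NULL, &buf);
    memset(buf, 0, 1 << 20);

    /* ... use buf as a send/receive buffer or RMA window base ... */

    /* Memory from MPI_Alloc_mem must be released with MPI_Free_mem. */
    MPI_Free_mem(buf);

    MPI_Finalize();
    return 0;
}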
Example 1: main
int main(int argc, char* argv[])
{
    MPI_Init(&argc, &argv);

    MPI_Comm MPI_COMM_NODE;
    MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &MPI_COMM_NODE);

    int n = (argc>1) ? atoi(argv[1]) : 1000;

    int wrank, wsize;
    MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
    MPI_Comm_size(MPI_COMM_WORLD, &wsize);

    /* rank/size within the shared-memory node communicator */
    int nrank, nsize;
    MPI_Comm_rank(MPI_COMM_NODE, &nrank);
    MPI_Comm_size(MPI_COMM_NODE, &nsize);

    char * buf1 = NULL;
    char * buf2 = NULL;
    MPI_Alloc_mem(n, MPI_INFO_NULL, &buf1);
    MPI_Alloc_mem(n, MPI_INFO_NULL, &buf2);
    memset(buf1, nrank==0 ? 'Z' : 'A', n);
    memset(buf2, nrank==0 ? 'Z' : 'A', n);

    double t0, t1, dt;
    for (int r=0; r<20; r++) {
        MPI_Barrier(MPI_COMM_WORLD);
        t0 = MPI_Wtime();
        MPI_Bcast(buf1, n, MPI_CHAR, 0, MPI_COMM_NODE);
        t1 = MPI_Wtime();
        dt = t1-t0;
        printf("%d: MPI_Bcast: %lf seconds, %lf MB/s \n", wrank, dt, n*(1.e-6/dt));
        fflush(stdout);

        MPI_Barrier(MPI_COMM_WORLD);
        t0 = MPI_Wtime();
        SMP_Bcast(buf2, n, MPI_CHAR, 0, MPI_COMM_NODE);  /* user-provided broadcast, defined elsewhere */
        t1 = MPI_Wtime();
        dt = t1-t0;
        printf("%d: SMP_Bcast: %lf seconds, %lf MB/s \n", wrank, dt, n*(1.e-6/dt));
        fflush(stdout);

        if (r==0) {
            char * tmp = (char*)malloc(n);
            memset(tmp, 'Z', n);
            int err1 = memcmp(tmp, buf1, n);
            int err2 = memcmp(tmp, buf2, n);
            if (err1>0 || err2>0) {
                printf("%d: errors: MPI (%d), SMP (%d) \n", wrank, err1, err2);
            }
            free(tmp);
        }
    }

    MPI_Free_mem(buf1);
    MPI_Free_mem(buf2);

    MPI_Comm_free(&MPI_COMM_NODE);
    MPI_Finalize();
    return 0;
}
Example 2: main
int main(int argc, char * argv[])
{
    const MPI_Count test_int_max = BigMPI_Get_max_int();

    MPI_Init(&argc, &argv);

    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (size<1) {
        printf("Use 1 or more processes. \n");
        MPI_Finalize();
        return 1;
    }

    int l = (argc > 1) ? atoi(argv[1]) : 2;
    int m = (argc > 2) ? atoi(argv[2]) : 17777;
    MPI_Count n = l * test_int_max + m;

    char * buf_send = NULL;
    char * buf_recv = NULL;

    MPI_Alloc_mem((MPI_Aint)n * size, MPI_INFO_NULL, &buf_send);
    assert(buf_send!=NULL);
    MPI_Alloc_mem((MPI_Aint)n, MPI_INFO_NULL, &buf_recv);
    assert(buf_recv!=NULL);

    if (rank==0) {
        for (int i = 0; i < size; ++i) {
            for (MPI_Count j = 0; j < n; ++j) {
                buf_send[i*n+j] = (unsigned char)i;
            }
        }
    }
    memset(buf_recv, -1, (size_t)n);

    /* collective communication */
    MPIX_Scatter_x(buf_send, n, MPI_CHAR,
                   buf_recv, n, MPI_CHAR,
                   0 /* root */, MPI_COMM_WORLD);

    size_t errors = verify_buffer(buf_recv, n, rank);

    MPI_Free_mem(buf_send);
    MPI_Free_mem(buf_recv);

    if (rank==0 && errors==0) {
        printf("SUCCESS\n");
    }

    MPI_Finalize();
    return 0;
}
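MPIX_Scatter_x and verify_buffer belong to the BigMPI test suite and are not reproduced on this page. As a rough sketch of what the verification step checks (an assumption about its behaviour, not BigMPI's actual source), verify_buffer can be read as counting bytes that differ from the receiving rank's expected fill value:

/* Sketch consistent with how buf_send is filled in Example 2: rank r expects
 * every received byte to equal (unsigned char)r. Illustration only. */
size_t verify_buffer(char *buf, MPI_Count n, int expected_rank)
{
    size_t errors = 0;
    for (MPI_Count j = 0; j < n; ++j) {
        if ((unsigned char)buf[j] != (unsigned char)expected_rank)
            errors++;
    }
    return errors;
}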
Example 3: main
int main (int argc, char *argv[])
{
    struct pe_vars v;
    long * msg_buffer;

    /*
     * Initialize
     */
    init_mpi(&v);
    check_usage(argc, argv, v.npes, v.me);
    print_header(v.me);

    if (v.me == 0) printf("Total processes = %d\n", v.npes);

    /*
     * Allocate Memory
     */
    msg_buffer = allocate_memory(v.me, &(v.win));
    memset(msg_buffer, 0, MAX_MSG_SZ * ITERS_LARGE * sizeof(long));

    /*
     * Time Put Message Rate
     */
    benchmark(msg_buffer, v.me, v.pairs, v.nxtpe, v.win);

    /*
     * Finalize
     */
    MPI_Win_unlock_all(v.win);
    MPI_Win_free(&v.win);
    MPI_Free_mem(msg_buffer);
    MPI_Finalize();

    return EXIT_SUCCESS;
}
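The allocate_memory helper used in Example 3 is not shown on this page. Judging only from the cleanup sequence above (MPI_Win_unlock_all, MPI_Win_free, MPI_Free_mem), a plausible sketch, which is an assumption rather than the benchmark's actual code, pairs MPI_Alloc_mem with a window and a passive-target access epoch:

/* Hypothetical counterpart of the cleanup in Example 3: allocate the message
 * buffer with MPI_Alloc_mem, expose it through a window, and open the
 * lock_all epoch that MPI_Win_unlock_all later closes. MAX_MSG_SZ and
 * ITERS_LARGE are the constants already used above. */
long * allocate_memory(int me, MPI_Win *win)
{
    long *buf = NULL;
    MPI_Aint bytes = MAX_MSG_SZ * ITERS_LARGE * sizeof(long);

    (void)me;   /* the rank is not needed in this sketch */
    MPI_Alloc_mem(bytes, MPI_INFO_NULL, &buf);
    MPI_Win_create(buf, bytes, sizeof(long), MPI_INFO_NULL, MPI_COMM_WORLD, win);
    MPI_Win_lock_all(MPI_MODE_NOCHECK, *win);

    return buf;
}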
Example 4: main
int main (int argc, char *argv[]) {
    int i;
    double w[NEL];
    MPI_Aint win_size, warr_size;
    MPI_Win *win;

    win_size = sizeof(MPI_Win);
    warr_size = sizeof(double) * NEL;   /* size of the exposed array in bytes */

    MPI_Init (&argc, &argv);
    for (i = 0; i < NTIMES; i++) {
        /* repeatedly allocate a window handle with MPI_Alloc_mem and release it again */
        MPI_Alloc_mem(win_size, MPI_INFO_NULL, &win);
        MPI_Win_create(w, warr_size, sizeof(double), MPI_INFO_NULL, MPI_COMM_WORLD, win);
        MPI_Win_free(win);
        MPI_Free_mem(win);
    }
    MPI_Finalize();

    return 0;
}
Example 5: IMB_del_r_buf
void IMB_del_r_buf(struct comm_info* c_info)
/*
    Deletes recv buffer component of c_info

    In/out variables:
        -c_info (type struct comm_info*)
            Collection of all base data for MPI;
            see [1] for more information
*/
{
    /* July 2002 V2.2.1 change: use MPI_Free_mem */
    if (c_info->r_alloc > 0)
    {
#if (defined EXT || defined MPIIO || defined RMA)
        MPI_Free_mem(c_info->r_buffer);
#else
        IMB_v_free((void**)&c_info->r_buffer);
#endif
        c_info->r_alloc = 0;
        c_info->r_buffer = NULL;
    }
}
Example 6: MTestFreeWin
/* Free the storage associated with a window object */
void MTestFreeWin(MPI_Win * win)
{
    void *addr;
    int flag, merr;

    merr = MPI_Win_get_attr(*win, MPI_WIN_BASE, &addr, &flag);
    if (merr)
        MTestPrintError(merr);
    if (!flag) {
        MTestError("Could not get WIN_BASE from window");
    }
    if (addr) {
        void *val;
        merr = MPI_Win_get_attr(*win, mem_keyval, &val, &flag);
        if (merr)
            MTestPrintError(merr);
        if (flag) {
            if (val == (void *) 1) {
                free(addr);
            }
            else if (val == (void *) 2) {
                merr = MPI_Free_mem(addr);
                if (merr)
                    MTestPrintError(merr);
            }
            /* if val == (void *)0, then static data that must not be freed */
        }
    }
    merr = MPI_Win_free(win);
    if (merr)
        MTestPrintError(merr);
}
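Example 6 relies on a window attribute (mem_keyval) whose value records how the window's base memory was obtained: 1 means malloc/free, 2 means MPI_Alloc_mem/MPI_Free_mem, 0 means static storage that must not be freed. The keyval is created elsewhere in the MPICH test harness; the following is only an illustrative sketch of how such an attribute could be attached at allocation time, not the harness's actual code.

/* Illustrative sketch (not MPICH test-suite code): tag a window with how its
 * base memory was obtained so a free path like Example 6 can pick free(),
 * MPI_Free_mem(), or no action. */
static int mem_keyval = MPI_KEYVAL_INVALID;

static MPI_Win create_tagged_win(MPI_Aint bytes, MPI_Comm comm, int use_alloc_mem)
{
    void *base = NULL;
    MPI_Win win;

    if (mem_keyval == MPI_KEYVAL_INVALID)
        MPI_Win_create_keyval(MPI_WIN_NULL_COPY_FN, MPI_WIN_NULL_DELETE_FN,
                              &mem_keyval, NULL);

    if (use_alloc_mem)
        MPI_Alloc_mem(bytes, MPI_INFO_NULL, &base);
    else
        base = malloc(bytes);

    MPI_Win_create(base, bytes, 1, MPI_INFO_NULL, comm, &win);

    /* 1 = release with free(), 2 = release with MPI_Free_mem() */
    if (use_alloc_mem)
        MPI_Win_set_attr(win, mem_keyval, (void *) 2);
    else
        MPI_Win_set_attr(win, mem_keyval, (void *) 1);

    return win;
}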
Example 7: main
int main(int argc, char *argv[])
{
    int errs = 0, err;
    int j, count;
    char *ap;

    MTest_Init(&argc, &argv);

    MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
    for (count = 1; count < 128000; count *= 2) {
        err = MPI_Alloc_mem(count, MPI_INFO_NULL, &ap);
        if (err) {
            int errclass;
            /* An error of MPI_ERR_NO_MEM is allowed */
            MPI_Error_class(err, &errclass);
            if (errclass != MPI_ERR_NO_MEM) {
                errs++;
                MTestPrintError(err);
            }
        } else {
            /* Access all of this memory */
            for (j = 0; j < count; j++) {
                ap[j] = (char) (j & 0x7f);
            }
            MPI_Free_mem(ap);
        }
    }

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
Example 8: sizeof
void SweptDiscretization2D::updateRemoteConstants(unsigned char *buffer)
{
    void *sendingBuffer = NULL;
    FILE *inFile = NULL;

    if (pg.rank == 0)
    {
        int bufferSize = this->remoteConstantsCount * n * n * pg.mpiSize * sizeof(double);
        MPI_Alloc_mem(bufferSize, MPI_INFO_NULL, &sendingBuffer);

        for (int r = 0; r < pg.mpiSize; r++)
        {
            double *processing = (double*)sendingBuffer + (this->remoteConstantsCount * n * n * r);
            int jIndex = (r % (pg.xNodes * pg.yNodes)) / pg.xNodes;
            int iIndex = r % pg.xNodes;
            for (int j = 0; j < n; j++)
            {
                for (int i = 0; i < n; i++)
                {
                    int iGlobal = n * iIndex + (i);
                    int jGlobal = n * jIndex + (j);
                    int index = this->ijToConstantIndex(i, j);
                    int globalIndex = this->remoteConstantsCount * (iGlobal + jGlobal * n * pg.xNodes);
                    for (int k = 0; k < this->remoteConstantsCount; k++)
                    {
                        processing[index + k] = ((double*)buffer)[k + globalIndex];
                    }
                }
            }
        }
    }

    MPI_Win_fence(MPI_MODE_NOPRECEDE, this->constantsWindow);
    if (pg.rank == 0)
    {
        for (int r = 0; r < pg.mpiSize; r++)
        {
            MPI_Put((unsigned char*)sendingBuffer + (r * remoteConstantsCount * n * n * sizeof(double)),
                    remoteConstantsCount * n * n * sizeof(double), MPI_BYTE,
                    r, 0,
                    remoteConstantsCount * n * n * sizeof(double), MPI_BYTE,
                    constantsWindow);
        }
    }
    MPI_Win_fence((MPI_MODE_NOSTORE | MPI_MODE_NOSUCCEED), this->constantsWindow);

    if (pg.rank == 0)
    {
        MPI_Free_mem(sendingBuffer);
    }

    for (int i = 1; i < n + 1; i++)
    {
        for (int j = 1; j < n + 1; j++)
        {
            for (int k = 0; k < this->remoteConstantsCount; k++)
            {
                int windowIndex = this->ijToConstantIndex(i - 1, j - 1);
                int foundationIndex = this->ijToIndex(i, j);
                this->foundation[foundationIndex + k] = this->remoteConstants[windowIndex + k];
            }
        }
    }
}
Example 9: MPI_Free_mem
int MPI_Free_mem(void* baseptr)
{
    if (max_ep > 0)
    {
        /* memory is managed by the endpoints library, so release it there */
        EPLIB_free(baseptr);
        return MPI_SUCCESS;
    }

    /* otherwise defer to the MPI library via the profiling interface */
    return PMPI_Free_mem(baseptr);
}
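Example 9 overrides MPI_Free_mem so that buffers managed by the endpoints library (EPLIB) are released through it, and defers to the MPI implementation (via the PMPI profiling interface) otherwise. The allocation-side interceptor is not shown on this page; the sketch below is only a guess at its shape, and EPLIB_malloc is an assumption rather than a verified EPLIB entry point.

/* Hypothetical allocation-side counterpart of Example 9 (not taken from EPLIB):
 * route MPI_Alloc_mem through the endpoints library when it is active,
 * otherwise defer to the real implementation through PMPI. */
int MPI_Alloc_mem(MPI_Aint size, MPI_Info info, void *baseptr)
{
    if (max_ep > 0)
    {
        void *ptr = EPLIB_malloc((size_t)size);   /* assumed EPLIB allocator */
        if (ptr == NULL)
            return MPI_ERR_NO_MEM;
        *(void **)baseptr = ptr;
        return MPI_SUCCESS;
    }
    return PMPI_Alloc_mem(size, info, baseptr);
}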
Example 10: main
int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);

    int my_rank; // Number of the node
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    int node_count; // Total number of nodes
    MPI_Comm_size(MPI_COMM_WORLD, &node_count);

    // The root must load the input data to distribute to the other nodes.
    // (array, items, sum and sum__sequential_reference_implementation() are defined elsewhere in this example's file.)
    if (my_rank == 0) {
        // In our case it generates a random array as input data
        srand(time(NULL));
        for (int item = 0; item < items; ++item)
            array[item] = rand();
    }

    int items_per_rank = items / node_count;
    int remainder_items = items % node_count;
    int* my_work;
    MPI_Alloc_mem(items_per_rank * sizeof(int), MPI_INFO_NULL, &my_work);

    // MPI_Scatter is a collective operation which distributes an equal-sized part of the given array to each node.
    MPI_Scatter(&array[remainder_items] /* send buffer */, items_per_rank /* send count per node */, MPI_INT /* send type */,
                my_work /* receive buffer on each node */, items_per_rank /* receive count */, MPI_INT /* receive type */,
                0 /* send buffer is stored on this rank */, MPI_COMM_WORLD /* communication channel */);

    // This is the actual working-loop
    long sub_sum = 0;
    for (int i = 0; i < items_per_rank; i++)
        sub_sum += my_work[i];

    if (my_rank == 0) { // Scatter cannot deal with a division remainder, so we handle it manually
        while (remainder_items > 0)
            sub_sum += array[--remainder_items];
    }

    MPI_Free_mem(my_work);

    // MPI_Reduce with op-code MPI_SUM is a collective operation which sums up the input sub_sum of each node
    // into a single resulting output sum on the master.
    MPI_Reduce(&sub_sum /* input to sum up */, &sum /* output */, 1 /* input count */, MPI_LONG /* input type */,
               MPI_SUM /* operation */, 0 /* output is stored on this rank */, MPI_COMM_WORLD /* communication channel */);

    if (my_rank == 0) {
        // The result of the computation now is available on rank 0.
        // We compare it with the sequential reference implementation to test our parallel implementation.
        if (sum == sum__sequential_reference_implementation())
            fprintf(stderr, "Test OK.\n");
        else
            fprintf(stderr, "Test FAILED!\n");
    }

    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Finalize();
    return EXIT_SUCCESS;
}
Example 11: MPI_Alloc_mem
void SweptDiscretization2D::allGatherAllOutputToFile(string filename)
{
    void *buffer = NULL;
    FILE *output;

    if (pg.rank == 0)
    {
        MPI_Alloc_mem(foundationSize * pg.mpiSize * sizeof(double), MPI_INFO_NULL, &buffer);
        output = fopen(filename.c_str(), "wb");
    }

    MPI_Win_fence((MPI_MODE_NOPUT | MPI_MODE_NOPRECEDE), foundationWindow);
    if (pg.rank == 0)
    {
        for (int r = 0; r < pg.mpiSize; r++)
        {
            MPI_Get((char*)buffer + (r * foundationSize * sizeof(double)),
                    foundationSize * sizeof(double), MPI_BYTE,
                    r, 0,
                    foundationSize * sizeof(double), MPI_BYTE,
                    foundationWindow);
        }
    }
    MPI_Win_fence(MPI_MODE_NOSUCCEED, foundationWindow);

    if (pg.rank == 0)
    {
        int w = (n * pg.xNodes);
        int h = (n * pg.yNodes);
        int resultArraySize = w * h;
        if (resultArray == NULL)
            resultArray = (double*) malloc(resultArraySize * sizeof(double) * outputLength);

        for (int r = 0; r < pg.mpiSize; r++)
        {
            double *processing = (double*)buffer + (foundationSize * r);
            int jIndex = (r % (pg.xNodes * pg.yNodes)) / pg.xNodes;
            int iIndex = r % pg.xNodes;
            for (int j = 1; j < n + 1; j++)
            {
                for (int i = 1; i < n + 1; i++)
                {
                    int iGlobal = n * iIndex + (i - 1);
                    int jGlobal = n * jIndex + (j - 1);
                    int index = this->ijToIndex(i, j);
                    for (int point = 0; point < outputLength; point++)
                    {
                        double val = processing[index + constants + point];
                        int resultIndex = (iGlobal + jGlobal * n * pg.xNodes) * outputLength + point;
                        resultArray[resultIndex] = val;
                    }
                }
            }
        }
        fwrite((const void*)resultArray, sizeof(double), (size_t)resultArraySize * outputLength, output);
        fclose(output);
        MPI_Free_mem(buffer);
    }
    MPI_Barrier(MPI_COMM_WORLD);
}
Example 12: _ZMPI_Alltoall_int_proclists_put
static int _ZMPI_Alltoall_int_proclists_put(int alloc_mem, int nphases, int *sendbuf, int nsprocs, int *sprocs, int *recvbuf, int nrprocs, int *rprocs, MPI_Comm comm)
{
    int i, p, size, rank, *rcounts_put;
    MPI_Win win;

    MPI_Comm_size(comm, &size);
    MPI_Comm_rank(comm, &rank);

    if (alloc_mem) MPI_Alloc_mem(size * sizeof(int), MPI_INFO_NULL, &rcounts_put);
    else rcounts_put = recvbuf;

    if (nrprocs >= 0)
        for (i = 0; i < nrprocs; ++i) rcounts_put[rprocs[i]] = DEFAULT_INT;
    else
        for (i = 0; i < size; ++i) rcounts_put[i] = DEFAULT_INT;

    MPI_Win_create(rcounts_put, size * sizeof(int), sizeof(int), MPI_INFO_NULL, comm, &win);
    MPI_Win_fence(MPI_MODE_NOSTORE | MPI_MODE_NOPRECEDE, win);

    for (p = 0; p < nphases; ++p)
    {
        /* printf("%d: phase = %d of %d\n", rank, p, nphases); */
        if (rank % nphases == p)
        {
            if (nsprocs >= 0)
            {
                for (i = 0; i < nsprocs; ++i)
                    if (sendbuf[sprocs[i]] != DEFAULT_INT)
                        MPI_Put(&sendbuf[sprocs[i]], 1, MPI_INT, sprocs[i], rank, 1, MPI_INT, win);
            } else
            {
                for (i = 0; i < size; ++i)
                    if (sendbuf[i] != DEFAULT_INT)
                        MPI_Put(&sendbuf[i], 1, MPI_INT, i, rank, 1, MPI_INT, win);
            }
        }

        if (p < nphases - 1) MPI_Win_fence(0, win);
    }

    MPI_Win_fence(MPI_MODE_NOPUT | MPI_MODE_NOSUCCEED, win);
    MPI_Win_free(&win);

    if (alloc_mem)
    {
        if (nrprocs >= 0)
            for (i = 0; i < nrprocs; ++i) recvbuf[rprocs[i]] = rcounts_put[rprocs[i]];
        else
            for (i = 0; i < size; ++i) recvbuf[i] = rcounts_put[i];

        MPI_Free_mem(rcounts_put);
    }

    return MPI_SUCCESS;
}
Example 13: mpp_free
void mpp_free (void *buf)
{
#if HAVE_MPI_ALLOC_MEM
    if (use_mpi_alloc)
        MPI_Free_mem (buf);
    else
#endif
        free (buf);
    return;
}
Example 14: socket_freeMem
/**
 * Wrapper around MPI_Free_mem for systems that may not provide it (MPI-1 implementations).
 */
static void socket_freeMem(socket_t * s)
{
    assert(s->buffer);

#ifndef MC_NO_MPI_ALLOC_MEM
    MPI_Free_mem(s->buffer);
#else
    free(s->buffer);
#endif
    s->buffer = 0;
}
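The matching allocation wrapper is not included on this page. A minimal sketch under the same MC_NO_MPI_ALLOC_MEM switch might look like the following; the function name and signature are assumptions made purely for illustration.

/* Hypothetical counterpart of socket_freeMem (a sketch, not the project's
 * actual code): obtain s->buffer either from MPI_Alloc_mem or from malloc,
 * under the same MC_NO_MPI_ALLOC_MEM switch. */
static void socket_allocMem(socket_t * s, size_t bytes)
{
    assert(!s->buffer);

#ifndef MC_NO_MPI_ALLOC_MEM
    MPI_Alloc_mem((MPI_Aint)bytes, MPI_INFO_NULL, &s->buffer);
#else
    s->buffer = malloc(bytes);
#endif
    assert(s->buffer);
}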
Example 15: MPI_Alloc_mem
void mpiofstream::flush()
{
    MPI_Status status;
    char* buf;

    MPI_Alloc_mem(ss.str().length() + 1, MPI_INFO_NULL, &buf);
    strcpy(buf, ss.str().c_str());
    MPI_File_write_shared(fh, buf, ss.str().length(), MPI_CHAR, &status);
    MPI_Free_mem(buf);

    ss.str("");
}