This article collects typical usage examples of the MPI_Type_get_extent function in C++. If you have been wondering what exactly MPI_Type_get_extent does, how to call it, or what it looks like in real code, the hand-picked examples below should help.
The following 15 code examples of MPI_Type_get_extent are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps surface better C++ code samples.
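Before diving into the examples, here is a minimal, self-contained sketch of what MPI_Type_get_extent reports: the lower bound of a datatype and its extent, i.e. the stride in bytes between consecutive elements of that type in a buffer. The struct and all names below are illustrative assumptions, not taken from any example on this page; note that the extent can differ from MPI_Type_size, which counts only the bytes of actual data.
#include <mpi.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical struct, used only to illustrate the call. */
struct pair { int tag; double value; };

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    /* Build a struct datatype matching 'struct pair'. */
    int          blocklens[2] = { 1, 1 };
    MPI_Aint     displs[2]    = { offsetof(struct pair, tag), offsetof(struct pair, value) };
    MPI_Datatype types[2]     = { MPI_INT, MPI_DOUBLE };
    MPI_Datatype pairtype;
    MPI_Type_create_struct(2, blocklens, displs, types, &pairtype);
    MPI_Type_commit(&pairtype);
    /* extent = stride between consecutive elements; size = bytes of actual data */
    MPI_Aint lb, extent;
    int      size;
    MPI_Type_get_extent(pairtype, &lb, &extent);
    MPI_Type_size(pairtype, &size);
    printf("lb = %ld, extent = %ld, size = %d, sizeof(struct pair) = %zu\n",
           (long)lb, (long)extent, size, sizeof(struct pair));
    MPI_Type_free(&pairtype);
    MPI_Finalize();
    return 0;
}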
Example 1: dtcmp_select_local_randpartition_keys
static int dtcmp_select_local_randpartition_keys(
void* buf,
int num,
int k,
void* item,
MPI_Datatype key,
DTCMP_Op cmp,
DTCMP_Flags hints)
{
/* randomly pick a pivot value */
int pivot = rand_r(&dtcmp_rand_seed) % num;
/* partition around this value, and determine the rank of the pivot value */
int pivot_rank;
DTCMP_Partition_local_dtcpy(buf, num, pivot, &pivot_rank, key, key, cmp, hints);
/* compare the rank of the pivot to the target rank we're looking for */
if (k < pivot_rank) {
/* the item is smaller than the pivot, so recurse into lower half of array */
int num_left = pivot_rank;
int rc = dtcmp_select_local_randpartition_keys(buf, num_left, k, item, key, cmp, hints);
return rc;
} else if (k > pivot_rank) {
/* the item is larger than the pivot, so recurse into upper half of array */
/* get lower bound and extent of key */
MPI_Aint lb, extent;
MPI_Type_get_extent(key, &lb, &extent);
/* adjust pointer into array, rank, and number of remaining items */
int after_pivot = pivot_rank + 1;
char* offset = (char*)buf + after_pivot * extent;
int num_left = num - after_pivot;
int new_k = k - after_pivot;
int rc = dtcmp_select_local_randpartition_keys(offset, num_left, new_k, item, key, cmp, hints);
return rc;
} else { /* k == pivot_rank */
/* in this case, the pivot rank is the target rank we're looking for,
* copy the pivot item into the output item and return */
/* get lower bound and extent of key */
MPI_Aint lb, extent;
MPI_Type_get_extent(key, &lb, &extent);
/* copy the pivot value into item and return */
char* pivot_offset = (char*)buf + pivot_rank * extent;
DTCMP_Memcpy(item, 1, key, pivot_offset, 1, key);
return DTCMP_SUCCESS;
}
}
Example 2: ZMPI_Alltoallv_proclists
int ZMPI_Alltoallv_proclists(void* sendbuf, int *sendcounts, int *sdispls, MPI_Datatype sendtype, int nsendprocs, int *sendprocs, void* recvbuf, int *recvcounts, int *rdispls, MPI_Datatype recvtype, int nrecvprocs, int *recvprocs, MPI_Comm comm) /* zmpi_func ZMPI_Alltoallv_proclists */
{
int i, j;
const int tag = 0;
int nreqs;
MPI_Request *reqs;
MPI_Status *stats;
MPI_Aint sendtype_lb, sendtype_extent, recvtype_lb, recvtype_extent;
reqs = z_alloc(nrecvprocs + nsendprocs, sizeof(MPI_Request));
stats = z_alloc(nrecvprocs + nsendprocs, sizeof(MPI_Status));
MPI_Type_get_extent(sendtype, &sendtype_lb, &sendtype_extent);
MPI_Type_get_extent(recvtype, &recvtype_lb, &recvtype_extent);
nreqs = 0;
for (i = 0; i < nrecvprocs; ++i)
{
j = recvprocs[i];
if (recvcounts[j] > 0)
{
MPI_Irecv(((char *) recvbuf) + (rdispls[j] * recvtype_extent), recvcounts[j], recvtype, j, tag, comm, &reqs[nreqs]);
++nreqs;
}
}
for (i = 0; i < nsendprocs; ++i)
{
j = sendprocs[i];
if (sendcounts[j] > 0)
{
MPI_Isend(((char *) sendbuf) + (sdispls[j] * sendtype_extent), sendcounts[j], sendtype, j, tag, comm, &reqs[nreqs]);
++nreqs;
}
}
MPI_Waitall(nreqs, reqs, stats);
z_free(reqs);
z_free(stats);
return MPI_SUCCESS;
}
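As a hedged usage sketch of the function above, the fragment below (intended to sit inside an MPI program; everything besides the ZMPI_Alltoallv_proclists call itself is an assumption for illustration) exchanges one int between exactly two ranks. The counts and displacements are indexed by peer rank, while the proclists name which peers actually take part, as the loops in the function body require.
/* Hypothetical usage sketch, assuming exactly two ranks: each rank sends one
 * int to the other. counts/displs are indexed by peer rank; the proclists
 * name which peers actually participate. */
int rank, peer;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
peer = 1 - rank;

int sendcounts[2] = {0, 0}, recvcounts[2] = {0, 0};
int sdispls[2]    = {0, 0}, rdispls[2]    = {0, 0};
int sendprocs[1]  = {peer}, recvprocs[1]  = {peer};
int sendbuf[1]    = {rank}, recvbuf[1];

sendcounts[peer] = 1;
recvcounts[peer] = 1;

ZMPI_Alltoallv_proclists(sendbuf, sendcounts, sdispls, MPI_INT, 1, sendprocs,
                         recvbuf, recvcounts, rdispls, MPI_INT, 1, recvprocs,
                         MPI_COMM_WORLD);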
Example 3: MPI_Type_get_extent
/*
* Initialize buffer of basic datatype
*/
static void *MTestTypeContigInit(MTestDatatype * mtype)
{
MPI_Aint extent = 0, lb = 0, size;
int merr;
if (mtype->count > 0) {
unsigned char *p;
MPI_Aint i, totsize;
merr = MPI_Type_get_extent(mtype->datatype, &lb, &extent);
if (merr)
MTestPrintError(merr);
size = extent + lb;
totsize = size * mtype->count;
if (!mtype->buf) {
mtype->buf = (void *) malloc(totsize);
}
p = (unsigned char *) (mtype->buf);
if (!p) {
char errmsg[128] = { 0 };
sprintf(errmsg, "Out of memory in %s", __FUNCTION__);
MTestError(errmsg);
}
for (i = 0; i < totsize; i++) {
p[i] = (unsigned char) (0xff ^ (i & 0xff));
}
}
else {
if (mtype->buf) {
free(mtype->buf);
}
mtype->buf = 0;
}
return mtype->buf;
}
Example 4: MakeDatatype
static PetscErrorCode MakeDatatype(MPI_Datatype *dtype)
{
PetscErrorCode ierr;
MPI_Datatype dtypes[3],tmptype;
PetscMPIInt lengths[3];
MPI_Aint displs[3];
Unit dummy;
PetscFunctionBegin;
dtypes[0] = MPIU_INT;
dtypes[1] = MPIU_SCALAR;
dtypes[2] = MPI_CHAR;
lengths[0] = 1;
lengths[1] = 1;
lengths[2] = 3;
/* Curse the evil beings that made std::complex a non-POD type. */
displs[0] = (char*)&dummy.rank - (char*)&dummy; /* offsetof(Unit,rank); */
displs[1] = (char*)&dummy.value - (char*)&dummy; /* offsetof(Unit,value); */
displs[2] = (char*)&dummy.ok - (char*)&dummy; /* offsetof(Unit,ok); */
ierr = MPI_Type_create_struct(3,lengths,displs,dtypes,&tmptype);CHKERRQ(ierr);
ierr = MPI_Type_commit(&tmptype);CHKERRQ(ierr);
ierr = MPI_Type_create_resized(tmptype,0,sizeof(Unit),dtype);CHKERRQ(ierr);
ierr = MPI_Type_commit(dtype);CHKERRQ(ierr);
ierr = MPI_Type_free(&tmptype);CHKERRQ(ierr);
{
MPI_Aint lb,extent;
ierr = MPI_Type_get_extent(*dtype,&lb,&extent);CHKERRQ(ierr);
if (extent != sizeof(Unit)) SETERRQ2(PETSC_COMM_WORLD,PETSC_ERR_LIB,"New type has extent %d != sizeof(Unit) %d",extent,(int)sizeof(Unit));
}
PetscFunctionReturn(0);
}
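The Unit struct itself is not shown in this excerpt. Judging from the member names and the datatypes used above (one MPIU_INT, one MPIU_SCALAR, three MPI_CHAR), it presumably looks roughly like the following sketch; the field names are taken from the code, everything else is an assumption.
typedef struct {
  PetscInt    rank;   /* described by MPIU_INT     */
  PetscScalar value;  /* described by MPIU_SCALAR  */
  char        ok[3];  /* described by 3 x MPI_CHAR */
} Unit;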
Example 5: mpi_type_get_extent_f
void mpi_type_get_extent_f(MPI_Fint *type, MPI_Aint *lb,
MPI_Aint *extent, MPI_Fint *ierr)
{
MPI_Datatype c_type = MPI_Type_f2c(*type);
*ierr = OMPI_INT_2_FINT(MPI_Type_get_extent(c_type, lb, extent));
}
Example 6: PetscCommBuildTwoSided_Ibarrier
static PetscErrorCode PetscCommBuildTwoSided_Ibarrier(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata)
{
PetscErrorCode ierr;
PetscMPIInt nrecvs,tag,done,i;
MPI_Aint lb,unitbytes;
char *tdata;
MPI_Request *sendreqs,barrier;
PetscSegBuffer segrank,segdata;
PetscFunctionBegin;
ierr = PetscCommDuplicate(comm,&comm,&tag);CHKERRQ(ierr);
ierr = MPI_Type_get_extent(dtype,&lb,&unitbytes);CHKERRQ(ierr);
if (lb != 0) SETERRQ1(comm,PETSC_ERR_SUP,"Datatype with nonzero lower bound %ld\n",(long)lb);
tdata = (char*)todata;
ierr = PetscMalloc1(nto,&sendreqs);CHKERRQ(ierr);
for (i=0; i<nto; i++) {
ierr = MPI_Issend((void*)(tdata+count*unitbytes*i),count,dtype,toranks[i],tag,comm,sendreqs+i);CHKERRQ(ierr);
}
ierr = PetscSegBufferCreate(sizeof(PetscMPIInt),4,&segrank);CHKERRQ(ierr);
ierr = PetscSegBufferCreate(unitbytes,4*count,&segdata);CHKERRQ(ierr);
nrecvs = 0;
barrier = MPI_REQUEST_NULL;
for (done=0; !done; ) {
PetscMPIInt flag;
MPI_Status status;
ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag,comm,&flag,&status);CHKERRQ(ierr);
if (flag) { /* incoming message */
PetscMPIInt *recvrank;
void *buf;
ierr = PetscSegBufferGet(segrank,1,&recvrank);CHKERRQ(ierr);
ierr = PetscSegBufferGet(segdata,count,&buf);CHKERRQ(ierr);
*recvrank = status.MPI_SOURCE;
ierr = MPI_Recv(buf,count,dtype,status.MPI_SOURCE,tag,comm,MPI_STATUS_IGNORE);CHKERRQ(ierr);
nrecvs++;
}
if (barrier == MPI_REQUEST_NULL) {
PetscMPIInt sent,nsends;
ierr = PetscMPIIntCast(nto,&nsends);CHKERRQ(ierr);
ierr = MPI_Testall(nsends,sendreqs,&sent,MPI_STATUSES_IGNORE);CHKERRQ(ierr);
if (sent) {
#if defined(PETSC_HAVE_MPI_IBARRIER)
ierr = MPI_Ibarrier(comm,&barrier);CHKERRQ(ierr);
#elif defined(PETSC_HAVE_MPIX_IBARRIER)
ierr = MPIX_Ibarrier(comm,&barrier);CHKERRQ(ierr);
#endif
ierr = PetscFree(sendreqs);CHKERRQ(ierr);
}
} else {
ierr = MPI_Test(&barrier,&done,MPI_STATUS_IGNORE);CHKERRQ(ierr);
}
}
*nfrom = nrecvs;
ierr = PetscSegBufferExtractAlloc(segrank,fromranks);CHKERRQ(ierr);
ierr = PetscSegBufferDestroy(&segrank);CHKERRQ(ierr);
ierr = PetscSegBufferExtractAlloc(segdata,fromdata);CHKERRQ(ierr);
ierr = PetscSegBufferDestroy(&segdata);CHKERRQ(ierr);
ierr = PetscCommDestroy(&comm);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
Example 7: type_create_contiguous_x
static int type_create_contiguous_x(MPI_Count count,
MPI_Datatype oldtype, MPI_Datatype *newtype)
{
/* to make 'count' fit MPI-3 type processing routines (which take integer
* counts), we construct a type consisting of N INT_MAX chunks followed by
* a remainder. e.g for a count of 4000000000 bytes you would end up with
* one 2147483647-byte chunk followed immediately by a 1852516353-byte
* chunk */
MPI_Datatype chunks, remainder;
MPI_Aint lb, extent, disps[2];
int blocklens[2];
MPI_Datatype types[2];
MPI_Count c = count/INT_MAX;
MPI_Count r = count%INT_MAX;
MPI_Type_vector(c, INT_MAX, INT_MAX, oldtype, &chunks);
MPI_Type_contiguous(r, oldtype, &remainder);
MPI_Type_get_extent(oldtype, &lb, &extent);
blocklens[0] = 1; blocklens[1] = 1;
disps[0] = 0; disps[1] = c*extent*INT_MAX;
types[0] = chunks; types[1] = remainder;
MPI_Type_create_struct(2, blocklens, disps, types, newtype);
MPI_Type_free(&chunks);
MPI_Type_free(&remainder);
return MPI_SUCCESS;
}
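A rough usage sketch follows; only type_create_contiguous_x is from the example above, the rest (buffer, count, peer) is assumed for illustration. It builds one datatype describing a byte count larger than INT_MAX, commits it, and uses it with an ordinary count of 1.
/* Hypothetical usage sketch: describe ~4 GB with a single datatype. */
MPI_Count nbytes = 4000000000LL;
MPI_Datatype bigtype;
type_create_contiguous_x(nbytes, MPI_BYTE, &bigtype);
MPI_Type_commit(&bigtype);

/* the whole buffer can now be sent with an ordinary count of 1, e.g.
 * MPI_Send(buf, 1, bigtype, dest, tag, comm); */

MPI_Type_free(&bigtype);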
Example 8: MTestTypeContigCheckbuf
/*
* Check value of received basic datatype buffer.
*/
static int MTestTypeContigCheckbuf(MTestDatatype * mtype)
{
unsigned char *p;
unsigned char expected;
int err = 0, merr;
MPI_Aint i, totsize, size, extent = 0, lb = 0;
p = (unsigned char *) mtype->buf;
if (p) {
merr = MPI_Type_get_extent(mtype->datatype, &lb, &extent);
if (merr)
MTestPrintError(merr);
size = lb + extent;
totsize = size * mtype->count;
for (i = 0; i < totsize; i++) {
expected = (unsigned char) (0xff ^ (i & 0xff));
if (p[i] != expected) {
err++;
if (mtype->printErrors && err < 10) {
printf("Data expected = %x but got p[%ld] = %x\n", expected, i, p[i]);
fflush(stdout);
}
}
}
}
return err;
}
Example 9: do_test_for_ddt
static int do_test_for_ddt( MPI_Datatype sddt, MPI_Datatype rddt, int length )
{
int i;
MPI_Aint lb, extent;
char *sbuf, *rbuf;
MPI_Type_get_extent( sddt, &lb, &extent );
sbuf = (char*)malloc( length );
rbuf = (char*)malloc( length );
printf( "# Isend recv\n" );
for( i = 1; i <= (length/extent); i *= 2 ) {
isend_recv( 10, sddt, i, sbuf, rddt, i, rbuf );
}
printf( "# Isend Irecv Wait\n" );
for( i = 1; i <= (length/extent); i *= 2 ) {
isend_irecv_wait( 10, sddt, i, sbuf, rddt, i, rbuf );
}
printf( "# Irecv send\n" );
for( i = 1; i <= (length/extent); i *= 2 ) {
irecv_send( 10, sddt, i, sbuf, rddt, i, rbuf );
}
printf( "# Irecv Isend Wait\n" );
for( i = 1; i <= (length/extent); i *= 2 ) {
irecv_isend_wait( 10, sddt, i, sbuf, rddt, i, rbuf );
}
free( sbuf );
free( rbuf );
return 0;
}
Example 10: copy_key_if_valid
void copy_key_if_valid(
void* invec,
void* inoutvec,
int* len,
MPI_Datatype* type)
{
/* get extent of user's datatype */
MPI_Aint lb, extent;
MPI_Type_get_extent(*type, &lb, &extent);
/* get pointers to start of input and output buffers */
char* inbuf = (char*) invec;
char* outbuf = (char*) inoutvec;
/* loop over each element provided in call */
int i = 0;
while (i < *len) {
/* if our current entry is valid, keep it,
* otherwise just copy over whatever first value is */
int valid2 = *(int*) outbuf; /* read the flag of the current output element, not always the first one */
if (!valid2) {
/* TODO: if type is big, could optimize by avoiding copy
* if inbuf is also not valid */
/* copy value from inbuf to outbuf */
DTCMP_Memcpy(outbuf, 1, *type, inbuf, 1, *type);
}
/* increment pointers to handle next element */
inbuf += extent;
outbuf += extent;
i++;
}
}
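Because copy_key_if_valid matches the MPI_User_function signature, it is presumably registered as a user-defined reduction operator somewhere in DTCMP's setup code, which is not shown here. A minimal, hedged sketch of such a registration follows; the commutativity flag is an assumption.
MPI_Op keep_valid_op;
/* the commutativity flag (1) is an assumption made for this sketch */
MPI_Op_create(copy_key_if_valid, 1, &keep_valid_op);

/* ... use keep_valid_op with MPI_Reduce/MPI_Allreduce on the matching
 * (valid flag, key) datatype ... */

MPI_Op_free(&keep_valid_op);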
Example 11: print_schedule
static void print_schedule(active_schedule_t *schedule)
{
int global_self;
int self;
MPI_Aint lb;
MPI_Aint extent;
MPI_Comm_rank(MPI_COMM_WORLD, &global_self);
MPI_Comm_rank(schedule->comm, &self);
MPI_Type_get_extent(schedule->type, &lb, &extent);
fprintf(stderr,
"%d: schedule=%p, comm=%p, "
"type=%p(lb=%ld, extent=%ld), count=%d, %d %s %d\n",
global_self,
schedule,
(void *) schedule->comm,
(void *) schedule->type,
(long) lb,
(long) extent,
schedule->count,
self,
schedule->direction == ACTIVE_SCHEDULE_SEND ? " -> " : " <- ",
schedule->peer);
}
Example 12: transpose_type
/* Extract an m x n submatrix within an m x N matrix and transpose it.
Assume storage by rows; the defined datatype accesses by columns */
MPI_Datatype transpose_type(int N, int m, int n, MPI_Datatype type)
/* computes a datatype for the transpose of an mxn matrix
with entries of type type */
{
MPI_Datatype subrow, subrow1, submatrix;
MPI_Aint lb, extent;
MPI_Type_vector(m, 1, N, type, &subrow);
MPI_Type_get_extent(type, &lb, &extent);
MPI_Type_create_resized(subrow, 0, extent, &subrow1);
MPI_Type_contiguous(n, subrow1, &submatrix);
MPI_Type_commit(&submatrix);
MPI_Type_free( &subrow );
MPI_Type_free( &subrow1 );
/* Add a consistency test: the size of submatrix should be
n * m * sizeof(type) and the extent should be ((m-1)*N+n) * sizeof(type) */
{
int tsize;
MPI_Aint textent, llb;
MPI_Type_size( type, &tsize );
MPI_Type_get_true_extent( submatrix, &llb, &textent );
if (textent != tsize * (N * (m-1)+n)) {
fprintf( stderr, "Transpose Submatrix extent is %ld, expected %ld (%d,%d,%d)\n",
(long)textent, (long)(tsize * (N * (m-1)+n)), N, n, m );
}
}
return(submatrix);
}
Example 13: submatrix_type
/* Define an n x m submatrix in a n x M local matrix (this is the
destination in the transpose matrix */
MPI_Datatype submatrix_type(int M, int m, int n, MPI_Datatype type)
/* computes a datatype for an mxn submatrix within an MxN matrix
with entries of type type */
{
/* MPI_Datatype subrow; */
MPI_Datatype submatrix;
/* The book, MPI: The Complete Reference, has the wrong type constructor
here. Since the stride in the vector type is relative to the input
type, the stride in the book's code is n times as long as is intended.
Since n may not exactly divide N, it is better to simply use the
blocklength argument in Type_vector */
/*
MPI_Type_contiguous(n, type, &subrow);
MPI_Type_vector(m, 1, N, subrow, &submatrix);
*/
MPI_Type_vector(n, m, M, type, &submatrix );
MPI_Type_commit(&submatrix);
/* Add a consistency test: the size of submatrix should be
n * m * sizeof(type) and the extent should be ((n-1)*M+m) * sizeof(type) */
{
int tsize;
MPI_Aint textent, lb;
MPI_Type_size( type, &tsize );
MPI_Type_get_extent( submatrix, &lb, &textent );
if (textent != tsize * (M * (n-1)+m)) {
fprintf( stderr, "Submatrix extent is %ld, expected %ld (%d,%d,%d)\n",
(long)textent, (long)(tsize * (M * (n-1)+m)), M, n, m );
}
}
return(submatrix);
}
Example 14: BigMPI_Convert_vectors
/*
* Synopsis
*
* void convert_vectors(..)
*
* Input Parameter
*
* int num length of all vectors (unless splat true)
* int splat_old_count if non-zero, use oldcount instead of iterating over vector (v-to-w)
* MPI_Count oldcount single count (ignored if splat_old_count==0)
* MPI_Count oldcounts vector of counts
* int splat_old_type if non-zero, use oldtype instead of iterating over vector (v-to-w)
* MPI_Datatype oldtype single type (MPI_DATATYPE_NULL if splat_old_type==0)
* MPI_Datatype oldtypes vector of types (NULL if splat_old_type!=0)
* int zero_new_displs set the displacement to zero (scatter/gather)
* MPI_Aint olddispls vector of displacements (NULL if zero_new_displs!=0)
*
* Output Parameters
*
* int newcounts
* MPI_Datatype newtypes
* MPI_Aint newdispls
*
*/
void BigMPI_Convert_vectors(int num,
int splat_old_count,
const MPI_Count oldcount,
const MPI_Count oldcounts[],
int splat_old_type,
const MPI_Datatype oldtype,
const MPI_Datatype oldtypes[],
int zero_new_displs,
const MPI_Aint olddispls[],
int newcounts[],
MPI_Datatype newtypes[],
MPI_Aint newdispls[])
{
assert(splat_old_count || (oldcounts!=NULL));
assert(splat_old_type || (oldtypes!=NULL));
assert(zero_new_displs || (olddispls!=NULL));
MPI_Aint lb /* unused */, oldextent;
if (splat_old_type) {
MPI_Type_get_extent(oldtype, &lb, &oldextent);
} else {
/* !splat_old_type implies ALLTOALLW, which implies no displacement zeroing. */
assert(!zero_new_displs);
}
for (int i=0; i<num; i++) {
/* counts */
newcounts[i] = 1;
/* types */
MPIX_Type_contiguous_x(splat_old_count ? oldcount : oldcounts[i], splat_old_type ? oldtype : oldtypes[i], &newtypes[i]);
MPI_Type_commit(&newtypes[i]);
/* displacements */
MPI_Aint newextent;
/* If we are not splatting old type, it implies ALLTOALLW,
* which does not scale the displacement by the type extent,
* nor would we ever zero the displacements. */
if (splat_old_type) {
MPI_Type_get_extent(newtypes[i], &lb, &newextent);
newdispls[i] = (zero_new_displs ? 0 : olddispls[i]*oldextent/newextent);
} else {
newdispls[i] = olddispls[i];
}
}
return;
}
Example 15: MPI_File_set_view
/*
MPICH does not provide the external32 representation for MPI_File_set_view() so we need to provide the functions.
These are set into MPI in PetscInitialize() via MPI_Register_datarep()
Note I use PetscMPIInt for the MPI error codes since that is what MPI uses (instead of the standard PetscErrorCode)
The next three routines are not used because MPICH does not support their use
*/
PETSC_EXTERN PetscMPIInt PetscDataRep_extent_fn(MPI_Datatype datatype,MPI_Aint *file_extent,void *extra_state)
{
MPI_Aint ub;
PetscMPIInt ierr;
ierr = MPI_Type_get_extent(datatype,&ub,file_extent);
return ierr;
}
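The comment above says these callbacks are wired into MPI during PetscInitialize() via MPI_Register_datarep(). A hedged sketch of what such a registration could look like follows; the datarep name and the use of MPI_CONVERSION_FN_NULL as the conversion callbacks are assumptions for this sketch, and PETSc's actual registration may pass its own conversion routines.
/* Hedged sketch: wire the extent callback into MPI I/O. */
MPI_Register_datarep("external32",
                     MPI_CONVERSION_FN_NULL,  /* read conversion (placeholder)  */
                     MPI_CONVERSION_FN_NULL,  /* write conversion (placeholder) */
                     PetscDataRep_extent_fn,
                     NULL /* extra_state */);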
}