This article collects typical usage examples of the MPI_Comm_dup function in C++. If you are unsure what exactly MPI_Comm_dup does, or how and when to use it, the hand-picked code examples below should help.
Fifteen MPI_Comm_dup code examples are shown below, sorted by popularity.
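Before the collected examples, here is a minimal self-contained sketch of the basic pattern they all share: duplicate a communicator so that a module gets its own communication context, use it, and free it. This sketch is illustrative and is not taken from the examples below.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    MPI_Comm lib_comm;
    int rank;

    MPI_Init(&argc, &argv);
    /* lib_comm has the same process group as MPI_COMM_WORLD but a
       separate communication context, so messages sent on it can
       never match receives posted on MPI_COMM_WORLD. */
    MPI_Comm_dup(MPI_COMM_WORLD, &lib_comm);
    MPI_Comm_rank(lib_comm, &rank);
    printf("rank %d is using the duplicated communicator\n", rank);
    MPI_Comm_free(&lib_comm);   /* release the duplicate */
    MPI_Finalize();
    return 0;
}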
Example 1: main
int main(int argc, char* argv[]) {
int test_count = 2; /* Number of tests */
int max_size = 1000; /* Max msg. length */
int min_size = 1000; /* Min msg. length */
int size_incr = 1000; /* Increment for */
/* msg. sizes */
float* x; /* Message buffer */
double* times; /* Elapsed times */
double* max_times; /* Max times */
double* min_times; /* Min times */
int time_array_order; /* Size of timing */
/* arrays. */
double start; /* Start time */
double elapsed; /* Elapsed time */
int i, test, size; /* Loop variables */
int p, my_rank, source, dest;
MPI_Comm io_comm;
MPI_Status status;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &p);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_dup(MPI_COMM_WORLD, &io_comm);
Cache_io_rank(MPI_COMM_WORLD, io_comm);
Initialize(max_size, min_size, size_incr, my_rank,
&x, &times, &max_times, &min_times,
&time_array_order);
source = (my_rank + p - 1) % p; /* ring predecessor; avoids a negative result for rank 0 */
dest = (my_rank + 1) % p;
/* For each message size, find average circuit time */
/* Loop var size = message size */
/* Loop var i = index into arrays for timings */
for (size = min_size, i = 0; size <= max_size;
size = size + size_incr, i++) {
times[i] = 0.0;
max_times[i] = 0.0;
min_times[i] = 1000000.0;
for (test = 0; test < test_count; test++) {
    start = MPI_Wtime();
    /* Rank 0 injects the message into the ring and waits for it to
       come back; all other ranks forward it. If every rank posted
       MPI_Recv first, the program would deadlock. */
    if (my_rank == 0) {
        MPI_Send(x, size, MPI_FLOAT, dest, 0, MPI_COMM_WORLD);
        MPI_Recv(x, size, MPI_FLOAT, source, 0,
            MPI_COMM_WORLD, &status);
    } else {
        MPI_Recv(x, size, MPI_FLOAT, source, 0,
            MPI_COMM_WORLD, &status);
        MPI_Send(x, size, MPI_FLOAT, dest, 0, MPI_COMM_WORLD);
    }
    elapsed = MPI_Wtime() - start;
times[i] = times[i] + elapsed;
if (elapsed > max_times[i])
max_times[i] = elapsed;
if (elapsed < min_times[i])
min_times[i] = elapsed;
}
} /* for size . . . */
Print_results(io_comm, my_rank, min_size, max_size,
size_incr, time_array_order, test_count, times,
max_times, min_times);
MPI_Finalize();
} /* main */
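A note on the timing methodology in Example 1: differences of MPI_Wtime are only meaningful above the timer's resolution, which MPI_Wtick reports. A minimal sketch for checking it (illustrative, not part of the original example):

#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);
    /* MPI_Wtick returns the resolution of MPI_Wtime in seconds;
       elapsed times near or below this value are mostly noise, so
       increase test_count or the message size accordingly. */
    printf("MPI_Wtime resolution: %e seconds\n", MPI_Wtick());
    MPI_Finalize();
    return 0;
}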
Example 2: PIOc_Init_Intracomm
/* @ingroup PIO_init
*
* Library initialization used when IO tasks are a subset of compute
* tasks.
*
* This function creates an MPI intracommunicator between a set of IO
* tasks and one or more sets of computational tasks.
*
* The caller must create all comp_comm and the io_comm MPI
* communicators before calling this function.
*
* @param comp_comm the MPI_Comm of the compute tasks
*
* @param num_iotasks the number of io tasks to use
*
* @param stride the offset between io tasks in the comp_comm
*
* @param base the comp_comm index of the first io task
*
* @param rearr the rearranger to use by default; this may be
* overridden in @ref PIO_initdecomp
*
* @param iosysidp index of the defined system descriptor
*
* @return 0 on success, otherwise a PIO error code.
*/
int PIOc_Init_Intracomm(const MPI_Comm comp_comm, const int num_iotasks,
const int stride, const int base, const int rearr,
int *iosysidp)
{
iosystem_desc_t *iosys;
int ierr = PIO_NOERR;
int ustride;
int lbase;
int mpierr;
iosys = (iosystem_desc_t *) malloc(sizeof(iosystem_desc_t));
/* Copy the computation communicator into union_comm. */
mpierr = MPI_Comm_dup(comp_comm, &iosys->union_comm);
CheckMPIReturn(mpierr, __FILE__, __LINE__);
if (mpierr)
ierr = PIO_EIO;
/* Copy the computation communicator into comp_comm. */
if (!ierr)
{
mpierr = MPI_Comm_dup(comp_comm, &iosys->comp_comm);
CheckMPIReturn(mpierr, __FILE__, __LINE__);
if (mpierr)
ierr = PIO_EIO;
}
if (!ierr)
{
iosys->my_comm = iosys->comp_comm;
iosys->io_comm = MPI_COMM_NULL;
iosys->intercomm = MPI_COMM_NULL;
iosys->error_handler = PIO_INTERNAL_ERROR;
iosys->async_interface= false;
iosys->compmaster = 0;
iosys->iomaster = 0;
iosys->ioproc = false;
iosys->default_rearranger = rearr;
iosys->num_iotasks = num_iotasks;
ustride = stride;
/* Find MPI rank and number of tasks in comp_comm communicator. */
CheckMPIReturn(MPI_Comm_rank(iosys->comp_comm, &(iosys->comp_rank)),__FILE__,__LINE__);
CheckMPIReturn(MPI_Comm_size(iosys->comp_comm, &(iosys->num_comptasks)),__FILE__,__LINE__);
if(iosys->comp_rank==0)
iosys->compmaster = MPI_ROOT;
/* Ensure that settings for number of computation tasks, number
* of IO tasks, and the stride are reasonable. */
if((iosys->num_comptasks == 1) && (num_iotasks*ustride > 1)) {
// This is a serial run with a bad configuration. Set up a single task.
fprintf(stderr, "PIO_TP PIOc_Init_Intracomm reset stride and tasks.\n");
iosys->num_iotasks = 1;
ustride = 1;
}
if((iosys->num_iotasks < 1) || ((iosys->num_iotasks*ustride) > iosys->num_comptasks)){
fprintf(stderr, "PIO_TP PIOc_Init_Intracomm error\n");
fprintf(stderr, "num_iotasks=%d, ustride=%d, num_comptasks=%d\n", num_iotasks, ustride, iosys->num_comptasks);
return PIO_EBADID;
}
/* Create an array that holds the ranks of the tasks to be used for IO. */
iosys->ioranks = (int *) calloc(iosys->num_iotasks, sizeof(int));
for(int i=0;i< iosys->num_iotasks; i++){
iosys->ioranks[i] = (base + i*ustride) % iosys->num_comptasks;
if(iosys->ioranks[i] == iosys->comp_rank)
iosys->ioproc = true;
}
iosys->ioroot = iosys->ioranks[0];
/* Create an MPI info object. */
CheckMPIReturn(MPI_Info_create(&(iosys->info)),__FILE__,__LINE__);
//......... some code omitted here .........
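For orientation, a hypothetical call to PIOc_Init_Intracomm might look like the sketch below. The configuration values (4 IO tasks, stride 1, base 0), the rearranger constant PIO_REARR_SUBSET, and the PIOc_finalize cleanup call are assumptions for illustration, not taken from the excerpt above.

#include <mpi.h>
#include <pio.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    int iosysid, ret;

    MPI_Init(&argc, &argv);
    /* Assumed configuration: 4 IO tasks starting at rank 0 of
       MPI_COMM_WORLD, one per rank (stride 1), with the subset
       rearranger. */
    ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, 4, 1, 0,
                              PIO_REARR_SUBSET, &iosysid);
    if (ret != PIO_NOERR)
        fprintf(stderr, "PIOc_Init_Intracomm failed: %d\n", ret);
    /* ... define decompositions and do file I/O here ... */
    PIOc_finalize(iosysid);  /* assumed matching cleanup call */
    MPI_Finalize();
    return 0;
}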
Example 3: main
int main(int argc, char *argv[])
{
int provided, i[2], k;
char *buffer, *ptr_dt;
buffer = (char *) malloc(BUFSIZE * sizeof(char));
MPI_Status status;
pthread_t receiver_thread, sender_thread[NUMSENDS];
pthread_attr_t attr;
MPI_Comm communicator;
int bs;
MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
if (provided != MPI_THREAD_MULTIPLE) {
printf("Error\n");
MPI_Abort(MPI_COMM_WORLD, 911);
}
MPI_Buffer_attach(buffer, BUFSIZE);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_dup(MPI_COMM_WORLD, &communicator); /* We do not use this communicator in this program, but
with this call, the problem appears more reliably.
If the MPI_Comm_dup() call is commented out, it is still
evident but does not appear that often (don't know why) */
/* Initialize and set thread detached attribute */
pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
pthread_create(&receiver_thread, &attr, &receiver, NULL);
for (k = 0; k < NUMSENDS; k++)
pthread_create(&sender_thread[k], &attr, &sender_bsend, NULL);
pthread_join(receiver_thread, NULL);
for (k = 0; k < NUMSENDS; k++)
pthread_join(sender_thread[k], NULL);
MPI_Barrier(MPI_COMM_WORLD);
pthread_create(&receiver_thread, &attr, &receiver, NULL);
for (k = 0; k < NUMSENDS; k++)
pthread_create(&sender_thread[k], &attr, &sender_ibsend, NULL);
pthread_join(receiver_thread, NULL);
for (k = 0; k < NUMSENDS; k++)
pthread_join(sender_thread[k], NULL);
MPI_Barrier(MPI_COMM_WORLD);
pthread_create(&receiver_thread, &attr, &receiver, NULL);
for (k = 0; k < NUMSENDS; k++)
pthread_create(&sender_thread[k], &attr, &sender_isend, NULL);
pthread_join(receiver_thread, NULL);
for (k = 0; k < NUMSENDS; k++)
pthread_join(sender_thread[k], NULL);
MPI_Barrier(MPI_COMM_WORLD);
pthread_create(&receiver_thread, &attr, &receiver, NULL);
for (k = 0; k < NUMSENDS; k++)
pthread_create(&sender_thread[k], &attr, &sender_send, NULL);
pthread_join(receiver_thread, NULL);
for (k = 0; k < NUMSENDS; k++)
pthread_join(sender_thread[k], NULL);
MPI_Barrier(MPI_COMM_WORLD);
pthread_attr_destroy(&attr);
if (!rank)
printf( " No Errors\n" );
MPI_Comm_free(&communicator);
MPI_Buffer_detach(&ptr_dt, &bs);
free(buffer);
MPI_Finalize();
return 0;
}
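The BUFSIZE used above is defined outside the excerpt. When sizing an attach buffer for buffered sends, the usual rule is message bytes plus MPI_BSEND_OVERHEAD for each send that may be pending at once; a minimal sketch under those illustrative assumptions:

#include <mpi.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
    /* Illustrative sizes: up to 10 pending buffered sends of 1024
       bytes each. */
    int msg_count = 10, msg_size = 1024;
    int bufsize = msg_count * (msg_size + MPI_BSEND_OVERHEAD);
    char *buf = (char *) malloc(bufsize);

    MPI_Init(&argc, &argv);
    MPI_Buffer_attach(buf, bufsize);
    /* ... MPI_Bsend / MPI_Ibsend traffic would go here ... */
    MPI_Buffer_detach(&buf, &bufsize); /* waits for buffered sends */
    MPI_Finalize();
    free(buf);
    return 0;
}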
Example 4: main
int main( int argc, char* argv[] ) {
MPI_Comm CommWorld;
int rank;
int numProcessors;
int procToWatch;
/* Initialise MPI, get world info */
MPI_Init( &argc, &argv );
MPI_Comm_dup( MPI_COMM_WORLD, &CommWorld );
MPI_Comm_size( CommWorld, &numProcessors );
MPI_Comm_rank( CommWorld, &rank );
BaseFoundation_Init( &argc, &argv );
BaseIO_Init( &argc, &argv );
if( argc >= 2 ) {
procToWatch = atoi( argv[1] );
}
else {
procToWatch = 0;
}
if( rank == procToWatch )
{
Stream* myInfo;
Stream* myDebug;
Stream* myDump;
Stream* myError;
Stream* allNew;
myInfo = Journal_Register( Info_Type, "MyInfo" );
myDebug = Journal_Register( Debug_Type, "MyDebug" );
myDump = Journal_Register( Dump_Type, "MyDump" );
myError = Journal_Register( Error_Type, "MyError" );
allNew = Journal_Register( "My own stream", "allNew" );
printf( "TEST: \"HELLO\" should appear\n" );
Journal_Printf( myInfo, "%s\n", "HELLO" );
printf( "TEST: \"WORLD\" should NOT appear\n" );
Journal_Printf( myDebug, "%s\n", "HELLO" );
printf( "TEST: \"HELLO\" should NOT appear\n" );
Journal_Printf( myDump, "%s\n", "HELLO" );
printf( "TEST: \"WORLD\" should NOT appear\n" );
Journal_Printf( myError, "%s\n", "HELLO" );
printf( "Turning off myInfo\n" );
Journal_Enable_NamedStream( Info_Type, "MyInfo" , False );
printf( "TEST: \"HELLO\" should NOT appear\n" );
Journal_Printf( myInfo, "%s\n", "HELLO" );
printf( "Turning on Dump\n" );
Journal_Enable_TypedStream( Dump_Type, True );
Journal_Enable_NamedStream( Dump_Type, "MyDump", True );
printf( "TEST: \"HELLO\" should appear\n" );
Journal_Printf( myDump, "%s\n", "HELLO" );
printf( "Turning off Journal\n" );
stJournal->enable = False;
printf( "TEST: \"HELLO\" should NOT appear\n" );
Journal_Printf( myDump, "%s\n", "HELLO" );
stJournal->enable = True;
Journal_Enable_NamedStream( Info_Type, "MyInfo", True );
printf( "TEST: DPrintf\n" );
Journal_DPrintf( myInfo, "DPrintf\n" );
}
Memory_Print();
BaseIO_Finalise();
BaseFoundation_Finalise();
/* Close off MPI */
MPI_Finalize();
return EXIT_SUCCESS;
}
Example 5: main
int main(int narg, char* arg[])
{
sint transpose=0;
sint id=0,np=1;
sint i,handle,maxv=3;
real *u;
slong *glindex;
#ifndef MPI
int comm;
#else
MPI_Comm comm;
MPI_Init(&narg,&arg);
MPI_Comm_dup(MPI_COMM_WORLD,&comm);
{ int i;
MPI_Comm_rank(comm,&i); id=i;
MPI_Comm_size(comm,&i); np=i;
}
#endif
glindex = malloc(np*2*sizeof(slong));
for(i=0;i<np;++i) glindex[2*i+1] = glindex[2*i] = i+1;
i=np*2;
fgs_setup(&handle,glindex,&i,&comm,&np);
free(glindex);
u = malloc(np*2*sizeof(real));
for(i=0;i<np;++i) u[2*i ] = (real)( 2*np*id + 2*i ),
u[2*i+1] = (real)( 2*np*id + 2*i+1 );
/*for(i=0;i<np;++i) printf(" (%g %g)", u[2*i], u[2*i+1]); printf("\n");*/
i=1, fgs_op(&handle,u,&datatype,&i,&transpose);
/*for(i=0;i<np;++i) printf(" (%g %g)", u[2*i], u[2*i+1]); printf("\n");*/
for(i=0;i<np;++i) assert_is_zero( np*(2*np*(np-1)+4*i+1) - u[2*i] ),
assert_is_zero( np*(2*np*(np-1)+4*i+1) - u[2*i+1] );
free(u);
u = malloc(np*2*3*sizeof(real));
for(i=0;i<np;++i)
u[3*(2*i )+0] = (real)( 3*(2*np*id + 2*i ) + 0 ),
u[3*(2*i )+1] = (real)( 3*(2*np*id + 2*i ) + 1 ),
u[3*(2*i )+2] = (real)( 3*(2*np*id + 2*i ) + 2 ),
u[3*(2*i+1)+0] = (real)( 3*(2*np*id + 2*i+1) + 0 ),
u[3*(2*i+1)+1] = (real)( 3*(2*np*id + 2*i+1) + 1 ),
u[3*(2*i+1)+2] = (real)( 3*(2*np*id + 2*i+1) + 2 );
/*for(i=0;i<np;++i) {
int j;
printf("%d: ( ", id);
for(j=3*(2*i);j<=3*(2*i+1)+2;++j) printf("%g ",u[j]);
printf(")\n");
}*/
i=1, maxv=3, fgs_op_vec(&handle,u,&maxv,&datatype,&i,&transpose);
/*for(i=0;i<np;++i) {
int j;
printf("%d: ( ", id);
for(j=3*(2*i);j<=3*(2*i+1)+2;++j) printf("%g ",u[j]);
printf(")\n");
}*/
for(i=0;i<np;++i)
assert_is_zero( np*(6*np*(np-1)+12*i+3+2*0) - u[3*(2*i )+0] ),
assert_is_zero( np*(6*np*(np-1)+12*i+3+2*1) - u[3*(2*i )+1] ),
assert_is_zero( np*(6*np*(np-1)+12*i+3+2*2) - u[3*(2*i )+2] ),
assert_is_zero( np*(6*np*(np-1)+12*i+3+2*0) - u[3*(2*i+1)+0] ),
assert_is_zero( np*(6*np*(np-1)+12*i+3+2*1) - u[3*(2*i+1)+1] ),
assert_is_zero( np*(6*np*(np-1)+12*i+3+2*2) - u[3*(2*i+1)+2] );
free(u);
u = malloc(np*2*3*sizeof(real));
for(i=0;i<np;++i)
u[2*np*0+(2*i )] = (real)( 3*(2*np*id + 2*i ) + 0 ),
u[2*np*1+(2*i )] = (real)( 3*(2*np*id + 2*i ) + 1 ),
u[2*np*2+(2*i )] = (real)( 3*(2*np*id + 2*i ) + 2 ),
u[2*np*0+(2*i+1)] = (real)( 3*(2*np*id + 2*i+1) + 0 ),
u[2*np*1+(2*i+1)] = (real)( 3*(2*np*id + 2*i+1) + 1 ),
u[2*np*2+(2*i+1)] = (real)( 3*(2*np*id + 2*i+1) + 2 );
i=1, maxv=3, fgs_op_many(&handle,u,u+2*np,u+4*np,0,0,0,&maxv,
&datatype,&i,&transpose);
for(i=0;i<np;++i)
assert_is_zero( np*(6*np*(np-1)+12*i+3+2*0) - u[2*np*0+(2*i )] ),
assert_is_zero( np*(6*np*(np-1)+12*i+3+2*1) - u[2*np*1+(2*i )] ),
assert_is_zero( np*(6*np*(np-1)+12*i+3+2*2) - u[2*np*2+(2*i )] ),
assert_is_zero( np*(6*np*(np-1)+12*i+3+2*0) - u[2*np*0+(2*i+1)] ),
assert_is_zero( np*(6*np*(np-1)+12*i+3+2*1) - u[2*np*1+(2*i+1)] ),
assert_is_zero( np*(6*np*(np-1)+12*i+3+2*2) - u[2*np*2+(2*i+1)] );
free(u);
fgs_free(&handle);
printf("test on node %d/%d succeeded\n", (int)id+1, (int)np);
#ifdef MPI
MPI_Comm_free(&comm);
MPI_Finalize();
#endif
return 0;
}
Example 6: MPI_Finalize
//......... some code omitted here .........
} else {
ierr = PetscPrintf(PETSC_COMM_WORLD,"There are %D unused database options. They are:\n",nopt);CHKERRQ(ierr);
}
}
#if defined(PETSC_USE_DEBUG)
if (nopt && !flg3 && !flg1) {
ierr = PetscPrintf(PETSC_COMM_WORLD,"WARNING! There are options you set that were not used!\n");CHKERRQ(ierr);
ierr = PetscPrintf(PETSC_COMM_WORLD,"WARNING! could be spelling mistake, etc!\n");CHKERRQ(ierr);
ierr = PetscOptionsLeft();CHKERRQ(ierr);
} else if (nopt && flg3) {
#else
if (nopt && flg3) {
#endif
ierr = PetscOptionsLeft();CHKERRQ(ierr);
}
}
{
PetscThreadComm tcomm_world;
ierr = PetscGetThreadCommWorld(&tcomm_world);CHKERRQ(ierr);
/* Free global thread communicator */
ierr = PetscThreadCommDestroy(&tcomm_world);CHKERRQ(ierr);
}
/*
List all objects the user may have forgot to free
*/
ierr = PetscOptionsHasName(NULL,"-objects_dump",&flg1);CHKERRQ(ierr);
if (flg1) {
MPI_Comm local_comm;
char string[64];
ierr = PetscOptionsGetString(NULL,"-objects_dump",string,64,NULL);CHKERRQ(ierr);
ierr = MPI_Comm_dup(MPI_COMM_WORLD,&local_comm);CHKERRQ(ierr);
ierr = PetscSequentialPhaseBegin_Private(local_comm,1);CHKERRQ(ierr);
ierr = PetscObjectsDump(stdout,(string[0] == 'a') ? PETSC_TRUE : PETSC_FALSE);CHKERRQ(ierr);
ierr = PetscSequentialPhaseEnd_Private(local_comm,1);CHKERRQ(ierr);
ierr = MPI_Comm_free(&local_comm);CHKERRQ(ierr);
}
PetscObjectsCounts = 0;
PetscObjectsMaxCounts = 0;
ierr = PetscFree(PetscObjects);CHKERRQ(ierr);
#if defined(PETSC_USE_LOG)
ierr = PetscLogDestroy();CHKERRQ(ierr);
#endif
/*
Destroy any packages that registered a finalize
*/
ierr = PetscRegisterFinalizeAll();CHKERRQ(ierr);
/*
Destroy all the function registration lists created
*/
ierr = PetscFinalize_DynamicLibraries();CHKERRQ(ierr);
/*
Print PetscFunctionLists that have not been properly freed
ierr = PetscFunctionListPrintAll();CHKERRQ(ierr);
*/
if (petsc_history) {
ierr = PetscCloseHistoryFile(&petsc_history);CHKERRQ(ierr);
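The PetscSequentialPhaseBegin/End calls above rely on a dup'ed communicator to serialize the ranks' output without disturbing other message traffic. The same idea can be written in plain MPI; a minimal sketch assuming nothing beyond the MPI standard:

#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    MPI_Comm seq_comm;
    int rank, size, token = 0;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    /* A duplicated communicator keeps the token messages from
       colliding with any application traffic on MPI_COMM_WORLD. */
    MPI_Comm_dup(MPI_COMM_WORLD, &seq_comm);
    /* Each rank waits for a token from its predecessor, does its
       work, then passes the token on. */
    if (rank > 0)
        MPI_Recv(&token, 1, MPI_INT, rank - 1, 0, seq_comm,
                 MPI_STATUS_IGNORE);
    printf("rank %d in ordered section\n", rank);
    fflush(stdout);
    if (rank < size - 1)
        MPI_Send(&token, 1, MPI_INT, rank + 1, 0, seq_comm);
    MPI_Comm_free(&seq_comm);
    MPI_Finalize();
    return 0;
}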
Example 7: main
int main( int argc, char **argv )
{
int size, rank, key, his_key, lrank, result;
MPI_Comm myComm;
MPI_Comm myFirstComm;
MPI_Comm mySecondComm;
int errors = 0, sum_errors;
MPI_Status status;
/* Initialization */
MPI_Init ( &argc, &argv );
MPI_Comm_rank ( MPI_COMM_WORLD, &rank);
if (verbose) printf("[%d] MPI_Init complete!\n",rank);fflush(stdout);
MPI_Comm_size ( MPI_COMM_WORLD, &size);
/* Only works for 2 or more processes */
if (size >= 2) {
MPI_Comm merge1, merge2, merge3, merge4;
/* Generate membership key in the range [0,1] */
key = rank % 2;
MPI_Comm_split ( MPI_COMM_WORLD, key, rank, &myComm );
/* This creates an intercomm that is the size of comm world
but has processes grouped by even and odd */
MPI_Intercomm_create (myComm, 0, MPI_COMM_WORLD, (key+1)%2, 1,
&myFirstComm );
/* Dup an intercomm */
MPI_Comm_dup ( myFirstComm, &mySecondComm );
MPI_Comm_rank( mySecondComm, &lrank );
his_key = -1;
if (verbose) printf("[%d] Communicators created!\n",rank);fflush(stdout);
/* Leaders communicate with each other */
if (lrank == 0) {
MPI_Sendrecv (&key, 1, MPI_INT, 0, 0,
&his_key, 1, MPI_INT, 0, 0, mySecondComm, &status);
if (key != (his_key+1)%2) {
printf( "Received %d but expected %d\n", his_key, (his_key+1)%2 );
errors++;
}
}
if (verbose) printf("[%d] MPI_Sendrecv completed!\n",rank);fflush(stdout);
if (errors)
printf("[%d] Failed!\n",rank);
if (verbose) printf( "About to merge intercommunicators\n" );fflush(stdout);
MPI_Intercomm_merge ( mySecondComm, key, &merge1 );
if (verbose) printf( "merge1 done\n" );fflush(stdout);
MPI_Intercomm_merge ( mySecondComm, (key+1)%2, &merge2 );
if (verbose) printf( "merge2 done\n" );fflush(stdout);
MPI_Intercomm_merge ( mySecondComm, 0, &merge3 );
if (verbose) printf( "merge3 done\n" );fflush(stdout);
MPI_Intercomm_merge ( mySecondComm, 1, &merge4 );
if (verbose) printf( "merge4 done\n" );fflush(stdout);
if (verbose) printf("[%d] MPI_Intercomm_merge completed!\n",rank);fflush(stdout);
/* We should check that these are correct! An easy test is that
the merged comms are all MPI_SIMILAR (unless 2 processes used,
in which case MPI_CONGRUENT is ok */
MPI_Comm_compare( merge1, MPI_COMM_WORLD, &result );
if ((size > 2 && result != MPI_SIMILAR) ||
(size == 2 && result != MPI_CONGRUENT)) {
errors ++;
printf( "merge1 is not the same size as comm world\n" );
}
/* merge 2 isn't ordered the same way as the others, even for 2 processes */
MPI_Comm_compare( merge2, MPI_COMM_WORLD, &result );
if (result != MPI_SIMILAR) {
errors ++;
printf( "merge2 is not the same size as comm world\n" );
}
MPI_Comm_compare( merge3, MPI_COMM_WORLD, &result );
if ((size > 2 && result != MPI_SIMILAR) ||
(size == 2 && result != MPI_CONGRUENT)) {
errors ++;
printf( "merge3 is not the same size as comm world\n" );
}
MPI_Comm_compare( merge4, MPI_COMM_WORLD, &result );
if ((size > 2 && result != MPI_SIMILAR) ||
(size == 2 && result != MPI_CONGRUENT)) {
errors ++;
printf( "merge4 is not the same size as comm world\n" );
}
if (verbose) printf("[%d] MPI_Comm_compare completed!\n",rank);fflush(stdout);
/* Free communicators */
if (verbose) printf( "About to free communicators\n" );
MPI_Comm_free( &myComm );
MPI_Comm_free( &myFirstComm );
MPI_Comm_free( &mySecondComm );
MPI_Comm_free( &merge1 );
MPI_Comm_free( &merge2 );
MPI_Comm_free( &merge3 );
MPI_Comm_free( &merge4 );
//......... some code omitted here .........
Example 8: MTestGetIntercomm
//......... some code omitted here .........
break;
case 3:
/* Split comm world in half, then dup */
merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
if (merr) MTestPrintError( merr );
merr = MPI_Comm_size( MPI_COMM_WORLD, &size );
if (merr) MTestPrintError( merr );
if (size > 1) {
merr = MPI_Comm_split( MPI_COMM_WORLD, (rank < size/2), rank,
&mcomm );
if (merr) MTestPrintError( merr );
if (rank == 0) {
rleader = size/2;
}
else if (rank == size/2) {
rleader = 0;
}
else {
/* Remote leader is significant only for the processes
designated local leaders */
rleader = -1;
}
*isLeftGroup = rank < size/2;
merr = MPI_Intercomm_create( mcomm, 0, MPI_COMM_WORLD, rleader,
12345, comm );
if (merr) MTestPrintError( merr );
/* avoid leaking after assignment below */
merr = MPI_Comm_free( &mcomm );
if (merr) MTestPrintError( merr );
/* now dup, some bugs only occur for dup's of intercomms */
mcomm = *comm;
merr = MPI_Comm_dup(mcomm, comm);
if (merr) MTestPrintError( merr );
interCommName = "Intercomm by splitting MPI_COMM_WORLD then dup'ing";
}
else
*comm = MPI_COMM_NULL;
break;
case 4:
/* Split comm world in half, form intercomm, then split that intercomm */
merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
if (merr) MTestPrintError( merr );
merr = MPI_Comm_size( MPI_COMM_WORLD, &size );
if (merr) MTestPrintError( merr );
if (size > 1) {
merr = MPI_Comm_split( MPI_COMM_WORLD, (rank < size/2), rank,
&mcomm );
if (merr) MTestPrintError( merr );
if (rank == 0) {
rleader = size/2;
}
else if (rank == size/2) {
rleader = 0;
}
else {
/* Remote leader is significant only for the processes
designated local leaders */
rleader = -1;
}
*isLeftGroup = rank < size/2;
merr = MPI_Intercomm_create( mcomm, 0, MPI_COMM_WORLD, rleader,
12345, comm );
if (merr) MTestPrintError( merr );
Example 9: MTestGetIntracommGeneral
/*
* Get an intracommunicator with at least min_size members. If "allowSmaller"
* is true, allow the communicator to be smaller than MPI_COMM_WORLD and
* for this routine to return MPI_COMM_NULL for some values. Returns 0 if
* no more communicators are available.
*/
int MTestGetIntracommGeneral( MPI_Comm *comm, int min_size, int allowSmaller )
{
int size, rank, merr;
int done2, done=0;
int isBasic = 0;
/* The while loop allows us to skip communicators that are too small.
MPI_COMM_NULL is always considered large enough */
while (!done) {
isBasic = 0;
intraCommName = "";
switch (intraCommIdx) {
case 0:
*comm = MPI_COMM_WORLD;
isBasic = 1;
intraCommName = "MPI_COMM_WORLD";
break;
case 1:
/* dup of world */
merr = MPI_Comm_dup(MPI_COMM_WORLD, comm );
if (merr) MTestPrintError( merr );
intraCommName = "Dup of MPI_COMM_WORLD";
break;
case 2:
/* reverse ranks */
merr = MPI_Comm_size( MPI_COMM_WORLD, &size );
if (merr) MTestPrintError( merr );
merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
if (merr) MTestPrintError( merr );
merr = MPI_Comm_split( MPI_COMM_WORLD, 0, size-rank, comm );
if (merr) MTestPrintError( merr );
intraCommName = "Rank reverse of MPI_COMM_WORLD";
break;
case 3:
/* subset of world, with reversed ranks */
merr = MPI_Comm_size( MPI_COMM_WORLD, &size );
if (merr) MTestPrintError( merr );
merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
if (merr) MTestPrintError( merr );
merr = MPI_Comm_split( MPI_COMM_WORLD, ((rank < size/2) ? 1 : MPI_UNDEFINED),
size-rank, comm );
if (merr) MTestPrintError( merr );
intraCommName = "Rank reverse of half of MPI_COMM_WORLD";
break;
case 4:
*comm = MPI_COMM_SELF;
isBasic = 1;
intraCommName = "MPI_COMM_SELF";
break;
/* These next cases are communicators that include some
but not all of the processes */
case 5:
case 6:
case 7:
case 8:
{
int newsize;
merr = MPI_Comm_size( MPI_COMM_WORLD, &size );
if (merr) MTestPrintError( merr );
newsize = size - (intraCommIdx - 4);
if (allowSmaller && newsize >= min_size) {
merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
if (merr) MTestPrintError( merr );
merr = MPI_Comm_split( MPI_COMM_WORLD, rank < newsize, rank,
comm );
if (merr) MTestPrintError( merr );
if (rank >= newsize) {
merr = MPI_Comm_free( comm );
if (merr) MTestPrintError( merr );
*comm = MPI_COMM_NULL;
}
else {
intraCommName = "Split of WORLD";
}
}
else {
/* Act like default */
*comm = MPI_COMM_NULL;
intraCommIdx = -1;
}
}
break;
/* Other ideas: dup of self, cart comm, graph comm */
default:
*comm = MPI_COMM_NULL;
intraCommIdx = -1;
break;
}
if (*comm != MPI_COMM_NULL) {
merr = MPI_Comm_size( *comm, &size );
//......... some code omitted here .........
Example 10: main
int main(int argc, char **argv)
{
int i;
char *env = NULL;
simulation_data sim;
simulation_data_ctor(&sim);
#ifdef PARALLEL
/* Initialize MPI */
MPI_Init(&argc, &argv);
/* Create a new communicator. */
if (MPI_Comm_dup(MPI_COMM_WORLD, &sim.par_comm) != MPI_SUCCESS)
sim.par_comm = MPI_COMM_WORLD;
MPI_Comm_rank (sim.par_comm, &sim.par_rank);
MPI_Comm_size (sim.par_comm, &sim.par_size);
#endif
/* Process command line arguments. */
SimulationArguments(argc, argv);
for(i = 1; i < argc; ++i)
{
if(strcmp(argv[i], "-nobounds") == 0)
sim.doDomainBoundaries = 0;
}
#ifdef PARALLEL
/* Install callback functions for global communication. */
VisItSetBroadcastIntFunction2(visit_broadcast_int_callback, (void*)&sim);
VisItSetBroadcastStringFunction2(visit_broadcast_string_callback, (void*)&sim);
/* Tell libsim whether the simulation is parallel. */
VisItSetParallel(sim.par_size > 1);
VisItSetParallelRank(sim.par_rank);
/* Tell libsim which communicator to use. You must pass the address of
* an MPI_Comm object.
*/
VisItSetMPICommunicator((void *)&sim.par_comm);
#endif
/* Only read the environment on rank 0. This could happen before MPI_Init if
* we are using an MPI that does not like to let us spawn processes but we
* would not know our processor rank.
*/
if(sim.par_rank == 0)
env = VisItGetEnvironment();
/* Pass the environment to all other processors collectively. */
VisItSetupEnvironment2(env);
if(env != NULL)
free(env);
/* Write out .sim file that VisIt uses to connect. Only do it
* on processor 0.
*/
/* CHANGE 3 */
if(sim.par_rank == 0)
{
/* Write out .sim file that VisIt uses to connect. */
VisItInitializeSocketAndDumpSimFile(
#ifdef PARALLEL
"domainbounds_par",
#else
"domainbounds",
#endif
"Demonstrates domain boundaries",
"/path/to/where/sim/was/started",
NULL, NULL, SimulationFilename());
}
simulation_data_create_domains(&sim);
/* Call the main loop. */
mainloop(&sim);
simulation_data_dtor(&sim);
#ifdef PARALLEL
MPI_Finalize();
#endif
return 0;
}
Example 11: test_idup
MTEST_THREAD_RETURN_TYPE test_idup(void *arg)
{
int i;
int size, rank;
int ranges[1][3];
int rleader, isLeft;
int *excl = NULL;
int tid = *(int *) arg;
MPI_Group ingroup, high_group, even_group;
MPI_Comm local_comm, inter_comm;
MPI_Comm idupcomms[NUM_IDUPS];
MPI_Request reqs[NUM_IDUPS];
MPI_Comm outcomm;
MPI_Comm incomm = comms[tid];
MPI_Comm_size(incomm, &size);
MPI_Comm_rank(incomm, &rank);
MPI_Comm_group(incomm, &ingroup);
/* Idup incomm multiple times */
for (i = 0; i < NUM_IDUPS; i++) {
MPI_Comm_idup(incomm, &idupcomms[i], &reqs[i]);
}
/* Overlap pending idups with various comm generation functions */
/* Comm_dup */
MPI_Comm_dup(incomm, &outcomm);
errs[tid] += MTestTestComm(outcomm);
MTestFreeComm(&outcomm);
/* Comm_split */
MPI_Comm_split(incomm, rank % 2, size - rank, &outcomm);
errs[tid] += MTestTestComm(outcomm);
MTestFreeComm(&outcomm);
/* Comm_create, high half of incomm */
ranges[0][0] = size / 2;
ranges[0][1] = size - 1;
ranges[0][2] = 1;
MPI_Group_range_incl(ingroup, 1, ranges, &high_group);
MPI_Comm_create(incomm, high_group, &outcomm);
MPI_Group_free(&high_group);
errs[tid] += MTestTestComm(outcomm);
MTestFreeComm(&outcomm);
/* Comm_create_group, even ranks of incomm */
/* exclude the odd ranks */
excl = malloc((size / 2) * sizeof(int));
for (i = 0; i < size / 2; i++)
excl[i] = (2 * i) + 1;
MPI_Group_excl(ingroup, size / 2, excl, &even_group);
free(excl);
if (rank % 2 == 0) {
MPI_Comm_create_group(incomm, even_group, 0, &outcomm);
}
else {
outcomm = MPI_COMM_NULL;
}
MPI_Group_free(&even_group);
errs[tid] += MTestTestComm(outcomm);
MTestFreeComm(&outcomm);
/* Intercomm_create & Intercomm_merge */
MPI_Comm_split(incomm, (rank < size / 2), rank, &local_comm);
if (rank == 0) {
rleader = size / 2;
}
else if (rank == size / 2) {
rleader = 0;
}
else {
rleader = -1;
}
isLeft = rank < size / 2;
MPI_Intercomm_create(local_comm, 0, incomm, rleader, 99, &inter_comm);
MPI_Intercomm_merge(inter_comm, isLeft, &outcomm);
MPI_Comm_free(&local_comm);
errs[tid] += MTestTestComm(inter_comm);
MTestFreeComm(&inter_comm);
errs[tid] += MTestTestComm(outcomm);
MTestFreeComm(&outcomm);
MPI_Waitall(NUM_IDUPS, reqs, MPI_STATUSES_IGNORE);
for (i = 0; i < NUM_IDUPS; i++) {
errs[tid] += MTestTestComm(idupcomms[i]);
MPI_Comm_free(&idupcomms[i]);
}
MPI_Group_free(&ingroup);
return NULL;
}
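The stress test above overlaps MPI_Comm_idup with several other communicator constructors. In isolation, the nonblocking duplication pattern reduces to this short sketch:

#include <mpi.h>

int main(int argc, char *argv[])
{
    MPI_Comm newcomm;
    MPI_Request req;

    MPI_Init(&argc, &argv);
    /* Start a nonblocking duplication; newcomm must not be used
       for communication until the request completes. */
    MPI_Comm_idup(MPI_COMM_WORLD, &newcomm, &req);
    /* ... unrelated local work can overlap the duplication ... */
    MPI_Wait(&req, MPI_STATUS_IGNORE);
    MPI_Comm_free(&newcomm);
    MPI_Finalize();
    return 0;
}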
Example 12: main
int
main (int argc, char **argv)
{
int nprocs = -1;
int rank = -1;
int i, j;
int *granks;
char processor_name[128];
int namelen = 128;
int buf[buf_size];
MPI_Status status;
MPI_Comm temp;
MPI_Comm intercomm = MPI_COMM_NULL;
MPI_Comm dcomms[DCOMM_CALL_COUNT];
MPI_Group world_group, dgroup;
int intersize, dnprocs[DCOMM_CALL_COUNT], drank[DCOMM_CALL_COUNT];
int dims[TWOD], periods[TWOD], remain_dims[TWOD];
int graph_index[] = { 2, 3, 4, 6 };
int graph_edges[] = { 1, 3, 0, 3, 0, 2 };
/* init */
MPI_Init (&argc, &argv);
MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
MPI_Comm_rank (MPI_COMM_WORLD, &rank);
MPI_Get_processor_name (processor_name, &namelen);
printf ("(%d) is alive on %s\n", rank, processor_name);
fflush (stdout);
MPI_Barrier (MPI_COMM_WORLD);
/* probably want number to be higher... */
if (nprocs < 4) {
printf ("not enough tasks\n");
}
else {
if (DCOMM_CALL_COUNT > 0) {
#ifdef RUN_COMM_DUP
/* create all of the derived communicators... */
/* simplest is created by MPI_Comm_dup... */
MPI_Comm_dup (MPI_COMM_WORLD, &dcomms[0]);
#else
dcomms[0] = MPI_COMM_NULL;
#endif
}
if (DCOMM_CALL_COUNT > 1) {
#ifdef RUN_COMM_CREATE
/* use subset of MPI_COMM_WORLD group for MPI_Comm_create... */
MPI_Comm_group (MPI_COMM_WORLD, &world_group);
granks = (int *) malloc (sizeof(int) * (nprocs/2));
for (i = 0; i < nprocs/2; i++)
granks [i] = 2 * i;
MPI_Group_incl (world_group, nprocs/2, granks, &dgroup);
MPI_Comm_create (MPI_COMM_WORLD, dgroup, &dcomms[1]);
MPI_Group_free (&world_group);
MPI_Group_free (&dgroup);
free (granks);
#else
dcomms[1] = MPI_COMM_NULL;
#endif
}
if (DCOMM_CALL_COUNT > 2) {
#ifdef RUN_COMM_SPLIT
/* split into thirds with inverted ranks... */
MPI_Comm_split (MPI_COMM_WORLD, rank % 3, nprocs - rank, &dcomms[2]);
#else
dcomms[2] = MPI_COMM_NULL;
#endif
}
#ifdef RUN_INTERCOMM_CREATE
if ((DCOMM_CALL_COUNT < 3) || (dcomms[2] == MPI_COMM_NULL)) {
MPI_Comm_split (MPI_COMM_WORLD, rank % 3, nprocs - rank, &temp);
}
else {
temp = dcomms[2];
}
if (rank % 3) {
MPI_Intercomm_create (temp, 0, MPI_COMM_WORLD,
(((nprocs % 3) == 2) && ((rank % 3) == 2)) ?
nprocs - 1 : nprocs - (rank % 3) - (nprocs % 3),
INTERCOMM_CREATE_TAG, &intercomm);
}
if ((DCOMM_CALL_COUNT < 3) || (dcomms[2] == MPI_COMM_NULL)) {
MPI_Comm_free (&temp);
}
#endif
if (DCOMM_CALL_COUNT > 3) {
#ifdef RUN_CART_CREATE
/* create a 2 X nprocs/2 torus topology, allow reordering */
dims[0] = 2;
dims[1] = nprocs/2;
periods[0] = periods[1] = 1;
MPI_Cart_create (MPI_COMM_WORLD, TWOD, dims, periods, 1, &dcomms[3]);
#else
dcomms[3] = MPI_COMM_NULL;
#endif
}
//......... some code omitted here .........
Example 13: main
int main( int argc, char* argv[] ) {
MPI_Comm CommWorld;
int rank;
int numProcessors;
int procToWatch;
Dictionary* dictionary;
AbstractContext* abstractContext;
/* Initialise MPI, get world info */
MPI_Init( &argc, &argv );
MPI_Comm_dup( MPI_COMM_WORLD, &CommWorld );
MPI_Comm_size( CommWorld, &numProcessors );
MPI_Comm_rank( CommWorld, &rank );
BaseFoundation_Init( &argc, &argv );
BaseIO_Init( &argc, &argv );
BaseContainer_Init( &argc, &argv );
BaseAutomation_Init( &argc, &argv );
BaseExtensibility_Init( &argc, &argv );
BaseContext_Init( &argc, &argv );
stream = Journal_Register (Info_Type, "myStream");
/* Redirect the error stream to stdout, so we can check warnings
appear correctly */
Stream_SetFileBranch( Journal_GetTypedStream( ErrorStream_Type ), stJournal->stdOut );
if( argc >= 2 ) {
procToWatch = atoi( argv[1] );
}
else {
procToWatch = 0;
}
if( rank == procToWatch ) Journal_Printf( (void*) stream, "Watching rank: %i\n", rank );
/* Read input */
dictionary = Dictionary_New();
dictionary->add( dictionary, "rank", Dictionary_Entry_Value_FromUnsignedInt( rank ) );
dictionary->add( dictionary, "numProcessors", Dictionary_Entry_Value_FromUnsignedInt( numProcessors ) );
/* Build the context */
abstractContext = _AbstractContext_New(
sizeof(AbstractContext),
"TestContext",
MyDelete,
MyPrint,
NULL,
NULL,
NULL,
_AbstractContext_Build,
_AbstractContext_Initialise,
_AbstractContext_Execute,
_AbstractContext_Destroy,
"context",
True,
MySetDt,
0,
10,
CommWorld,
dictionary );
/* add hooks to existing entry points */
ContextEP_Append( abstractContext, AbstractContext_EP_Dt, MyDt );
if( rank == procToWatch ) {
Stream* stream = Journal_Register( InfoStream_Type, AbstractContext_Type );
Stg_Component_Build( abstractContext, 0 /* dummy */, False );
Stg_Component_Initialise( abstractContext, 0 /* dummy */, False );
Context_PrintConcise( abstractContext, stream );
Stg_Component_Execute( abstractContext, 0 /* dummy */, False );
Stg_Component_Destroy( abstractContext, 0 /* dummy */, False );
}
/* Stg_Class_Delete stuff */
Stg_Class_Delete( abstractContext );
Stg_Class_Delete( dictionary );
BaseContext_Finalise();
BaseExtensibility_Finalise();
BaseAutomation_Finalise();
BaseContainer_Finalise();
BaseIO_Finalise();
BaseFoundation_Finalise();
/* Close off MPI */
MPI_Finalize();
return 0; /* success */
}
Example 14: main
int main(int argc, char *argv[])
{
int errs = 0;
int attrval;
int i, key[32], keyval, saveKeyval;
MPI_Comm comm, dupcomm;
MTest_Init(&argc, &argv);
while (MTestGetIntracomm(&comm, 1)) {
if (comm == MPI_COMM_NULL)
continue;
MPI_Comm_create_keyval(copy_fn, delete_fn, &keyval, (void *) 0);
saveKeyval = keyval; /* in case we need to free explicitly */
attrval = 1;
MPI_Comm_set_attr(comm, keyval, (void *) &attrval);
/* See MPI-1, 5.7.1. Freeing the keyval does not remove it if it
* is in use in an attribute */
MPI_Comm_free_keyval(&keyval);
/* We create some dummy keyvals here in case the same keyval
* is reused */
for (i = 0; i < 32; i++) {
MPI_Comm_create_keyval(MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN, &key[i], (void *) 0);
}
MPI_Comm_dup(comm, &dupcomm);
/* Check that the attribute was copied */
if (attrval != 2) {
errs++;
printf("Attribute not incremented when comm dup'ed (%s)\n", MTestGetIntracommName());
}
MPI_Comm_free(&dupcomm);
if (attrval != 1) {
errs++;
printf("Attribute not decremented when dupcomm %s freed\n", MTestGetIntracommName());
}
/* Check that the attribute was freed in the dupcomm */
if (comm != MPI_COMM_WORLD && comm != MPI_COMM_SELF) {
MPI_Comm_free(&comm);
/* Check that the original attribute was freed */
if (attrval != 0) {
errs++;
printf("Attribute not decremented when comm %s freed\n", MTestGetIntracommName());
}
}
else {
/* Explicitly delete the attributes from world and self */
MPI_Comm_delete_attr(comm, saveKeyval);
}
/* Free those other keyvals */
for (i = 0; i < 32; i++) {
MPI_Comm_free_keyval(&key[i]);
}
}
MTest_Finalize(errs);
MPI_Finalize();
/* The attributes on comm self and world were deleted by finalize
* (see separate test) */
return 0;
}
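The copy_fn and delete_fn callbacks are not shown in this excerpt. Given the behavior the test checks (the attribute counter goes up when a communicator is dup'ed and down when one is freed), they would plausibly look like the following sketch; these implementations are assumptions, not the test's actual code.

#include <mpi.h>

/* Hypothetical callbacks consistent with what the test checks: the
   attribute value points at an int counter shared by all duplicates. */
static int copy_fn(MPI_Comm comm, int keyval, void *extra_state,
                   void *attr_in, void *attr_out, int *flag)
{
    *(int *) attr_in += 1;          /* one more communicator holds it */
    *(void **) attr_out = attr_in;  /* duplicates share the counter */
    *flag = 1;                      /* do copy the attribute */
    return MPI_SUCCESS;
}

static int delete_fn(MPI_Comm comm, int keyval, void *attr_val,
                     void *extra_state)
{
    *(int *) attr_val -= 1;         /* one fewer communicator holds it */
    return MPI_SUCCESS;
}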
Example 15: main
int main( int argc, char* argv[] ) {
MPI_Comm CommWorld;
int rank;
int numProcessors;
int procToWatch;
Dictionary* dictionary;
Snac_Context* snacContext;
Tetrahedra_Index tetraIndex;
Element_Index elementIndex;
double minLengthScale;
/* Initialise MPI, get world info */
MPI_Init( &argc, &argv );
Snac_Init( &argc, &argv );
MPI_Comm_dup( MPI_COMM_WORLD, &CommWorld );
MPI_Comm_size( CommWorld, &numProcessors );
MPI_Comm_rank( CommWorld, &rank );
if( argc >= 2 ) {
procToWatch = atoi( argv[1] );
}
else {
procToWatch = 0;
}
if( rank == procToWatch ) printf( "Watching rank: %i\n", rank );
/* Read input */
dictionary = Dictionary_New();
dictionary->add( dictionary, "rank", Dictionary_Entry_Value_FromUnsignedInt( rank ) );
dictionary->add( dictionary, "numProcessors", Dictionary_Entry_Value_FromUnsignedInt( numProcessors ) );
dictionary->add( dictionary, "meshSizeI", Dictionary_Entry_Value_FromUnsignedInt( 4 ) );
dictionary->add( dictionary, "meshSizeJ", Dictionary_Entry_Value_FromUnsignedInt( 4 ) );
dictionary->add( dictionary, "meshSizeK", Dictionary_Entry_Value_FromUnsignedInt( 4 ) );
dictionary->add( dictionary, "minX", Dictionary_Entry_Value_FromDouble( 0.0f ) );
dictionary->add( dictionary, "minY", Dictionary_Entry_Value_FromDouble( -300.0f ) );
dictionary->add( dictionary, "minZ", Dictionary_Entry_Value_FromDouble( 0.0f ) );
dictionary->add( dictionary, "maxX", Dictionary_Entry_Value_FromDouble( 300.0f ) );
dictionary->add( dictionary, "maxY", Dictionary_Entry_Value_FromDouble( 0.0f ) );
dictionary->add( dictionary, "maxZ", Dictionary_Entry_Value_FromDouble( 300.0f ) );
/* Build the context */
snacContext = Snac_Context_New( 0.0f, 10.0f, sizeof(Snac_Node), sizeof(Snac_Element), CommWorld, dictionary );
/* Construction phase -----------------------------------------------------------------------------------------------*/
Stg_Component_Construct( snacContext, 0 /* dummy */, &snacContext, True );
/* Building phase ---------------------------------------------------------------------------------------------------*/
Stg_Component_Build( snacContext, 0 /* dummy */, False );
/* Initialisaton phase ----------------------------------------------------------------------------------------------*/
Stg_Component_Initialise( snacContext, 0 /* dummy */, False );
/* Work out the first element's tetrahedra values, and print them. */
printf( "Element: 0, Coords: (%g %g %g), (%g %g %g), (%g %g %g), (%g %g %g), (%g %g %g) (%g %g %g) (%g %g %g) (%g %g %g)\n",
Snac_Element_NodeCoord( snacContext, 0, 0 )[0],
Snac_Element_NodeCoord( snacContext, 0, 0 )[1],
Snac_Element_NodeCoord( snacContext, 0, 0 )[2],
Snac_Element_NodeCoord( snacContext, 0, 1 )[0],
Snac_Element_NodeCoord( snacContext, 0, 1 )[1],
Snac_Element_NodeCoord( snacContext, 0, 1 )[2],
Snac_Element_NodeCoord( snacContext, 0, 3 )[0],
Snac_Element_NodeCoord( snacContext, 0, 3 )[1],
Snac_Element_NodeCoord( snacContext, 0, 3 )[2],
Snac_Element_NodeCoord( snacContext, 0, 2 )[0],
Snac_Element_NodeCoord( snacContext, 0, 2 )[1],
Snac_Element_NodeCoord( snacContext, 0, 2 )[2],
Snac_Element_NodeCoord( snacContext, 0, 4 )[0],
Snac_Element_NodeCoord( snacContext, 0, 4 )[1],
Snac_Element_NodeCoord( snacContext, 0, 4 )[2],
Snac_Element_NodeCoord( snacContext, 0, 5 )[0],
Snac_Element_NodeCoord( snacContext, 0, 5 )[1],
Snac_Element_NodeCoord( snacContext, 0, 5 )[2],
Snac_Element_NodeCoord( snacContext, 0, 7 )[0],
Snac_Element_NodeCoord( snacContext, 0, 7 )[1],
Snac_Element_NodeCoord( snacContext, 0, 7 )[2],
Snac_Element_NodeCoord( snacContext, 0, 6 )[0],
Snac_Element_NodeCoord( snacContext, 0, 6 )[1],
Snac_Element_NodeCoord( snacContext, 0, 6 )[2] );
/* For each element, compare to the first element's */
for( elementIndex = 0; elementIndex < snacContext->mesh->elementLocalCount; elementIndex++ ) {
Bool error;
Snac_UpdateElementMomentum( (Context*)snacContext, elementIndex, &minLengthScale );
if( elementIndex == 0 ) {
for( tetraIndex = 0; tetraIndex < Tetrahedra_Count; tetraIndex++ ) {
Tetrahedra_Surface_Index faceIndex;
for( faceIndex = 0; faceIndex < Tetrahedra_Surface_Count; faceIndex++ ) {
printf( "Element: 0, Tetrahedra: %u, Face: %u, Normal: %g %g %g\n",
tetraIndex,
faceIndex,
Snac_Element_At( snacContext, elementIndex )->tetra[tetraIndex].surface[faceIndex].normal[0],
Snac_Element_At( snacContext, elementIndex )->tetra[tetraIndex].surface[faceIndex].normal[1],
Snac_Element_At( snacContext, elementIndex )->tetra[tetraIndex].surface[faceIndex].normal[2] );
}
}
}
else {
for( tetraIndex = 0, error = False; tetraIndex < Tetrahedra_Count; tetraIndex++ ) {
//......... some code omitted here .........