

C++ MPI_Barrier Function Code Examples

This article collects typical usage examples of the MPI_Barrier function in C/C++. If you are wondering what MPI_Barrier does, how to call it, or what real MPI_Barrier code looks like, the selected examples below should help.


The following presents 15 code examples of the MPI_Barrier function, gathered from open-source projects and ordered by popularity.
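
Before the project examples, here is a minimal, self-contained sketch (written for this article, not taken from any of the projects below) of the most common MPI_Barrier idiom: every rank in a communicator blocks at the call until all ranks have reached it.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    int rank;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    printf("rank %d: before the barrier\n", rank);

    /* No rank returns from this call until every rank in MPI_COMM_WORLD has entered it. */
    MPI_Barrier(MPI_COMM_WORLD);

    if (rank == 0)
        printf("all ranks have reached the barrier\n");

    MPI_Finalize();
    return 0;
}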

Example 1: main

int main( int argc, char *argv[] )
{
    int errs = 0;
    int *ranks;
    int *ranksout;
    MPI_Group gworld, grev, gself;
    MPI_Comm  comm;
    MPI_Comm  commrev;
    int rank, size, i;
    double start, end, time1, time2;

    MTest_Init( &argc, &argv );

    comm = MPI_COMM_WORLD;

    MPI_Comm_size( comm, &size );
    MPI_Comm_rank( comm, &rank );

    ranks    = malloc(size*sizeof(int));
    ranksout = malloc(size*sizeof(int));
    if (!ranks || !ranksout) {
        fprintf(stderr, "out of memory\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* generate a comm with the rank order reversed */
    MPI_Comm_split(comm, 0, (size-rank-1), &commrev);
    MPI_Comm_group(commrev, &grev);
    MPI_Comm_group(MPI_COMM_SELF, &gself);
    MPI_Comm_group(comm, &gworld);

    /* sanity check correctness first */
    for (i=0; i < size; i++) {
        ranks[i] = i;
        ranksout[i] = -1;
    }
    MPI_Group_translate_ranks(grev, size, ranks, gworld, ranksout);
    for (i=0; i < size; i++) {
        if (ranksout[i] != (size-i-1)) {
            if (rank == 0)
                printf("%d: (gworld) expected ranksout[%d]=%d, got %d\n", rank, i, (size-rank-1), ranksout[i]);
            ++errs;
        }
    }
    MPI_Group_translate_ranks(grev, size, ranks, gself, ranksout);
    for (i=0; i < size; i++) {
        int expected = (i == (size-rank-1) ? 0 : MPI_UNDEFINED);
        if (ranksout[i] != expected) {
            if (rank == 0)
                printf("%d: (gself) expected ranksout[%d]=%d, got %d\n", rank, i, expected, ranksout[i]);
            ++errs;
        }
    }

    /* now compare relative performance */

    /* we need lots of procs to get a group large enough to have meaningful
     * numbers.  On most testing machines this means that we're oversubscribing
     * cores in a big way, which might perturb the timing results.  So we make
     * sure everyone started up and then everyone but rank 0 goes to sleep to
     * let rank 0 do all the timings. */
    MPI_Barrier(comm);

    if (rank != 0) {
        sleep(10);
    }
    else /* rank==0 */ {
        sleep(1); /* try to avoid timing while everyone else is making syscalls */

        MPI_Group_translate_ranks(grev, size, ranks, gworld, ranksout); /*throwaway iter*/
        start = MPI_Wtime();
        for (i = 0; i < NUM_LOOPS; ++i) {
            MPI_Group_translate_ranks(grev, size, ranks, gworld, ranksout);
        }
        end = MPI_Wtime();
        time1 = end - start;

        MPI_Group_translate_ranks(grev, size, ranks, gself, ranksout); /*throwaway iter*/
        start = MPI_Wtime();
        for (i = 0; i < NUM_LOOPS; ++i) {
            MPI_Group_translate_ranks(grev, size, ranks, gself, ranksout);
        }
        end = MPI_Wtime();
        time2 = end - start;

        /* complain if the difference between the "gworld" and "gself" times exceeds 2x the "gself" time */
        if (fabs(time1 - time2) > (2.00 * time2)) {
            printf("too much difference in MPI_Group_translate_ranks performance:\n");
            printf("time1=%f time2=%f\n", time1, time2);
            printf("(fabs(time1-time2)/time2)=%f\n", (fabs(time1-time2)/time2));
            if (time1 < time2) {
                printf("also, (time1<time2) is surprising...\n");
            }
            ++errs;
        }
    }

    free(ranks);
    free(ranksout);

//......... part of the code omitted here .........
Developer: OngOngoing, Project: 219351_homework, Lines: 101, Source: gtranksperf.c

Example 2: main

#include <mpi.h>    /* added: the original listing omits the headers */
#include <pfft.h>

int main(int argc, char **argv)
{
    int np[2];
    ptrdiff_t n[4], ni[4], no[4];
    ptrdiff_t alloc_local_forw, alloc_local_back, alloc_local, howmany;
    ptrdiff_t local_ni[4], local_i_start[4];
    ptrdiff_t local_n[4], local_start[4];
    ptrdiff_t local_no[4], local_o_start[4];
    double err, *in;
    pfft_complex *out;
    pfft_plan plan_forw=NULL, plan_back=NULL;
    MPI_Comm comm_cart_2d;

    /* Set size of FFT and process mesh */
    ni[0] = ni[1] = ni[2] = ni[3] = 8;
    n[0] = 13;
    n[1] = 14;
    n[2] = 15;
    n[3] = 17;
    for(int t=0; t<4; t++)
        no[t] = ni[t];
    np[0] = 2;
    np[1] = 2;
    howmany = 1;

    /* Initialize MPI and PFFT */
    MPI_Init(&argc, &argv);
    pfft_init();

    /* Create two-dimensional process grid of size np[0] x np[1], if possible */
    if( pfft_create_procmesh_2d(MPI_COMM_WORLD, np[0], np[1], &comm_cart_2d) ) {
        pfft_fprintf(MPI_COMM_WORLD, stderr, "Error: This test file only works with %d processes.\n", np[0]*np[1]);
        MPI_Finalize();
        return 1;
    }

    /* Get parameters of data distribution */
    alloc_local_forw = pfft_local_size_many_dft_r2c(4, n, ni, n, howmany,
                       PFFT_DEFAULT_BLOCKS, PFFT_DEFAULT_BLOCKS,
                       comm_cart_2d, PFFT_TRANSPOSED_OUT,
                       local_ni, local_i_start, local_n, local_start);

    alloc_local_back = pfft_local_size_many_dft_c2r(4, n, n, no, howmany,
                       PFFT_DEFAULT_BLOCKS, PFFT_DEFAULT_BLOCKS,
                       comm_cart_2d, PFFT_TRANSPOSED_IN,
                       local_n, local_start, local_no, local_o_start);

    /* Allocate enough memory for both trafos */
    alloc_local = (alloc_local_forw > alloc_local_back) ?
                  alloc_local_forw : alloc_local_back;
    in  = pfft_alloc_real(2 * alloc_local);
    out = pfft_alloc_complex(alloc_local);

    /* Plan parallel forward FFT */
    plan_forw = pfft_plan_many_dft_r2c(
                    4, n, ni, n, howmany, PFFT_DEFAULT_BLOCKS, PFFT_DEFAULT_BLOCKS,
                    in, out, comm_cart_2d, PFFT_FORWARD, PFFT_TRANSPOSED_OUT| PFFT_MEASURE| PFFT_DESTROY_INPUT);

    /* Plan parallel backward FFT */
    plan_back = pfft_plan_many_dft_c2r(
                    4, n, n, no, howmany, PFFT_DEFAULT_BLOCKS, PFFT_DEFAULT_BLOCKS,
                    out, in, comm_cart_2d, PFFT_BACKWARD, PFFT_TRANSPOSED_IN| PFFT_MEASURE| PFFT_DESTROY_INPUT);

    /* Initialize input with random numbers */
    pfft_init_input_real(4, ni, local_ni, local_i_start,
                         in);

    /* execute parallel forward FFT */
    pfft_execute(plan_forw);

    /* execute parallel backward FFT */
    pfft_execute(plan_back);

    /* Scale data */
    for(ptrdiff_t l=0; l < local_ni[0] * local_ni[1] * local_ni[2] * local_ni[3]; l++)
        in[l] /= (n[0]*n[1]*n[2]*n[3]);

    /* Print error of back transformed data */
    MPI_Barrier(MPI_COMM_WORLD);
    err = pfft_check_output_real(4, ni, local_ni, local_i_start, in, comm_cart_2d);
    pfft_printf(comm_cart_2d, "Error after one forward and backward trafo of size n=(%td, %td, %td, %td):\n", n[0], n[1], n[2], n[3]);
    pfft_printf(comm_cart_2d, "maxerror = %6.2e;\n", err);

    /* free mem and finalize */
    pfft_destroy_plan(plan_forw);
    pfft_destroy_plan(plan_back);
    MPI_Comm_free(&comm_cart_2d);
    pfft_free(in);
    pfft_free(out);
    MPI_Finalize();
    return 0;
}
Developer: v4m4, Project: pfft, Lines: 92, Source: simple_check_ousam_r2c_4d_transposed.c

Example 3: main

#include <stdio.h>  /* added: the original listing omits the headers */
#include <stdlib.h>
#include <mpi.h>

int main (int argc, char *argv[])
{
    int numtasks, namelen, rank, dest = 1, tag = 111, source = 0, size, i, j;
    double start_time=0, elapsed_time=0, acum;
    double *outmsg, *inmsg;
    char hostname[256];
    MPI_Status status,status2;
    MPI_Request send_request,recv_request;
    
    if (argc < 2)
    {
        printf("Usage: %s size [where size is the number of elements (double) to send]\n", argv[0]);
        return 0;
    }
    
    size = atoi(argv[1]);
    
    outmsg=(double*)malloc(sizeof(double)*size);
    if(outmsg==NULL)
    {
        printf("Unable to allocate memory\n");
        return 1;    /* a bare return is invalid in a function returning int */
    }
    inmsg=(double*)malloc(sizeof(double)*size);
    if(inmsg==NULL)
    {
        printf("Unable to allocate memory\n");
        return 1;
    }
    
    MPI_Init (&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numtasks);   // get number of processes
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);       // get current process id
    MPI_Get_processor_name(hostname, &namelen); // get CPU name
    
    //Initialize the msg buffer to the rank id.
    for (i = 0; i < size; i++)
        outmsg[i] = rank;
    
    //Define the source as the left neighbour
    if (rank == 0)  source=numtasks-1;
    else            source=rank-1;
    //Define the destination as the right neighbour
    if(rank==numtasks-1) dest=0;
    else                 dest=rank+1;
    
    start_time = MPI_Wtime();
    acum=0;
    for (i=0; i<numtasks; i++) {
        if (rank==0) printf("it: %2d - Rank %d (%s) sending data (%g) to rank %d\n",i,rank, hostname, inmsg[0], dest);
        MPI_Isend(outmsg, size, MPI_DOUBLE, dest, tag,MPI_COMM_WORLD,&send_request);
        MPI_Recv (inmsg, size, MPI_DOUBLE, source, tag, MPI_COMM_WORLD,&status);
        acum = acum + inmsg[0];
        if (rank==0) printf("it: %2d - Rank %d received data (%g) from rank %d (acum=%g)\n",i,rank,outmsg[0],source,acum);
        MPI_Wait(&send_request, &status2);
        //Copy the inmsg to outmsg for the next iteration.
        for (j = 0; j < size; j++) outmsg[j] = inmsg[j];
    }

    MPI_Barrier(MPI_COMM_WORLD);
    elapsed_time = MPI_Wtime() - start_time;
    printf(" Rank %d: Elapsed time to send %6d double(s) across a ring made up by %2d (acum=%g) in %g ms\n", rank, size, numtasks, acum, elapsed_time*1e03);
    
    MPI_Finalize ();
    return 0;
}
Developer: jlerida, Project: hpc-course, Lines: 64, Source: mpi_nonblockingring.c
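
A common variation of the ring test above is to bracket the timed region with barriers, so the measurement starts only after every rank is ready and stops only after the slowest rank has finished. The helper below is a sketch written for this article; its name and structure are not from the hpc-course project.

/* Barrier-bracketed timing sketch: returns the wall-clock time of the slowest rank. */
double time_collective_section(MPI_Comm comm)
{
    double start, end;

    MPI_Barrier(comm);      /* all ranks start the measurement together */
    start = MPI_Wtime();

    /* ... the communication being measured goes here ... */

    MPI_Barrier(comm);      /* wait for the slowest rank to finish */
    end = MPI_Wtime();

    return end - start;
}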

Example 4: main

int main(int argc, char **argv) {
    int i, j, rank, nranks, peer, bufsize, errors;
    double  *win_buf, *src_buf, *dst_buf;
    MPI_Win buf_win;

    MTest_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nranks);

    bufsize = XDIM * YDIM * sizeof(double);
    MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &win_buf);
    /* Alloc_mem is not required for the origin buffers for RMA operations - 
       just for the Win_create memory */
    MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &src_buf);
    MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &dst_buf);

    if (rank == 0)
        if (verbose) printf("MPI RMA Strided Put Test:\n");

    for (i = 0; i < XDIM*YDIM; i++) {
        *(win_buf  + i) = 1.0 + rank;
        *(src_buf + i) = 1.0 + rank;
    }

    MPI_Win_create(win_buf, bufsize, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &buf_win);

    peer = (rank+1) % nranks;

    /* Perform ITERATIONS strided put operations */

    for (i = 0; i < ITERATIONS; i++) {
      MPI_Aint idx_loc[SUB_YDIM];
      int idx_rem[SUB_YDIM];
      int blk_len[SUB_YDIM];
      MPI_Datatype src_type, dst_type;

      void *base_ptr = dst_buf;
      MPI_Aint base_int;

      MPI_Get_address(base_ptr, &base_int);

      if (rank == 0)
        if (verbose) printf(" + iteration %d\n", i);

      for (j = 0; j < SUB_YDIM; j++) {
        MPI_Get_address(&src_buf[j*XDIM], &idx_loc[j]);
        idx_loc[j] = idx_loc[j] - base_int;
        idx_rem[j] = j*XDIM*sizeof(double);
        blk_len[j] = SUB_XDIM*sizeof(double);
      }

      MPI_Type_create_hindexed(SUB_YDIM, blk_len, idx_loc, MPI_BYTE, &src_type);
      MPI_Type_create_indexed_block(SUB_YDIM, SUB_XDIM*sizeof(double), idx_rem, MPI_BYTE, &dst_type);

      MPI_Type_commit(&src_type);
      MPI_Type_commit(&dst_type);

      MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
      MPI_Put(base_ptr, 1, src_type, peer, 0, 1, dst_type, buf_win);
      MPI_Win_unlock(peer, buf_win);

      MPI_Type_free(&src_type);
      MPI_Type_free(&dst_type);
    }

    MPI_Barrier(MPI_COMM_WORLD);

    /* Verify that the results are correct */

    MPI_Win_lock(MPI_LOCK_EXCLUSIVE, rank, 0, buf_win);
    errors = 0;
    for (i = 0; i < SUB_XDIM; i++) {
      for (j = 0; j < SUB_YDIM; j++) {
        const double actual   = *(win_buf + i + j*XDIM);
        const double expected = (1.0 + ((rank+nranks-1)%nranks));
        if (actual - expected > 1e-10) {
          SQUELCH( printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
              rank, j, i, expected, actual); );
          errors++;
          fflush(stdout);
        }
      }
Developer: jimmycao, Project: mpi-test, Lines: 83, Source: put_base.c

Example 5: MPI_Barrier

void FieldStatic::finalize() {
    MPI_Barrier(MPI_COMM_WORLD);
}
Developer: bamx23, Project: diploma, Lines: 3, Source: field-static.cpp

Example 6: MPI_Recv

double timeStepper::computeDt(int &numReads, int &numWrites)
{
  // Time step control
  array minSpeedTemp,maxSpeedTemp;
  array minSpeed,maxSpeed;
  elemOld->computeMinMaxCharSpeeds(directions::X1,
                                   minSpeedTemp, maxSpeedTemp,
                                   numReads,numWrites
                                  );
  minSpeedTemp = minSpeedTemp/XCoords->dX1;
  maxSpeedTemp = maxSpeedTemp/XCoords->dX1;
  maxSpeed     = af::max(maxSpeedTemp,af::abs(minSpeedTemp));

  if(params::dim>1)
  {
    elemOld->computeMinMaxCharSpeeds(directions::X2,
                                     minSpeedTemp, maxSpeedTemp,
                                     numReads,numWrites
                                    );
    minSpeedTemp = minSpeedTemp/XCoords->dX2;
    maxSpeedTemp = maxSpeedTemp/XCoords->dX2;
    maxSpeed    += af::max(maxSpeedTemp,af::abs(minSpeedTemp));
  }

  if(params::dim>2)
  {
    elemOld->computeMinMaxCharSpeeds(directions::X3,
                                     minSpeedTemp, maxSpeedTemp,
                                     numReads,numWrites);
    minSpeedTemp = minSpeedTemp/XCoords->dX3;
    maxSpeedTemp = maxSpeedTemp/XCoords->dX3;
    maxSpeed    += af::max(maxSpeedTemp,af::abs(minSpeedTemp));
  }
  array maxInvDt_af = af::max(af::max(af::max(maxSpeed,2),1),0);
  double maxInvDt = maxInvDt_af.host<double>()[0];

  /* Use MPI to find minimum over all processors */
  if (world_rank == 0) 
  {
    double temp; 
    for(int i=1;i<world_size;i++)
    {
      MPI_Recv(&temp, 1, MPI_DOUBLE, i, i, PETSC_COMM_WORLD,MPI_STATUS_IGNORE);
      if( maxInvDt < temp)
      {
        maxInvDt = temp;
      }
    }
  }
  else
  {
    MPI_Send(&maxInvDt, 1, MPI_DOUBLE, 0, world_rank, PETSC_COMM_WORLD);
  }
  MPI_Barrier(PETSC_COMM_WORLD);
  MPI_Bcast(&maxInvDt,1,MPI_DOUBLE,0,PETSC_COMM_WORLD);
  MPI_Barrier(PETSC_COMM_WORLD);
  
  double newDt = params::CourantFactor/maxInvDt;
    
  if (newDt > params::maxDtIncrement*dt)
  {
    newDt = params::maxDtIncrement*dt;
  }
  dt = newDt;
  return dt;   /* the function is declared to return double */
}
Developer: AFD-Illinois, Project: grim, Lines: 65, Source: timestep.cpp
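
The Recv/Send loop plus MPI_Bcast in Example 6 computes a global maximum by hand. Assuming every rank in the communicator participates, the same result is usually obtained with a single MPI_Allreduce, which also makes the surrounding barriers unnecessary. The helper below is a sketch for illustration only; it is not part of the grim code.

#include <mpi.h>

/* Hypothetical helper: global maximum of a per-rank value. */
static double globalMax(double localValue, MPI_Comm comm)
{
    double result;
    MPI_Allreduce(&localValue, &result, 1, MPI_DOUBLE, MPI_MAX, comm);
    return result;
}

In the example this would reduce the whole block to something like maxInvDt = globalMax(maxInvDt, PETSC_COMM_WORLD);.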

Example 7: test_mpio_special_collective


//......... part of the code omitted here .........
      	MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	printf("MPI_Type_contiguous failed (%s)\n", mpi_err_str);
	return 1;
      }

      if((mpi_err=MPI_Type_commit(&filetype))!=MPI_SUCCESS){
        MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	printf("MPI_Type_commit failed (%s)\n", mpi_err_str);
	return 1;
      }


      if((mpi_err= MPI_Type_hindexed(2,blocklens,offsets,etype,&buftype))
       != MPI_SUCCESS){
      	MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	printf("MPI_Type_contiguous failed (%s)\n", mpi_err_str);
	return 1;
      }

      if((mpi_err=MPI_Type_commit(&buftype))!=MPI_SUCCESS){
        MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	printf("MPI_Type_commit failed (%s)\n", mpi_err_str);
	return 1;
      }
     }
     else {

       filetype = MPI_BYTE;
       buftype  = MPI_BYTE;
     }

   /* Open a file */
    if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename,
	    MPI_MODE_RDWR | MPI_MODE_CREATE ,
	    MPI_INFO_NULL, &fh))
	    != MPI_SUCCESS){
	MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	printf("MPI_File_open failed (%s)\n", mpi_err_str);
	return 1;
    }

    /* each process writes some data */
    for (i=0; i < 2*DIMSIZE; i++)
	writedata[i] = mpi_rank*DIMSIZE + i;


     mpi_off = 0;
    if((mpi_err = MPI_File_set_view(fh, mpi_off, MPI_BYTE, filetype, "native", MPI_INFO_NULL))
        != MPI_SUCCESS) {
	MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	printf("MPI_File_set_view failed (%s)\n", mpi_err_str);
	return 1;
    }

    buf   = writedata;
    if ((mpi_err = MPI_File_write_at_all(fh, mpi_off, buf, bufcount, buftype,
	    &mpi_stat))
	    != MPI_SUCCESS){
	MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	printf("MPI_File_write_at offset(%ld), bytes (%d), failed (%s)\n",
		(long) mpi_off, bufcount, mpi_err_str);
	return 1;
    };

     if ((mpi_err = MPI_File_close(&fh))
	    != MPI_SUCCESS){
	MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	printf("MPI_File_close failed. \n");
	return 1;
    };

    mpi_err = MPI_Barrier(MPI_COMM_WORLD);
#ifdef H5_MPI_SPECIAL_COLLECTIVE_IO_WORKS
    if(retcode != 0) {
	if(mpi_rank == 0) {
	    printf("special collective IO is NOT working at this platform\n");
	    printf("Go back to hdf5/config and find the corresponding\n");
	    printf("configure-specific file (for example, powerpc-ibm-aix5.x) and add\n");
	    printf("hdf5_cv_mpi_special_collective_io_works=${hdf5_cv_mpi_special_collective_io_works='no'}\n");
	    printf(" at the end of the file.\n");
	    printf(" Please report to [email protected] about this problem.\n");
	}
	retcode = 1;
    }
#else
    if(retcode == 0) {
	if(mpi_rank == 0) {
	    printf(" This is NOT an error, What it really says is\n");
	    printf("special collective IO is WORKING at this platform\n");
	    printf(" Go back to hdf5/config and find the corresponding \n");
	    printf(" configure-specific file (for example, powerpc-ibm-aix5.x) and delete the line\n");
	    printf("hdf5_cv_mpi_special_collective_io_works=${hdf5_cv_mpi_special_collective_io_works='no'}\n");
	    printf(" at the end of the file.\n");
	    printf("Please report to [email protected] about this problem.\n");
	}
	retcode = 1;
    }
#endif
    return retcode;
}
Developer: Len3d, Project: appleseed, Lines: 101, Source: t_mpi.c

示例8: fprintf

	void system::set_problem(const bool init) 
	{
		if (myproc == 0)
			fprintf(stderr, " ********* Setting up MHD Turbulence ************* \n");

		const int reserve_n = (int)(1.25*local_n);
		U.reserve(reserve_n);
		dU.reserve(reserve_n);
		Wgrad.reserve(reserve_n);

		U.resize(local_n);
		dU.resize(local_n);
		Wgrad.resize(local_n);


		gamma_gas = 1.0;
		courant_no = 0.4;

		for (int i = 0; i < local_n; i++) 
		{
			assert(U[i][Fluid::DENS] > 0.0);
			U[i][Fluid::PSI ] = 0.0;

			for (int k = 0 ; k < Fluid::NSCALARS; k++)
				U[i].scal(k) = 1.0;

			dU[i] = Fluid(0.0);
			Wgrad[i] = 0.0;
			for (int k = 0; k < Fluid::NFLUID; k++)
				Wgrad[i].m[k] = U[i][k];
			U[i] = U[i].to_conservative(cells[i].Volume);
			ptcl[i].Volume = cells[i].Volume;
		}
		entropy_scalar = -1;
		isoeos_flag = true;

		MPI_Barrier(MPI_COMM_WORLD);
		if (myproc == 0)
			fprintf(stderr , " pvel ... \n");

		get_active_ptcl(true);


		MPI_Barrier(MPI_COMM_WORLD);
		if (myproc == 0)
			fprintf(stderr , " primitives ... \n");

		exchange_primitive_and_wdot();


		MPI_Barrier(MPI_COMM_WORLD);
		compute_pvel();
		exchange_pvel();

		MPI_Barrier(MPI_COMM_WORLD);
		if (myproc == 0)
			fprintf(stderr , " tgradients ... \n");
		compute_tgradient();

		if (myproc == 0)
			fprintf(stderr , " timestep... \n");
		compute_timesteps(true);
		for (int i = 0; i < local_n; i++)
			ptcl[i].rung[0] += 3;

		all_active = true;
		scheduler.flush_list();
		for (int i = 0; i < local_n; i++)
			scheduler.push_particle(i, (int)ptcl[i].rung[0]);

		MPI_Barrier(MPI_COMM_WORLD);
		if (!eulerian)
			clear_mesh();

		if (myproc == 0) fprintf(stderr, " proc= %d: complete problem setup \n", myproc);
		MPI_Barrier(MPI_COMM_WORLD);


	}
Developer: QirongZhu, Project: fvmhd3d, Lines: 79, Source: mhd_turbulence_sph.cpp
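
Example 8 repeats the same pattern before every setup phase: an MPI_Barrier followed by an fprintf guarded by myproc == 0, so the progress messages are printed only once all ranks have finished the previous step. A small helper can capture that idiom; the sketch below is hypothetical and not part of the fvmhd3d sources.

#include <stdio.h>
#include <mpi.h>

/* Hypothetical helper: synchronize, then let rank 0 announce the next phase. */
static void phase_banner(MPI_Comm comm, const char *msg)
{
    int rank;
    MPI_Barrier(comm);                  /* every rank has completed the previous phase */
    MPI_Comm_rank(comm, &rank);
    if (rank == 0)
        fprintf(stderr, " %s\n", msg);
}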

Example 9: test_mpio_1wMr

static int
test_mpio_1wMr(char *filename, int special_request)
{
    char hostname[128];
    int  mpi_size, mpi_rank;
    MPI_File fh;
    char mpi_err_str[MPI_MAX_ERROR_STRING];
    int  mpi_err_strlen;
    int  mpi_err;
    unsigned char writedata[DIMSIZE], readdata[DIMSIZE];
    unsigned char expect_val;
    int  i, irank;
    int  nerrs = 0;		/* number of errors */
    int  atomicity;
    MPI_Offset  mpi_off;
    MPI_Status  mpi_stat;

    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    if (MAINPROCESS && VERBOSE_MED){
        printf("Testing one process writes, all processes read.\n");
	printf("Using %d processes accessing file %s\n", mpi_size, filename);
        printf("    (Filename can be specified via program argument)\n");
    }

    /* show the hostname so that we can tell where the processes are running */
    if (VERBOSE_DEF){
	if (gethostname(hostname, 128) < 0){
	    PRINTID;
	    printf("gethostname failed\n");
	    return 1;
	}
	PRINTID;
	printf("hostname=%s\n", hostname);
    }

    /* Delete any old file in order to start anew. */
    /* Must delete because MPI_File_open does not have a Truncate mode. */
    /* Don't care if it has error. */
    MPI_File_delete(filename, MPI_INFO_NULL);
    MPI_Barrier(MPI_COMM_WORLD);	/* prevent racing condition */

    if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename,
	    MPI_MODE_RDWR | MPI_MODE_CREATE ,
	    MPI_INFO_NULL, &fh))
	    != MPI_SUCCESS){
	MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	PRINTID;
	printf("MPI_File_open failed (%s)\n", mpi_err_str);
	return 1;
    }

if (special_request & USEATOM){
    /* ==================================================
     * Set atomicity to true (1).  A POSIX compliant filesystem
     * should not need this.
     * ==================================================*/
    if ((mpi_err = MPI_File_get_atomicity(fh, &atomicity)) != MPI_SUCCESS){
	MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	PRINTID;
	printf("MPI_File_get_atomicity failed (%s)\n", mpi_err_str);
    }
    if (VERBOSE_HI)
	printf("Initial atomicity = %d\n", atomicity);
    if ((mpi_err = MPI_File_set_atomicity(fh, 1)) != MPI_SUCCESS){
	MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	PRINTID;
	printf("MPI_File_set_atomicity failed (%s)\n", mpi_err_str);
    }
    if ((mpi_err = MPI_File_get_atomicity(fh, &atomicity)) != MPI_SUCCESS){
	MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	PRINTID;
	printf("MPI_File_get_atomicity failed (%s)\n", mpi_err_str);
    }
    if (VERBOSE_HI)
	printf("After set_atomicity atomicity = %d\n", atomicity);
}

    /* This barrier is not necessary but do it anyway. */
    MPI_Barrier(MPI_COMM_WORLD);
    if (VERBOSE_HI){
	PRINTID;
	printf("between MPI_Barrier and MPI_File_write_at\n");
    }

    /* ==================================================
     * Each process calculates what to write but
     * only process irank(0) writes.
     * ==================================================*/
    irank=0;
    for (i=0; i < DIMSIZE; i++)
	writedata[i] = irank*DIMSIZE + i;
    mpi_off = irank*DIMSIZE;

    /* Only one process writes */
    if (mpi_rank==irank){
	if (VERBOSE_HI){
	    PRINTID; printf("wrote %d bytes at %ld\n", DIMSIZE, (long)mpi_off);
	}
//......... part of the code omitted here .........
Developer: Len3d, Project: appleseed, Lines: 101, Source: t_mpi.c

Example 10: test_mpio_derived_dtype


//......... part of the code omitted here .........
       != MPI_SUCCESS){
      	MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	printf("MPI_Type_struct failed (%s)\n", mpi_err_str);
	return 1;
    }
    if((mpi_err=MPI_Type_commit(&adv_filetype))!=MPI_SUCCESS){
        MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	printf("MPI_Type_commit failed (%s)\n", mpi_err_str);
	return 1;
    }


    if((mpi_err = MPI_File_set_view(fh,disp,etype,adv_filetype,"native",MPI_INFO_NULL))!= MPI_SUCCESS){
      MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	printf("MPI_File_set_view failed (%s)\n", mpi_err_str);
	return 1;
    }

    if((mpi_err = MPI_File_write(fh,buf,3,MPI_BYTE,&Status))!= MPI_SUCCESS){
        MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	printf("MPI_File_write failed (%s)\n", mpi_err_str);
	return 1;
    }


    if((mpi_err = MPI_File_close(&fh)) != MPI_SUCCESS){
       MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	printf("MPI_File_close failed (%s)\n", mpi_err_str);
	return 1;
    }


    if((mpi_err = MPI_File_open(MPI_COMM_WORLD,filename,MPI_MODE_RDONLY,MPI_INFO_NULL,&fh)) != MPI_SUCCESS){
       MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	printf("MPI_File_open failed (%s)\n", mpi_err_str);
	return 1;
    }

    if((mpi_err = MPI_File_set_view(fh,0,MPI_BYTE,MPI_BYTE,"native",MPI_INFO_NULL))!= MPI_SUCCESS){
        MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	printf("MPI_File_set_view failed (%s)\n", mpi_err_str);
	return 1;
    }
    if((mpi_err = MPI_File_read(fh,outbuf,3,MPI_BYTE,&Status))!=MPI_SUCCESS){
      MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
      printf("MPI_File_read failed (%s)\n", mpi_err_str);
      return 1;
    }

    if(outbuf[2]==2) {
       retcode = 0;
    }
    else {
/*      if(mpi_rank == 0) {
       printf("complicated derived datatype is NOT working at this platform\n");
       printf("go back to hdf5/config and find the corresponding\n");
       printf("configure-specific file and change ?????\n");
      }
*/
       retcode = -1;
   }

    if((mpi_err = MPI_File_close(&fh)) != MPI_SUCCESS){
       MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	printf("MPI_File_close failed (%s)\n", mpi_err_str);
	return 1;
    }


    mpi_err = MPI_Barrier(MPI_COMM_WORLD);
#ifdef H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS
    if(retcode == -1) {
	if(mpi_rank == 0) {
	    printf("Complicated derived datatype is NOT working at this platform\n");
	    printf("Go back to hdf5/config and find the corresponding\n");
	    printf("configure-specific file (for example, powerpc-ibm-aix5.x) and add\n");
	    printf("hdf5_cv_mpi_complex_derived_datatype_works=${hdf5_cv_mpi_complex_derived_datatype-works='no'}\n");
	    printf(" at the end of the file.\n");
	    printf(" Please report to [email protected] about this problem.\n");
	}
	retcode = 1;
    }
#else
    if(retcode == 0) {
	if(mpi_rank == 0) {
	    printf(" This is NOT an error, What it really says is\n");
	    printf("Complicated derived datatype is WORKING at this platform\n");
	    printf(" Go back to hdf5/config and find the corresponding \n");
	    printf(" configure-specific file (for example, powerpc-ibm-aix5.x) and delete the line\n");
	    printf("hdf5_cv_mpi_complex_derived_datatype_works=${hdf5_cv_mpi_complex_derived_datatype-works='no'}\n");
	    printf(" at the end of the file.\n");
	    printf("Please report to [email protected] about this problem.\n");
	}
	retcode = 1;
    }
    if(retcode == -1) retcode = 0;
#endif
    return retcode;
}
Developer: Len3d, Project: appleseed, Lines: 101, Source: t_mpi.c

Example 11: test_mpio_overlap_writes

static int
test_mpio_overlap_writes(char *filename)
{
    int mpi_size, mpi_rank;
    MPI_Comm comm;
    MPI_Info info = MPI_INFO_NULL;
    int color, mrc;
    MPI_File	fh;
    int i;
    int vrfyerrs, nerrs;
    unsigned char  buf[4093];		/* use some prime number for size */
    int bufsize = sizeof(buf);
    MPI_Offset  stride;
    MPI_Offset  mpi_off;
    MPI_Status  mpi_stat;


    if (VERBOSE_MED)
	printf("MPIO independent overlapping writes test on file %s\n",
	    filename);

    nerrs = 0;
    /* set up MPI parameters */
    MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);

    /* Need at least 2 processes */
    if (mpi_size < 2) {
	if (MAINPROCESS) {
	    printf("Need at least 2 processes to run MPIO test.\n");
	    printf(" -SKIP- \n");
	}
	return 0;
    }

    /* splits processes 0 to n-2 into one comm. and the last one into another */
    color = ((mpi_rank < (mpi_size - 1)) ? 0 : 1);
    mrc = MPI_Comm_split (MPI_COMM_WORLD, color, mpi_rank, &comm);
    VRFY((mrc==MPI_SUCCESS), "Comm_split succeeded");

    if (color==0){
	/* First n-1 processes (color==0) open a file and write it */
	mrc = MPI_File_open(comm, filename, MPI_MODE_CREATE|MPI_MODE_RDWR,
		info, &fh);
	VRFY((mrc==MPI_SUCCESS), "");

	stride = 1;
	mpi_off = mpi_rank*stride;
	while (mpi_off < MPIO_TEST_WRITE_SIZE){
	    /* make sure the write does not exceed the TEST_WRITE_SIZE */
	    if (mpi_off+stride > MPIO_TEST_WRITE_SIZE)
		stride = MPIO_TEST_WRITE_SIZE - mpi_off;

	    /* set data to some trivial pattern for easy verification */
	    for (i=0; i<stride; i++)
		buf[i] = (unsigned char)(mpi_off+i);
	    mrc = MPI_File_write_at(fh, mpi_off, buf, (int)stride, MPI_BYTE,
		    &mpi_stat);
	    VRFY((mrc==MPI_SUCCESS), "");

	    /* move the offset pointer to last byte written by all processes */
	    mpi_off += (mpi_size - 1 - mpi_rank) * stride;

	    /* Increase chunk size without exceeding buffer size. */
	    /* Then move the starting offset for next write. */
	    stride *= 2;
	    if (stride > bufsize)
		stride = bufsize;
	    mpi_off += mpi_rank*stride;
	}

	/* close file and free the communicator */
	mrc = MPI_File_close(&fh);
	VRFY((mrc==MPI_SUCCESS), "MPI_FILE_CLOSE");
	mrc = MPI_Comm_free(&comm);
	VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free");

	/* sync with the other waiting processes */
	mrc = MPI_Barrier(MPI_COMM_WORLD);
	VRFY((mrc==MPI_SUCCESS), "Sync after writes");
    }else{
	/* last process waits till writes are done,
	 * then opens file to verify data.
	 */
	mrc = MPI_Barrier(MPI_COMM_WORLD);
	VRFY((mrc==MPI_SUCCESS), "Sync after writes");

	mrc = MPI_File_open(comm, filename, MPI_MODE_RDONLY,
		info, &fh);
	VRFY((mrc==MPI_SUCCESS), "");

	stride = bufsize;
	for (mpi_off=0; mpi_off < MPIO_TEST_WRITE_SIZE; mpi_off += bufsize){
	    /* make sure it does not read beyond end of data */
	    if (mpi_off+stride > MPIO_TEST_WRITE_SIZE)
		stride = MPIO_TEST_WRITE_SIZE - mpi_off;
	    mrc = MPI_File_read_at(fh, mpi_off, buf, (int)stride, MPI_BYTE,
		    &mpi_stat);
	    VRFY((mrc==MPI_SUCCESS), "");
	    vrfyerrs=0;
	    for (i=0; i<stride; i++){
//......... part of the code omitted here .........
Developer: Len3d, Project: appleseed, Lines: 101, Source: t_mpi.c

Example 12: test_mpio_gb_file


//......... part of the code omitted here .........
	    INFO((mpi_off>0), "4GB OFFSET assignment no overflow");
	    INFO((mpi_off-1)==FOUR_GB_LESS1, "4GB OFFSET assignment succeed");

	    /* verify correctness of increasing from below 4 GB to above 4 GB */
	    mpi_off = FOUR_GB_LESS1;
	    for (i=0; i < 3; i++){
		mpi_off_old = mpi_off;
		mpi_off = mpi_off + 1;
		/* no overflow */
		INFO((mpi_off>0), "4GB OFFSET increment no overflow");
		/* correct inc. */
		INFO((mpi_off-1)==mpi_off_old, "4GB OFFSET increment succeed");
	    }
	}
    }

    /*
     * Verify if we can write to a file of multiple GB sizes.
     */
    if (VERBOSE_MED)
	printf("MPIO GB file test %s\n", filename);

    if (sizeof_mpi_offset <= 4){
	printf("Skipped GB file range test "
		"because MPI_Offset cannot support it\n");
    }else{
	buf = malloc(MB);
	VRFY((buf!=NULL), "malloc succeed");

	/* open a new file. Remove it first in case it exists. */
	/* Must delete because MPI_File_open does not have a Truncate mode. */
	/* Don't care if it has error. */
	MPI_File_delete(filename, MPI_INFO_NULL);
	MPI_Barrier(MPI_COMM_WORLD);	/* prevent racing condition */

	mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE|MPI_MODE_RDWR,
		    info, &fh);
	VRFY((mrc==MPI_SUCCESS), "MPI_FILE_OPEN");

	printf("MPIO GB file write test %s\n", filename);

	/* instead of writing every bytes of the file, we will just write
	 * some data around the 2 and 4 GB boundaries.  That should cover
	 * potential integer overflow and filesystem size limits.
	 */
	writerrs = 0;
	for (n=2; n <= 4; n+=2){
	    ntimes = GB/MB*n/mpi_size + 1;
	    for (i=ntimes-2; i <= ntimes; i++){
		mpi_off = (i*mpi_size + mpi_rank)*(MPI_Offset)MB;
		if (VERBOSE_MED)
		    HDfprintf(stdout,"proc %d: write to mpi_off=%016llx, %lld\n",
			mpi_rank, mpi_off, mpi_off);
		/* set data to some trivial pattern for easy verification */
		for (j=0; j<MB; j++)
		    *(buf+j) = i*mpi_size + mpi_rank;
		if (VERBOSE_MED)
		    HDfprintf(stdout,"proc %d: writing %d bytes at offset %lld\n",
			mpi_rank, MB, mpi_off);
		mrc = MPI_File_write_at(fh, mpi_off, buf, MB, MPI_BYTE, &mpi_stat);
		INFO((mrc==MPI_SUCCESS), "GB size file write");
		if (mrc!=MPI_SUCCESS)
		    writerrs++;
	    }
	}
Developer: Len3d, Project: appleseed, Lines: 66, Source: t_mpi.c

Example 13: main


//......... part of the code omitted here .........
    MPI_BANNER("MPIO independent overlapping writes...");
    ret_code = test_mpio_overlap_writes(filenames[0]);
    ret_code = errors_sum(ret_code);
    if (mpi_rank==0 && ret_code > 0){
	printf("***FAILED with %d total errors\n", ret_code);
	nerrors += ret_code;
    }

    /*=======================================
     * MPIO complicated derived datatype test
     *=======================================*/
    /* test_mpio_derived_dtype often hangs when fails.
     * Do not run it if it is known NOT working unless ask to
     * run explicitly by high verbose mode.
     */
#ifdef H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS
    MPI_BANNER("MPIO complicated derived datatype test...");
    ret_code = test_mpio_derived_dtype(filenames[0]);
#else
    if (VERBOSE_HI){
	MPI_BANNER("MPIO complicated derived datatype test...");
	ret_code = test_mpio_derived_dtype(filenames[0]);
    }else{
	MPI_BANNER("MPIO complicated derived datatype test SKIPPED.");
	ret_code = 0;	/* fake ret_code */
    }
#endif
    ret_code = errors_sum(ret_code);
    if (mpi_rank==0 && ret_code > 0){
	printf("***FAILED with %d total errors\n", ret_code);
	nerrors += ret_code;
    }

    /*=======================================
     * MPIO special collective IO  test
     *=======================================*/
    /* test_special_collective_io  often hangs when fails.
     * Do not run it if it is known NOT working unless ask to
     * run explicitly by high verbose mode.
     */
    if(mpi_size !=4){
      MPI_BANNER("MPIO special collective io test SKIPPED.");
      if(mpi_rank == 0){
        printf("Use FOUR processes to run this test\n");
        printf("If you still see the <test SKIPPED>, use <-vh> option to verify the test\n");
  }
      ret_code = 0;
      goto sc_finish;
    }

#ifdef H5_MPI_SPECIAL_COLLECTIVE_IO_WORKS
    MPI_BANNER("MPIO special collective io test...");
    ret_code = test_mpio_special_collective(filenames[0]);

#else
    if (VERBOSE_HI){
	MPI_BANNER("MPIO special collective io test...");
	ret_code = test_mpio_special_collective(filenames[0]);
    }else{
	MPI_BANNER("MPIO special collective io test SKIPPED.");
	ret_code = 0;	/* fake ret_code */
    }
#endif

sc_finish:
    ret_code = errors_sum(ret_code);
    if (mpi_rank==0 && ret_code > 0){
	printf("***FAILED with %d total errors\n", ret_code);
	nerrors += ret_code;
    }


finish:
    /* make sure all processes are finished before final report, cleanup
     * and exit.
     */
    MPI_Barrier(MPI_COMM_WORLD);
    if (MAINPROCESS){		/* only process 0 reports */
	printf("===================================\n");
	if (nerrors){
	    printf("***MPI tests detected %d errors***\n", nerrors);
	}
	else{
	    printf("MPI tests finished with no errors\n");
	}
	printf("===================================\n");
    }

    /* turn off alarm */
    ALARM_OFF;

    h5_cleanup(FILENAME, fapl);
    H5close();

    /* MPI_Finalize must be called AFTER H5close which may use MPI calls */
    MPI_Finalize();

    /* cannot just return (nerrors) because exit code is limited to 1byte */
    return(nerrors!=0);
}
Developer: Len3d, Project: appleseed, Lines: 101, Source: t_mpi.c

Example 14: Init_ForecastData

ForecastData* Init_ForecastData(char* fcst_filename,unsigned int string_size)
{
	FILE* inputfile = NULL;
	ForecastData* Forecaster;
	int errorcode,valsread;
	char end_char;
	unsigned int buff_size = string_size + 20;
	char* linebuffer = (char*) malloc(buff_size*sizeof(char));
	MPI_Barrier(MPI_COMM_WORLD);

	if(my_rank == 0)
	{
		//Open file
		inputfile = fopen(fcst_filename,"r");
		errorcode = 0;
		if(!inputfile)
		{
			printf("[%i]: Error opening file %s.\n",my_rank,fcst_filename);
			errorcode = 1;
		}
	}

	//Check if forecast file was openned
	MPI_Bcast(&errorcode,1,MPI_INT,0,MPI_COMM_WORLD);
	if(errorcode)	return NULL;

	//Reserve space
	Forecaster = (ForecastData*) malloc(sizeof(ForecastData));
	Forecaster->model_name = (char*) malloc(string_size*sizeof(char));

	//Read table name
	//if(my_rank == 0)
	{
		ReadLineFromTextFile(inputfile,linebuffer,buff_size,string_size);
		valsread = sscanf(linebuffer,"%s",Forecaster->model_name);
		if(ReadLineError(valsread,1,"forecaster model name"))	return NULL;
		//length = strlen(Forecaster->model_name);
	}
	//MPI_Bcast(&length,1,MPI_UNSIGNED,0,MPI_COMM_WORLD);
	//MPI_Bcast(Forecaster->model_name,length+1,MPI_CHAR,0,MPI_COMM_WORLD);

	//Read if data is displayed on ifis
	//if(my_rank == 0)
	{
		ReadLineFromTextFile(inputfile,linebuffer,buff_size,string_size);
		valsread = sscanf(linebuffer,"%hi",&(Forecaster->ifis_display));
		if(ReadLineError(valsread,1,"flag if displaying on ifis"))	return NULL;
	}
	//MPI_Bcast(&(Forecaster->ifis_display),1,MPI_SHORT,0,MPI_COMM_WORLD);

	//Read which forcing index is used for forecasting
	//if(my_rank == 0)
	{
		ReadLineFromTextFile(inputfile,linebuffer,buff_size,string_size);
		valsread = sscanf(linebuffer,"%u",&(Forecaster->forecasting_forcing));
		if(ReadLineError(valsread,1,"index of forecastin forcing"))	return NULL;
	}
	//MPI_Bcast(&(Forecaster->forecasting_forcing),1,MPI_UNSIGNED,0,MPI_COMM_WORLD);

	//Read number of rainfall steps to use per forecast
	//if(my_rank == 0)
	{
		ReadLineFromTextFile(inputfile,linebuffer,buff_size,string_size);
		valsread = sscanf(linebuffer,"%u",&(Forecaster->num_rainsteps));
		if(ReadLineError(valsread,1,"number of precipitation values"))	return NULL;
	}
	//MPI_Bcast(&(Forecaster->num_rainsteps),1,MPI_UNSIGNED,0,MPI_COMM_WORLD);

	//Read forecast window
	ReadLineFromTextFile(inputfile,linebuffer,buff_size,string_size);
	valsread = sscanf(linebuffer,"%lf",&(Forecaster->forecast_window));
	if(ReadLineError(valsread,1,"forecast window"))	return NULL;

	//Read and create a database connection for the rain maps
	Forecaster->rainmaps_filename = NULL;
	Forecaster->rainmaps_db = NULL;
	//if(my_rank == 0)
	{
		Forecaster->rainmaps_filename = (char*) malloc(string_size*sizeof(char));
		ReadLineFromTextFile(inputfile,linebuffer,buff_size,string_size);
		valsread = sscanf(linebuffer,"%s",Forecaster->rainmaps_filename);
		if(ReadLineError(valsread,1,"rain map filename"))	return NULL;

		Forecaster->rainmaps_db = ReadDBC(Forecaster->rainmaps_filename,string_size);
		if(!Forecaster->rainmaps_db)	return NULL;
	}

	//Read halt filename
	Forecaster->halt_filename = (char*) malloc(string_size*sizeof(char));
	//if(my_rank == 0)
	{
		ReadLineFromTextFile(inputfile,linebuffer,buff_size,string_size);
		valsread = sscanf(linebuffer,"%s",Forecaster->halt_filename);
		if(ReadLineError(valsread,1,"halt filename"))	return NULL;
		//length = strlen(Forecaster->halt_filename);
	}
	//MPI_Bcast(&length,1,MPI_UNSIGNED,0,MPI_COMM_WORLD);
	//MPI_Bcast(Forecaster->halt_filename,length+1,MPI_CHAR,0,MPI_COMM_WORLD);

	//Read ending mark
//......... part of the code omitted here .........
Developer: Iowa-Flood-Center, Project: asynch, Lines: 101, Source: forecaster_methods.c
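
Example 14 lets only rank 0 open the forecast file and then broadcasts an error code so that every rank returns NULL together instead of some ranks blocking on a later collective. Below is a minimal, hypothetical sketch of that error-propagation idiom; the names and structure are not from the asynch project.

#include <stdio.h>
#include <mpi.h>

/* Rank 0 tries to open a file; all ranks learn whether it succeeded. */
static FILE* open_on_root(const char *path, int my_rank, MPI_Comm comm)
{
    FILE *fp = NULL;
    int errorcode = 0;

    if (my_rank == 0)
    {
        fp = fopen(path, "r");
        if (!fp)
            errorcode = 1;
    }

    /* Every rank receives the same error code, so all ranks take the same branch. */
    MPI_Bcast(&errorcode, 1, MPI_INT, 0, comm);

    return errorcode ? NULL : fp;   /* on success only rank 0 holds an open handle, as in Example 14 */
}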

Example 15: Scheduler


//......... part of the code omitted here .........
				Fluid::DENS,
				Fluid::BX,
				Fluid::BY,
				Fluid::BZ};

			std::vector<float> data(local_n);
			for (int var = 0; var < 7; var++)
			{
				fprintf(stderr, " reading vat %d out of %d \n", var+1, 7);
				int ival;
				size_t nread;
				nread = fread(&ival, sizeof(int), 1, fin); assert(ival == local_n*(int)sizeof(float));
				nread = fread(&data[0], sizeof(float), local_n, fin);
				assert((int)nread == local_n);
				nread = fread(&ival, sizeof(int), 1, fin); assert(ival == local_n*(int)sizeof(float));
				for (int i = 0; i < local_n; i++)
					U[i][var_list[var]] = data[i];
			}
			for (int i = 0; i < local_n; i++)
			{
				assert(U[i][Fluid::DENS] > 0.0);
				U[i][Fluid::ETHM] = cs2 * U[i][Fluid::DENS];
			}


			fclose(fin);

			fprintf(stderr, "  *** proc= %d : local_n= %d  global_n= %d \n", myproc, local_n, global_n);
		} // myproc == 0

		MPI_Bcast(&global_n,  1, MPI_INT, 0, MPI_COMM_WORLD);

		fprintf(stderr, " proc= %d  distrubite \n", myproc);
		MPI_Barrier(MPI_COMM_WORLD);

		Distribute::int3 nt(1, 1, 1);
		switch(nproc) {
			case 1: break;
			case 2: nt.x = 2; nt.y = 1; nt.z = 1; break;
			case 4: nt.x = 2; nt.y = 2; nt.z = 1; break;
			case 6: nt.x = 3; nt.y = 2; nt.z = 1; break;
			case 8: nt.x = 2; nt.y = 2; nt.z = 2; break;
			case 16: nt.x = 4; nt.y = 2; nt.z = 2; break;
			case 32: nt.x = 4; nt.y = 4; nt.z = 2; break;
			case 64: nt.x = 4; nt.y = 4; nt.z = 4; break;
			case 128: nt.x = 8; nt.y = 4; nt.z = 4; break;
			case 256: nt.x = 8; nt.y = 8; nt.z = 4; break;
			case 512: nt.x = 8; nt.y = 8; nt.z = 8; break;
			default: assert(false);
		}

		const Distribute::int3 nt_glb(nt);
		const pBoundary pglobal_domain(pfloat3(0.0), pfloat3(Len3));
		distribute_glb.set(nproc, nt, pglobal_domain);

		for (int k = 0; k < 5; k++)
			distribute_data(true, false);

		const int nloc_reserve = (int)(2.0*global_n/nproc);
		fit_reserve_vec(ptcl,      nloc_reserve);
		fit_reserve_vec(ptcl_ppos, nloc_reserve);
		fit_reserve_vec(U,         nloc_reserve);
		fit_reserve_vec(dU,        nloc_reserve);
		fit_reserve_vec(Wgrad,     nloc_reserve);
		fit_reserve_vec(gradPsi,   nloc_reserve);
		fit_reserve_vec(cells,     nloc_reserve);
Developer: QirongZhu, Project: fvmhd3d, Lines: 67, Source: mhd_turbulence_sph.cpp


Note: The MPI_Barrier examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets come from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and redistribution or use should follow the corresponding project licenses. Do not reproduce without permission.