

C++ Epetra_SerialComm::Broadcast Method Code Examples

This article collects typical usage examples of the C++ method Epetra_SerialComm::Broadcast. If you are looking for how Epetra_SerialComm::Broadcast is used in practice, the curated examples below should help. You can also browse more usage examples of the Epetra_SerialComm class, where this method is defined.


Eight code examples of the Epetra_SerialComm::Broadcast method are shown below, ordered by popularity by default.
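Before the full examples, here is a minimal, self-contained sketch of the call pattern they all share: a root rank produces a value, and Broadcast(MyVals, Count, Root) makes it visible on every rank. This sketch is illustrative and not taken from any of the projects below; it assumes a Trilinos installation with the Epetra headers available. With Epetra_SerialComm the broadcast is a single-process no-op that returns 0, but the same code compiles unchanged against Epetra_MpiComm, which is why the idiom appears even in serial builds.

#include "Epetra_SerialComm.h"
#include <iostream>

int main() {
  Epetra_SerialComm Comm; // one process; MyPID() is always 0

  // The root rank (0) sets the value; after Broadcast every rank holds it.
  // On a serial comm the call leaves the buffer untouched and returns 0.
  int flag = (Comm.MyPID() == 0) ? 1 : 0;
  int ierr = Comm.Broadcast(&flag, 1 /* Count */, 0 /* Root */);

  std::cout << "Broadcast returned " << ierr
            << ", flag = " << flag << std::endl;
  return 0;
}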

Example 1: main

int main(int argc, char *argv[]) {

  int returnierr=0;

  bool verbose = false;

#ifdef EPETRA_MPI

  // Initialize MPI

  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);

#else
  Epetra_SerialComm Comm;
#endif

  // Check if we should print results to standard out
  if (argc>1) {
    if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true;
  }

  // Make sure the value of verbose is consistent across processors.
  int verbose_int = verbose ? 1 : 0;
  Comm.Broadcast(&verbose_int, 1, 0);
  verbose = verbose_int==1 ? true : false;

  if (!verbose) {
    Comm.SetTracebackMode(0); // This should shut down error traceback reporting
  }

  if (verbose && Comm.MyPID()==0)
    cout << EpetraExt::EpetraExt_Version() << endl << endl;

  EPETRA_CHK_ERR( check_rowpermute_crsmatrix_local_diagonal( Comm, verbose ) );

  EPETRA_CHK_ERR( check_rowpermute_crsmatrix_global_diagonal( Comm, verbose) );

  EPETRA_CHK_ERR( check_rowpermute_crsgraph_local_diagonal( Comm, verbose) );

  EPETRA_CHK_ERR( check_colpermute_crsgraph( Comm, verbose) );

  EPETRA_CHK_ERR( check_colpermute_crsmatrix( Comm, verbose) );

  EPETRA_CHK_ERR( check_rowpermute_multivector_local( Comm, verbose) );


#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  return returnierr;
}
Developer: haripandey, Project: trilinos, Lines: 53, Source: cxx_main.cpp

Example 2: Initialize

  void BurkardtFileIOHandler::Initialize( const Teuchos::RCP<Teuchos::ParameterList>& params )
  {

#ifdef EPETRA_MPI
    Epetra_MpiComm comm( MPI_COMM_WORLD );
#else
    Epetra_SerialComm comm;
#endif

    // Get the "File I/O" sublist.
    Teuchos::ParameterList& fileio_params = params->sublist( "File IO" );
    
    if( fileio_params.isParameter("Burkardt Data Format File") ) 
      {      
        std::string format_file = Teuchos::getParameter<std::string>( fileio_params, "Burkardt Data Format File" );
        //
        // The first processor gets the number of nodes from the data format file and then broadcasts it.
        //
        if ( comm.MyPID() == 0 ) 
          num_nodes = data_size( format_file );
        comm.Broadcast( &num_nodes, 1, 0 );
        // if (!num_nodes) { TO DO:  THROW EXCEPTION! }
        isInit = true;
      } 
    else 
    {
      // Can't find the data size or data format file
      isInit = false;
      TEUCHOS_TEST_FOR_EXCEPTION(true, std::runtime_error, "Cannot find the data size or data format file 'Burkardt Data Format File'!");
    }

    // Get the input path.
    in_path = "";
    if ( fileio_params.isParameter( "Data Input Path" ) ) {       
      in_path = Teuchos::getParameter<std::string>( fileio_params, "Data Input Path" );
    }

    // Get the output path.
    out_path = "";
    if ( fileio_params.isParameter( "Data Output Path" ) ) {
      out_path = Teuchos::getParameter<std::string>( fileio_params, "Data Output Path" );
    }

    // This file I/O handler is now initialized.
    isInit = true;
  }
Developer: Tech-XCorp, Project: Trilinos, Lines: 46, Source: RBGen_BurkardtFileIOHandler.cpp

Example 3: main

int main(int argc, char *argv[])
{
  int ierr = 0;

#ifdef EPETRA_MPI

  // Initialize MPI

  MPI_Init(&argc, &argv);
  int rank; // My process ID

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  Epetra_MpiComm Comm( MPI_COMM_WORLD );

#else

  int rank = 0;
  Epetra_SerialComm Comm;

#endif

  bool verbose = false;

  // Check if we should print results to standard out
  if (argc>1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true;

  int verbose_int = verbose ? 1 : 0;
  Comm.Broadcast(&verbose_int, 1, 0);
  verbose = verbose_int==1 ? true : false;

  Comm.SetTracebackMode(0); // This should shut down any error traceback reporting
  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  if(verbose && MyPID==0)
    std::cout << Epetra_Version() << std::endl << std::endl;

  if (verbose) std::cout << "Processor "<<MyPID<<" of "<< NumProc
		    << " is alive."<< std::endl;

  // unused: bool verbose1 = verbose;

  // Redefine verbose to only print on PE 0
  if(verbose && rank!=0) 
    verbose = false;

  if (verbose) std::cout << "Test the memory management system of the class CrsMatrix (memory leak, invalid free)" << std::endl;

  //
  // Test 1: code initially proposed to illustrate bug #5499
  //
  
  if(Comm.NumProc() == 1) { // this is a sequential test

    if (verbose) std::cout << "* Using Copy, ColMap, Variable number of indices per row and Static profile (cf. bug #5499)." << std::endl;

    // Row Map
    Epetra_Map RowMap(2LL, 0LL, Comm);
    
    // ColMap  
    std::vector<long long> colids(2);
    colids[0]=0;
    colids[1]=1;
    Epetra_Map ColMap(-1LL, 2, &colids[0], 0LL, Comm);

    // NumEntriesPerRow
    std::vector<int> NumEntriesPerRow(2);
    NumEntriesPerRow[0]=2;
    NumEntriesPerRow[1]=2;
    
    // Test
    Epetra_CrsMatrix A(Copy, RowMap, ColMap, &NumEntriesPerRow[0], true);
    // Bug #5499 shows up because InsertGlobalValues() is not called (CrsMatrix::Values_ not allocated but freed)
    A.FillComplete();
    
  }

  //
  // Test 1 Bis: same as Test 1, but without a ColMap or a variable number of indices per row. Does not seem to matter.
  //
  
  if(Comm.NumProc() == 1) { // this is a sequential test

    if (verbose) std::cout << "* Using Copy, Fixed number of indices per row and Static profile" << std::endl;

    Epetra_Map RowMap(2LL, 0LL, Comm);

    // Test
    Epetra_CrsMatrix    A(Copy, RowMap, 1, true);
    // Bug #5499 shows up because InsertGlobalValues() is not called (CrsMatrix::Values_ not allocated but freed)
    A.FillComplete();
    
  }

  //
  // Test 2: same as Test 1 Bis but with one call to InsertGlobalValues.
  //

  if(Comm.NumProc() == 1) {

//......... (some code omitted here) .........
Developer: gitter-badger, Project: quinoa, Lines: 101, Source: memorytest_main.cpp

Example 4: main

int main(int argc, char *argv[])
{
  int ierr = 0, i, forierr = 0;
#ifdef EPETRA_MPI

  // Initialize MPI

  MPI_Init(&argc,&argv);
  int rank; // My process ID

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  Epetra_MpiComm Comm( MPI_COMM_WORLD );

#else

  int rank = 0;
  Epetra_SerialComm Comm;

#endif

  bool verbose = false;

  // Check if we should print results to standard out
  if (argc>1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true;

  int verbose_int = verbose ? 1 : 0;
  Comm.Broadcast(&verbose_int, 1, 0);
  verbose = verbose_int==1 ? true : false;


  //  char tmp;
  //  if (rank==0) cout << "Press any key to continue..."<< endl;
  //  if (rank==0) cin >> tmp;
  //  Comm.Barrier();

  Comm.SetTracebackMode(0); // This should shut down any error traceback reporting
  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  if(verbose && MyPID==0)
    cout << Epetra_Version() << endl << endl;

  if (verbose) cout << "Processor "<<MyPID<<" of "<< NumProc
		    << " is alive."<<endl;

  // Redefine verbose to only print on PE 0
  if(verbose && rank!=0)
    verbose = false;

  int NumMyEquations = 10000;
  long long NumGlobalEquations = (NumMyEquations * NumProc) + EPETRA_MIN(NumProc,3);
  if(MyPID < 3)
    NumMyEquations++;

  // Construct a Map that puts approximately the same Number of equations on each processor

  Epetra_Map Map(NumGlobalEquations, NumMyEquations, 0LL, Comm);

  // Get update list and number of local equations from newly created Map
  vector<long long> MyGlobalElements(Map.NumMyElements());
  Map.MyGlobalElements(&MyGlobalElements[0]);

  // Create an integer vector NumNz that is used to build the Petra Matrix.
  // NumNz[i] is the Number of OFF-DIAGONAL term for the ith global equation on this processor

  vector<int> NumNz(NumMyEquations);

  // We are building a tridiagonal matrix where each row has (-1 2 -1)
  // So we need 2 off-diagonal terms (except for the first and last equation)

  for(i = 0; i < NumMyEquations; i++)
    if((MyGlobalElements[i] == 0) || (MyGlobalElements[i] == NumGlobalEquations - 1))
      NumNz[i] = 1;
    else
      NumNz[i] = 2;

  // Create a Epetra_Matrix

  Epetra_CrsMatrix A(Copy, Map, &NumNz[0]);
  EPETRA_TEST_ERR(A.IndicesAreGlobal(),ierr);
  EPETRA_TEST_ERR(A.IndicesAreLocal(),ierr);

  // Add  rows one-at-a-time
  // Need some vectors to help
  // Off diagonal Values will always be -1


  vector<double> Values(2);
  Values[0] = -1.0;
  Values[1] = -1.0;
  vector<long long> Indices(2);
  double two = 2.0;
  int NumEntries;

  forierr = 0;
  for(i = 0; i < NumMyEquations; i++) {
    if(MyGlobalElements[i] == 0) {
      Indices[0] = 1;
      NumEntries = 1;
    }
//......... (some code omitted here) .........
Developer: 00liujj, Project: trilinos, Lines: 101, Source: cxx_main.cpp

Example 5: main

int main(int argc, char *argv[])
{
  int ierr = 0, forierr = 0;
  bool debug = false;

#ifdef EPETRA_MPI

  // Initialize MPI

  MPI_Init(&argc,&argv);
  int rank; // My process ID

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  Epetra_MpiComm Comm( MPI_COMM_WORLD );

#else

  int rank = 0;
  Epetra_SerialComm Comm;

#endif

  bool verbose = false;

  // Check if we should print results to standard out
  if (argc>1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true;

  int verbose_int = verbose ? 1 : 0;
  Comm.Broadcast(&verbose_int, 1, 0);
  verbose = verbose_int==1 ? true : false;


  //  char tmp;
  //  if (rank==0) cout << "Press any key to continue..."<< std::endl;
  //  if (rank==0) cin >> tmp;
  //  Comm.Barrier();

  Comm.SetTracebackMode(0); // This should shut down any error traceback reporting
  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  if(verbose && MyPID==0)
    cout << Epetra_Version() << std::endl << std::endl;

  if (verbose) cout << "Processor "<<MyPID<<" of "<< NumProc
		    << " is alive."<<endl;

  bool verbose1 = verbose;

  // Redefine verbose to only print on PE 0
  if(verbose && rank!=0)
    verbose = false;

  int NumMyEquations = 10000;
  int NumGlobalEquations = (NumMyEquations * NumProc) + EPETRA_MIN(NumProc,3);
  if(MyPID < 3) 
    NumMyEquations++;

  // Construct a Map that puts approximately the same Number of equations on each processor

  Epetra_Map Map(NumGlobalEquations, NumMyEquations, 0, Comm);
  
  // Get update list and number of local equations from newly created Map
  int* MyGlobalElements = new int[Map.NumMyElements()];
  Map.MyGlobalElements(MyGlobalElements);

  // Create an integer vector NumNz that is used to build the Petra Matrix.
  // NumNz[i] is the Number of OFF-DIAGONAL term for the ith global equation on this processor

  int* NumNz = new int[NumMyEquations];

  // We are building a tridiagonal matrix where each row has (-1 2 -1)
  // So we need 2 off-diagonal terms (except for the first and last equation)

  for (int i = 0; i < NumMyEquations; i++)
    if((MyGlobalElements[i] == 0) || (MyGlobalElements[i] == NumGlobalEquations - 1))
      NumNz[i] = 1;
    else
      NumNz[i] = 2;

  // Create a Epetra_Matrix

  Epetra_CrsMatrix A(Copy, Map, NumNz);
  EPETRA_TEST_ERR(A.IndicesAreGlobal(),ierr);
  EPETRA_TEST_ERR(A.IndicesAreLocal(),ierr);
  
  // Add  rows one-at-a-time
  // Need some vectors to help
  // Off diagonal Values will always be -1


  double* Values = new double[2];
  Values[0] = -1.0;
  Values[1] = -1.0;
  int* Indices = new int[2];
  double two = 2.0;
  int NumEntries;

  forierr = 0;
  for (int i = 0; i < NumMyEquations; i++) {
//......... (some code omitted here) .........
Developer: cakeisalie, Project: oomphlib_003, Lines: 101, Source: cxx_main.cpp

Example 6: comm


//......... (some code omitted here) .........
	if (status != NC_NOERR) handle_error(status);
	status = nc_inq_dimlen(ncid,col_id, &cols_t);
	if (status != NC_NOERR) handle_error(status);
	//
	// Get number of nodal variables.
	status=nc_inq_dimid(ncid,"num_nod_var",&num_nod_var_id);
	if (status != NC_NOERR) handle_error(status);
	status=nc_inq_dimlen(ncid,num_nod_var_id,&num_nod_var_t);
	if (status != NC_NOERR) handle_error(status);
	//
	//
        TEUCHOS_TEST_FOR_EXCEPTION(cols_t != cols0 || (int)num_nod_var_t != num_nod_var, std::runtime_error, "Data set in file "+temp_filename+" is of inconsistent size!");
	total_rows += rows_t;
	//
	// If the scaling index vector is needed we can set the endpoint here.
	//
	if (createSSIdx) {
	  idx_pair.second = total_rows-1;
	  scaling_idx.push_back( idx_pair );
	}
	// Close the file.
	status = nc_close(ncid);
	if (status != NC_NOERR) handle_error(status);
      }

      // Convert from size_t to int.
      num_ss = total_rows;
      num_vars = cols0;

      std::cout<<"Number of snapshots: "<< num_ss << std::endl;
      std::cout<<"Length of snapshot : "<< num_vars << std::endl;
    }
    // Broadcast information about size of snapshot matrix.
    comm.Broadcast( &num_ss, 1, 0 );
    comm.Broadcast( &num_vars, 1, 0 );
    //
    // Sync all other processors on the scaling index vector if necessary
    // 
    if (createSSIdx) {
      for (i=0; i<(int)filenames.size(); i++) {
	if ( comm.MyPID() != 0 )
	  scaling_idx.push_back( idx_pair );
	comm.Broadcast( &scaling_idx[i].first, 1, 0 );
	comm.Broadcast( &scaling_idx[i].second, 1, 0 );
      }
      // Set the scaling index vector
      //params_->set("Snapshot Scaling Indices", scaling_idx);   
    }
    //
    // Create maps for new Epetra_MultiVector to hold the snapshots and 
    // temporary Epetra_Vector used by processor 0 to import the information.
    //
    Epetra_Map Map( num_vars, 0, comm );
    Teuchos::RCP<Epetra_MultiVector> newMV = Teuchos::rcp( new Epetra_MultiVector( Map, num_ss ) );
    Epetra_Vector *col_newMV = 0;
    Epetra_Map *Proc0Map = 0;
    int *index = 0;
    float *temp_vec_f = 0;
    double *temp_vec_d = 0;
    //
    if ( comm.MyPID() == 0 ) {
      Proc0Map = new Epetra_Map( num_vars, num_vars, 0, comm );
      temp_vec_f = new float [ num_vars ];
      temp_vec_d = new double [ num_vars ];
      index = new int[ num_vars ];
      for ( i=0; i<num_vars; i++ ) { index[i] = i; }
Developer: Tech-XCorp, Project: Trilinos, Lines: 67, Source: RBGen_NetCDFFileIOHandler.cpp

Example 7: main


//......... (some code omitted here) .........
  MLList.set("smoother: Block Chebyshev block list",&Blockids[0]);    
  MLList.set("coarse: type","Amesos-KLU");  
  MLList.set("max levels",2);
  MLList.set("ML output",10);  
  MLList.set("smoother: polynomial order",2);
  strcpy(mystring,"IFPACK Block Cheby (Trivial)");
  TestMultiLevelPreconditioner(mystring, MLList, BadProblem,
                               TotalErrorResidual, TotalErrorExactSol);
  delete [] BlockStarts; delete [] Blockids;

  
  // ====================== //
  // IFPACK Block Cheby (Smart)
  // ====================== //
  // Figure out how many blocks we actually have and build a map...
  Epetra_Map* IfpackMap;
  int g_NumBlocks=-1,g_MaxSize=-1;
  if(Comm.MyPID() == 0){
    const int lineLength = 1025;
    char line[lineLength];

    FILE * f=fopen("localids_in_blocks.dat","r");
    assert(f!=0);

    // Next, strip off header lines (which start with "%")
    do {
      if(fgets(line, lineLength,f)==0) return(-4);
    } while (line[0] == '%');

    // Grab the number we actually care about
    sscanf(line, "%d %d", &g_NumBlocks, &g_MaxSize);
    fclose(f);
  }
  Comm.Broadcast(&g_NumBlocks,1,0);
  Comm.Broadcast(&g_MaxSize,1,0);
  Epetra_Map BlockMap(g_NumBlocks,0,Comm);   
  Epetra_MultiVector *blockids_disk=0;
  rv=EpetraExt::MatrixMarketFileToMultiVector("localids_in_blocks.dat",BlockMap,blockids_disk);

  // Put all the block info into the right place
  NumBlocks=BlockMap.NumMyElements();
  BlockStarts=new int[NumBlocks+1];
  Blockids= new int[g_MaxSize*NumBlocks];
  // NTS: Blockids_ is overallocated because I don't want to write a counting loop
  int i,cidx;
  for(i=0,cidx=0;i<NumBlocks;i++){
    BlockStarts[i]=cidx;
    Blockids[cidx]=(int)(*blockids_disk)[0][i];cidx++;
    if((*blockids_disk)[1][i] > 1e-2){
      Blockids[cidx]=(int)(*blockids_disk)[1][i];cidx++;
    }    
  }
  BlockStarts[NumBlocks]=cidx;
  
  
  if (Comm.MyPID() == 0) PrintLine();
  ML_Epetra::SetDefaults("SA",MLList);
  MLList.set("smoother: type","IFPACK-Block Chebyshev");
  MLList.set("smoother: Block Chebyshev number of blocks",NumBlocks);
  MLList.set("smoother: Block Chebyshev block starts",&BlockStarts[0]);
  MLList.set("smoother: Block Chebyshev block list",&Blockids[0]);    
  MLList.set("coarse: type","Amesos-KLU");  
  MLList.set("max levels",2);
  MLList.set("ML output",10);  
  MLList.set("smoother: polynomial order",2);
  strcpy(mystring,"IFPACK Block Cheby (Smart)");
Developer: haripandey, Project: trilinos, Lines: 67, Source: BlockCheby.cpp

Example 8: main

int main(int argc, char *argv[])
{

#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
  // define an Epetra communicator
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // get the proc ID of this process
  int MyPID = Comm.MyPID();
  
  // get the total number of processes
  int NumProc = Comm.NumProc();
  
  // output some information to std output
  cout << Comm << endl;
  
  // ======================== //
  // now some basic MPI calls //
  // ------------------------ //
  
  int    ivalue;
  double dvalue, dvalue2;
  double* dvalues;  dvalues  = new double[NumProc];
  double* dvalues2; dvalues2 = new double[NumProc];
  int root = 0;
  
  // equivalent to MPI_Barrier
  
  Comm.Barrier();
   
  if (MyPID == root) {
    dvalue = 12.0;
    ivalue = 12; // also set the integer on the root, so the broadcast
                 // below does not transmit an indeterminate value
  }

  // On input, the root processor contains the list of values
  // (in this case, a single value). On exit, all processes will
  // have the same list of values. Note that all values must be allocated
  // before the broadcast.
  
  // equivalent to MPI_Bcast
    
  Comm.Broadcast(&dvalue, 1, root);

  // as before, but with integer values. Since C++ binds to the appropriate
  // overload based on argument type, the data type need not be specified.
  
  Comm.Broadcast(&ivalue, 1, root);

  // equivalent to MPI_Allgather; each process contributes one value,
  // so initialize the local entry first

  dvalues[0] = dvalue;
  Comm.GatherAll(dvalues, dvalues2, 1);

  // equivalent to MPI_Allreduce with MPI_SUM

  dvalue = 1.0*MyPID;

  Comm.SumAll( &dvalue, dvalues, 1);

  // equivalent to MPI_Allreduce with MPI_MAX

  Comm.MaxAll( &dvalue, dvalues, 1);

  // equivalent to MPI_Scan with MPI_SUM

  dvalue = 1.0 * MyPID;
  
  Comm.ScanSum(&dvalue, &dvalue2, 1);

  cout << "On proc " << MyPID << " dvalue2  = " << dvalue2 << endl;
  
  delete[] dvalues;
  delete[] dvalues2;

  // ======================= //
  // Finalize MPI and return //
  // ----------------------- //
    
#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return( EXIT_SUCCESS );
  
} /* main */
Developer: 00liujj, Project: trilinos, Lines: 86, Source: ex1.cpp


Note: The Epetra_SerialComm::Broadcast examples in this article were compiled by 纯净天空 (vimsky) from Github/MSDocs and other open-source code and documentation platforms. The code fragments are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors. Please consult the corresponding project's License before distributing or using the code; do not reproduce without permission.