本文整理汇总了C++中teuchos::RCP::GID方法的典型用法代码示例。如果您正苦于以下问题:C++ RCP::GID方法的具体用法?C++ RCP::GID怎么用?C++ RCP::GID使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类teuchos::RCP的用法示例。
在下文中一共展示了RCP::GID方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: ownedIDs
Teuchos::RCP<PeridigmNS::NeighborhoodData> PeridigmNS::Block::createNeighborhoodDataFromGlobalNeighborhoodData(Teuchos::RCP<const Epetra_BlockMap> globalOverlapScalarPointMap,
Teuchos::RCP<const PeridigmNS::NeighborhoodData> globalNeighborhoodData)
{
  // Build a block-local NeighborhoodData object from the global neighborhood
  // information.  All IDs stored in the returned object are local IDs into
  // the block-specific overlap map (overlapScalarPointMap).
  const int numOwned = ownedScalarPointMap->NumMyElements();
  int* myGlobalIDs = ownedScalarPointMap->MyGlobalElements();
  int* const globalList = globalNeighborhoodData->NeighborhoodList();
  int* const globalPtr = globalNeighborhoodData->NeighborhoodPtr();
  vector<int> localOwnedIDs(numOwned);
  vector<int> localPtr(numOwned);
  vector<int> localList;
  // Translate each owned point's entry in the global neighborhood list into
  // the block-local numbering.
  for(int iPt=0 ; iPt<numOwned ; ++iPt){
    localPtr[iPt] = (int)(localList.size());
    const int gid = myGlobalIDs[iPt];
    localOwnedIDs[iPt] = overlapScalarPointMap->LID(gid);
    // Cursor into the global neighborhood list for this point's entry.
    int cursor = globalPtr[globalOverlapScalarPointMap->LID(gid)];
    const int numNeighbors = globalList[cursor++];
    localList.push_back(numNeighbors);
    for(int n=0 ; n<numNeighbors ; ++n){
      const int neighborGID = globalOverlapScalarPointMap->GID(globalList[cursor++]);
      localList.push_back( overlapScalarPointMap->LID(neighborGID) );
    }
  }
  // Package the three arrays into a NeighborhoodData object for this block.
  Teuchos::RCP<PeridigmNS::NeighborhoodData> result = Teuchos::rcp(new PeridigmNS::NeighborhoodData);
  result->SetNumOwned(localOwnedIDs.size());
  if(!localOwnedIDs.empty()){
    memcpy(result->OwnedIDs(),
           &localOwnedIDs[0],
           localOwnedIDs.size()*sizeof(int));
  }
  if(!localPtr.empty()){
    memcpy(result->NeighborhoodPtr(),
           &localPtr[0],
           localPtr.size()*sizeof(int));
  }
  result->SetNeighborhoodListSize(localList.size());
  if(!localList.empty()){
    memcpy(result->NeighborhoodList(),
           &localList[0],
           localList.size()*sizeof(int));
  }
  return result;
}
示例2:
// =============================================================================
Teuchos::RCP<Epetra_Vector>
VIO::EpetraMesh::Reader::
extractStateData_ ( const vtkSmartPointer<vtkDataSet> & vtkData,
const Teuchos::RCP<const Epetra_Comm> & comm
) const
{
  // Extract the complex-valued state stored as point data on the VTK data
  // set and return it as an Epetra vector with interleaved (re,im) entries.
  // Exactly one point-data array with two components is expected.
  vtkIdType arrayCount = vtkData->GetPointData()->GetNumberOfArrays();
  TEUCHOS_ASSERT_EQUALITY ( arrayCount, 1 );
  const vtkSmartPointer<vtkDataArray> & stateArray =
      vtkData->GetPointData()->GetArray(0);
  vtkIdType componentCount = stateArray->GetNumberOfComponents();
  TEUCHOS_ASSERT_EQUALITY ( componentCount, 2 ); // for *complex* values
  // Total number of grid points.
  vtkIdType numPoints = stateArray->GetNumberOfTuples();
  // Create maps.
  // TODO They are created at another spot already. Avoid the work.
  Teuchos::RCP<Epetra_Map> nodesMap = Teuchos::rcp( new Epetra_Map( numPoints, 0, *comm ) );
  Teuchos::RCP<Epetra_Map> complexValuesMap = createComplexValuesMap_ ( *nodesMap );
  Teuchos::RCP<Epetra_Vector> z =
      Teuchos::rcp ( new Epetra_Vector ( *complexValuesMap ) );
  // Copy the locally owned tuples into z, interleaving the two components.
  double tuple[2];
  const int localNumNodes = nodesMap->NumMyElements();
  for ( int k = 0; k < localNumNodes; k++ )
  {
      stateArray->GetTuple( nodesMap->GID(k), tuple );
      z->ReplaceMyValue( 2*k  , 0, tuple[0] );
      z->ReplaceMyValue( 2*k+1, 0, tuple[1] );
  }
  return z;
}
示例3: tempMyGlobalIDs
// Assembles this material's contribution to the tangent-stiffness (Jacobian)
// matrix using Sacado forward-mode automatic differentiation: for each owned
// point a single-point "mini" discretization is built, the constitutive model
// is evaluated on AD types, and the resulting derivatives are loaded into the
// global tangent.
// NOTE(review): this excerpt is truncated by the source aggregator; the
// remainder of the function body (the AD force evaluation and the scatter of
// derivatives into `jacobian`) is not shown here.
void
PeridigmNS::ElasticPlasticMaterial::computeAutomaticDifferentiationJacobian(const double dt,
const int numOwnedPoints,
const int* ownedIDs,
const int* neighborhoodList,
PeridigmNS::DataManager& dataManager,
PeridigmNS::SerialMatrix& jacobian,
PeridigmNS::Material::JacobianType jacobianType) const
{
// Compute contributions to the tangent matrix on an element-by-element basis
// To reduce memory re-allocation, use static variable to store Fad types for
// current coordinates (independent variables).
static vector<Sacado::Fad::DFad<double> > y_AD;
// Loop over all points.
int neighborhoodListIndex = 0;
for(int iID=0 ; iID<numOwnedPoints ; ++iID){
// Create a temporary neighborhood consisting of a single point and its neighbors.
int numNeighbors = neighborhoodList[neighborhoodListIndex++];
int numEntries = numNeighbors+1;
int numDof = 3*numEntries;
vector<int> tempMyGlobalIDs(numEntries);
// Put the node at the center of the neighborhood at the beginning of the list.
tempMyGlobalIDs[0] = dataManager.getOwnedScalarPointMap()->GID(iID);
vector<int> tempNeighborhoodList(numEntries);
tempNeighborhoodList[0] = numNeighbors;
// Map each neighbor's overlap-local ID to its global ID; in the temporary
// neighborhood the neighbors are simply renumbered 1..numNeighbors.
for(int iNID=0 ; iNID<numNeighbors ; ++iNID){
int neighborID = neighborhoodList[neighborhoodListIndex++];
tempMyGlobalIDs[iNID+1] = dataManager.getOverlapScalarPointMap()->GID(neighborID);
tempNeighborhoodList[iNID+1] = iNID+1;
}
// Serial (single-process) maps for the one-point discretization:
// scalar (block size 1), vector (block size 3), and bond data.
Epetra_SerialComm serialComm;
Teuchos::RCP<Epetra_BlockMap> tempOneDimensionalMap = Teuchos::rcp(new Epetra_BlockMap(numEntries, numEntries, &tempMyGlobalIDs[0], 1, 0, serialComm));
Teuchos::RCP<Epetra_BlockMap> tempThreeDimensionalMap = Teuchos::rcp(new Epetra_BlockMap(numEntries, numEntries, &tempMyGlobalIDs[0], 3, 0, serialComm));
Teuchos::RCP<Epetra_BlockMap> tempBondMap = Teuchos::rcp(new Epetra_BlockMap(1, 1, &tempMyGlobalIDs[0], numNeighbors, 0, serialComm));
// Create a temporary DataManager containing data for this point and its neighborhood.
PeridigmNS::DataManager tempDataManager;
tempDataManager.setMaps(Teuchos::RCP<const Epetra_BlockMap>(),
tempOneDimensionalMap,
Teuchos::RCP<const Epetra_BlockMap>(),
tempThreeDimensionalMap,
tempBondMap);
// The temporary data manager will have the same fields and data as the real data manager.
vector<int> fieldIds = dataManager.getFieldIds();
tempDataManager.allocateData(fieldIds);
tempDataManager.copyLocallyOwnedDataFromDataManager(dataManager);
// Set up numOwnedPoints and ownedIDs.
// There is only one owned ID, and it has local ID zero in the tempDataManager.
int tempNumOwnedPoints = 1;
vector<int> tempOwnedIDs(tempNumOwnedPoints);
tempOwnedIDs[0] = 0;
// Use the scratchMatrix as sub-matrix for storing tangent values prior to loading them into the global tangent matrix.
// Resize scratchMatrix if necessary
if(scratchMatrix.Dimension() < numDof)
scratchMatrix.Resize(numDof);
// Create a list of global indices for the rows/columns in the scratch matrix.
// Each scalar point contributes 3 DOFs: global DOF = 3*globalID + component.
vector<int> globalIndices(numDof);
for(int i=0 ; i<numEntries ; ++i){
int globalID = tempOneDimensionalMap->GID(i);
for(int j=0 ; j<3 ; ++j)
globalIndices[3*i+j] = 3*globalID+j;
}
// Extract pointers to the underlying data in the constitutiveData array.
double *x, *y, *cellVolume, *weightedVolume, *damage, *bondDamage, *edpN, *lambdaN;
tempDataManager.getData(m_modelCoordinatesFieldId, PeridigmField::STEP_NONE)->ExtractView(&x);
tempDataManager.getData(m_coordinatesFieldId, PeridigmField::STEP_NP1)->ExtractView(&y);
tempDataManager.getData(m_volumeFieldId, PeridigmField::STEP_NONE)->ExtractView(&cellVolume);
tempDataManager.getData(m_weightedVolumeFieldId, PeridigmField::STEP_NONE)->ExtractView(&weightedVolume);
tempDataManager.getData(m_damageFieldId, PeridigmField::STEP_NP1)->ExtractView(&damage);
tempDataManager.getData(m_bondDamageFieldId, PeridigmField::STEP_NP1)->ExtractView(&bondDamage);
tempDataManager.getData(m_deviatoricPlasticExtensionFieldId, PeridigmField::STEP_N)->ExtractView(&edpN);
tempDataManager.getData(m_lambdaFieldId, PeridigmField::STEP_N)->ExtractView(&lambdaN);
// Create arrays of Fad objects for the current coordinates, dilatation, and force density
// Modify the existing vector of Fad objects for the current coordinates
if((int)y_AD.size() < numDof)
y_AD.resize(numDof);
// Seed each current-coordinate DOF as an independent AD variable.
for(int i=0 ; i<numDof ; ++i){
y_AD[i].diff(i, numDof);
y_AD[i].val() = y[i];
}
// Create vectors of empty AD types for the dependent variables
vector<Sacado::Fad::DFad<double> > dilatation_AD(numEntries);
vector<Sacado::Fad::DFad<double> > lambdaNP1_AD(numEntries);
int numBonds = tempDataManager.getData(m_deviatoricPlasticExtensionFieldId, PeridigmField::STEP_N)->MyLength();
vector<Sacado::Fad::DFad<double> > edpNP1(numBonds);
vector<Sacado::Fad::DFad<double> > force_AD(numDof);
// Evaluate the constitutive model using the AD types
MATERIAL_EVALUATION::computeDilatation(x,&y_AD[0],weightedVolume,cellVolume,bondDamage,&dilatation_AD[0],&tempNeighborhoodList[0],tempNumOwnedPoints,m_horizon);
MATERIAL_EVALUATION::computeInternalForceIsotropicElasticPlastic
(
//......... part of the code is omitted here (truncated excerpt) .........
示例4: felix_driver_run
// The solve is done in the felix_driver_run function, and the solution is passed back to Glimmer-CISM
// IK, 12/3/13: time_inc_yr and cur_time_yr are not used here...
// NOTE(review): this excerpt is truncated by the source aggregator; the solve
// and the copy of the solution back to CISM are not shown here.
void felix_driver_run(FelixToGlimmer * ftg_ptr, double& cur_time_yr, double time_inc_yr)
{
//IK, 12/9/13: how come FancyOStream prints an all processors??
Teuchos::RCP<Teuchos::FancyOStream> out(Teuchos::VerboseObjectBase::getDefaultOStream());
// NOTE(review): bitwise '&' is used instead of logical '&&' here and below;
// both operands are booleans so the result is the same, but '&&' would be
// idiomatic and short-circuiting.
if (debug_output_verbosity != 0 & mpiCommT->getRank() == 0) {
std::cout << "In felix_driver_run, cur_time, time_inc = " << cur_time_yr
<< " " << time_inc_yr << std::endl;
}
// ---------------------------------------------
// get u and v velocity solution from Glimmer-CISM
// IK, 11/26/13: need to concatenate these into a single solve for initial condition for Albany/FELIX solve
// IK, 3/14/14: moved this step to felix_driver_run from felix_driver init, since we still want to grab and u and v velocities for CISM if the mesh hasn't changed,
// in which case only felix_driver_run will be called, not felix_driver_init.
// ---------------------------------------------
if (debug_output_verbosity != 0 & mpiCommT->getRank() == 0)
std::cout << "In felix_driver_run: grabbing pointers to u and v velocities in CISM..." << std::endl;
uVel_ptr = ftg_ptr ->getDoubleVar("uvel", "velocity");
vVel_ptr = ftg_ptr ->getDoubleVar("vvel", "velocity");
// ---------------------------------------------
// Set restart solution to the one passed from CISM
// IK, 3/14/14: moved this from felix_driver_init to felix_driver_run.
// ---------------------------------------------
if (debug_output_verbosity != 0 & mpiCommT->getRank() == 0)
std::cout << "In felix_driver_run: setting initial condition from CISM..." << std::endl;
//Check what kind of ordering you have in the solution & create solutionField object.
interleavedOrdering = meshStruct->getInterleavedOrdering();
Albany::AbstractSTKFieldContainer::VectorFieldType* solutionField;
if(interleavedOrdering)
solutionField = Teuchos::rcp_dynamic_cast<Albany::OrdinarySTKFieldContainer<true> >(meshStruct->getFieldContainer())->getSolutionField();
else
solutionField = Teuchos::rcp_dynamic_cast<Albany::OrdinarySTKFieldContainer<false> >(meshStruct->getFieldContainer())->getSolutionField();
//Create vector used to renumber nodes on each processor from the Albany convention (horizontal levels first) to the CISM convention (vertical layers first)
nNodes2D = (global_ewn + 1)*(global_nsn+1); //number global nodes in the domain in 2D
nNodesProc2D = (nsn-2*nhalo+1)*(ewn-2*nhalo+1); //number of nodes on each processor in 2D
cismToAlbanyNodeNumberMap.resize(upn*nNodesProc2D);
// Build the Albany-index -> CISM-global-node-number map, looping over the
// processor-local 2D footprint (j,i) and the vertical levels (k).
for (int j=0; j<nsn-2*nhalo+1;j++) {
for (int i=0; i<ewn-2*nhalo+1; i++) {
for (int k=0; k<upn; k++) {
int index = k+upn*i + j*(ewn-2*nhalo+1)*upn;
cismToAlbanyNodeNumberMap[index] = k*nNodes2D + global_node_id_owned_map_Ptr[i+j*(ewn-2*nhalo+1)];
//if (mpiComm->MyPID() == 0)
// std::cout << "index: " << index << ", cismToAlbanyNodeNumberMap: " << cismToAlbanyNodeNumberMap[index] << std::endl;
}
}
}
//The way it worked out, uVel_ptr and vVel_ptr have more nodes than the nodes in the mesh passed to Albany/CISM for the solve. In particular,
//there is 1 row of halo elements in uVel_ptr and vVel_ptr. To account for this, we copy uVel_ptr and vVel_ptr into std::vectors, which do not have the halo elements.
std::vector<double> uvel_vec(upn*nNodesProc2D);
std::vector<double> vvel_vec(upn*nNodesProc2D);
int counter1 = 0;
int counter2 = 0;
int local_nodeID;
// counter2 walks the halo-padded CISM arrays; counter1 walks the halo-free
// destination vectors, advancing only inside the non-halo window.
for (int j=0; j<nsn-1; j++) {
for (int i=0; i<ewn-1; i++) {
for (int k=0; k<upn; k++) {
if (j >= nhalo-1 & j < nsn-nhalo) {
if (i >= nhalo-1 & i < ewn-nhalo) {
#ifdef CISM_USE_EPETRA
local_nodeID = node_map->LID(cismToAlbanyNodeNumberMap[counter1]);
#else
local_nodeID = node_map->getLocalElement(cismToAlbanyNodeNumberMap[counter1]);
#endif
uvel_vec[counter1] = uVel_ptr[counter2];
vvel_vec[counter1] = vVel_ptr[counter2];
counter1++;
}
}
counter2++;
}
}
}
//Loop over all the elements to find which nodes are active. For the active nodes, copy uvel and vvel from CISM into Albany solution array to
//use as initial condition.
//NOTE: there is some inefficiency here by looping over all the elements. TO DO? pass only active nodes from Albany-CISM to improve this?
double velScale = seconds_per_year*vel_scaling_param;
for (int i=0; i<nElementsActive; i++) {
for (int j=0; j<8; j++) {
int node_GID = global_element_conn_active_Ptr[i + nElementsActive*j]; //node_GID is 1-based
#ifdef CISM_USE_EPETRA
int node_LID = node_map->LID(node_GID); //node_LID is 0-based
#else
int node_LID = node_map->getLocalElement(node_GID); //node_LID is 0-based
#endif
stk::mesh::Entity node = meshStruct->bulkData->get_entity(stk::topology::NODE_RANK, node_GID);
double* sol = stk::mesh::field_data(*solutionField, node);
//IK, 3/18/14: added division by velScale to convert uvel and vvel from dimensionless to having units of m/year (the Albany units)
sol[0] = uvel_vec[node_LID]/velScale;
sol[1] = vvel_vec[node_LID]/velScale;
}
}
// ---------------------------------------------------------------------------------------------------
// Solve
//......... part of the code is omitted here (truncated excerpt) .........
示例5: op
// Unit test: builds a simple tridiagonal-graph deterministic problem, wraps it
// in both a Stokhos::InterlacedOperator (object under test) and a
// Stokhos::FullyAssembledOperator (benchmark), and compares their action on
// random vectors.
// NOTE(review): this excerpt is truncated by the source aggregator; the final
// apply-and-compare loop body is not fully shown.
TEUCHOS_UNIT_TEST(interlaced_op, test)
{
#ifdef HAVE_MPI
Teuchos::RCP<const Epetra_Comm> comm = Teuchos::rcp(new Epetra_MpiComm(MPI_COMM_WORLD));
#else
Teuchos::RCP<const Epetra_Comm> comm = Teuchos::rcp(new Epetra_SerialComm);
#endif
//int rank = comm->MyPID();
int numProc = comm->NumProc();
// Polynomial-chaos setup: 1 KL term, order-5 basis.
int num_KL = 1;
int porder = 5;
bool full_expansion = false;
Teuchos::RCP<const Stokhos::CompletePolynomialBasis<int,double> > basis = buildBasis(num_KL,porder);
Teuchos::RCP<Stokhos::Sparse3Tensor<int,double> > Cijk;
Teuchos::RCP<Stokhos::ParallelData> sg_parallel_data;
Teuchos::RCP<Stokhos::OrthogPolyExpansion<int,double> > expansion;
{
if(full_expansion)
Cijk = basis->computeTripleProductTensor();
else
Cijk = basis->computeLinearTripleProductTensor();
Teuchos::ParameterList parallelParams;
parallelParams.set("Number of Spatial Processors", numProc);
sg_parallel_data = Teuchos::rcp(new Stokhos::ParallelData(basis, Cijk, comm,
parallelParams));
expansion = Teuchos::rcp(new Stokhos::AlgebraicOrthogPolyExpansion<int,double>(basis,
Cijk));
}
Teuchos::RCP<const EpetraExt::MultiComm> sg_comm = sg_parallel_data->getMultiComm();
// determinstic PDE graph
// 10 rows per process; diagonal plus off-diagonal (gid-1, gid+1) entries for
// the interior rows, i.e. a tridiagonal-style graph.
Teuchos::RCP<Epetra_Map> determRowMap = Teuchos::rcp(new Epetra_Map(-1,10,0,*comm));
Teuchos::RCP<Epetra_CrsGraph> determGraph = Teuchos::rcp(new Epetra_CrsGraph(Copy,*determRowMap,1));
for(int row=0;row<determRowMap->NumMyElements();row++) {
int gid = determRowMap->GID(row);
determGraph->InsertGlobalIndices(gid,1,&gid);
}
for(int row=1;row<determRowMap->NumMyElements()-1;row++) {
int gid = determRowMap->GID(row);
int indices[2] = {gid-1,gid+1};
determGraph->InsertGlobalIndices(gid,2,indices);
}
determGraph->FillComplete();
Teuchos::RCP<Teuchos::ParameterList> params = Teuchos::rcp(new Teuchos::ParameterList);
params->set("Scale Operator by Inverse Basis Norms", false);
params->set("Include Mean", true);
params->set("Only Use Linear Terms", false);
Teuchos::RCP<Stokhos::EpetraSparse3Tensor> epetraCijk =
Teuchos::rcp(new Stokhos::EpetraSparse3Tensor(basis,Cijk,sg_comm));
// One CRS matrix per stochastic block, filled with the constant 1+i so the
// blocks are distinguishable.
Teuchos::RCP<Stokhos::EpetraOperatorOrthogPoly> W_sg_blocks =
Teuchos::rcp(new Stokhos::EpetraOperatorOrthogPoly(basis, epetraCijk->getStochasticRowMap(), determRowMap, determRowMap, sg_comm));
for(int i=0; i<W_sg_blocks->size(); i++) {
Teuchos::RCP<Epetra_CrsMatrix> crsMat = Teuchos::rcp(new Epetra_CrsMatrix(Copy,*determGraph));
crsMat->PutScalar(1.0 + i);
W_sg_blocks->setCoeffPtr(i,crsMat); // allocate a bunch of matrices
}
Teuchos::RCP<const Epetra_Map> sg_map =
Teuchos::rcp(EpetraExt::BlockUtility::GenerateBlockMap(
*determRowMap, *(epetraCijk->getStochasticRowMap()),
*(epetraCijk->getMultiComm())));
// build an interlaced operator (object under test) and a benchmark
// fully assembled operator
///////////////////////////////////////////////////////////////////////
Stokhos::InterlacedOperator op(sg_comm,basis,epetraCijk,determGraph,params);
op.PutScalar(0.0);
op.setupOperator(W_sg_blocks);
Stokhos::FullyAssembledOperator full_op(sg_comm,basis,epetraCijk,determGraph,sg_map,sg_map,params);
full_op.PutScalar(0.0);
full_op.setupOperator(W_sg_blocks);
// here we test interlaced operator against the fully assembled operator
///////////////////////////////////////////////////////////////////////
bool result = true;
for(int i=0;i<100;i++) {
// build vector for fully assembled operator (blockwise)
Teuchos::RCP<Stokhos::EpetraVectorOrthogPoly> x_vec_blocks =
Teuchos::rcp(new Stokhos::EpetraVectorOrthogPoly(basis,epetraCijk->getStochasticRowMap(),determRowMap,epetraCijk->getMultiComm()));
Teuchos::RCP<Stokhos::EpetraVectorOrthogPoly> f_vec_blocks =
Teuchos::rcp(new Stokhos::EpetraVectorOrthogPoly(basis,epetraCijk->getStochasticRowMap(),determRowMap,epetraCijk->getMultiComm()));
Teuchos::RCP<Epetra_Vector> x_vec_blocked = x_vec_blocks->getBlockVector();
Teuchos::RCP<Epetra_Vector> f_vec_blocked = f_vec_blocks->getBlockVector();
x_vec_blocked->Random(); // build an initial vector
f_vec_blocked->PutScalar(0.0);
// build interlaced vectors
Teuchos::RCP<Epetra_Vector> x_vec_inter = Teuchos::rcp(new Epetra_Vector(op.OperatorDomainMap()));
Teuchos::RCP<Epetra_Vector> f_vec_inter = Teuchos::rcp(new Epetra_Vector(op.OperatorRangeMap()));
Teuchos::RCP<Epetra_Vector> f_vec_blk_inter = Teuchos::rcp(new Epetra_Vector(op.OperatorRangeMap()));
Stokhos::SGModelEvaluator_Interlaced::copyToInterlacedVector(*x_vec_blocks,*x_vec_inter); // copy random x to
//......... part of the code is omitted here (truncated excerpt) .........
示例6: Mesh
//......... part of the code is omitted here (truncated excerpt) .........
// NOTE(review): this excerpt shows only the tail of a mesh-reading routine;
// the function signature and the beginning of the body are not visible here.
// for ( int k=0; k<boundaryPoints.size(); k++ )
// std::cout << boundaryPoints[k] << std::endl;
// std::cout << "WWW" << std::endl;
// poly->Print( std::cout );
// vtkCellArray * verts = poly->GetVerts();
// verts->Print( std::cout );
// std::cout << "XXX" << std::endl;
// poly->GetData()->Print( std::cout );
// int numBoundaryPoints = poly->GetNumberOfPoints();
// Teuchos::ArrayRCP<Teuchos::Tuple<double,3> > boundaryPoints( numBoundaryPoints );
// for ( unsigned int k=0; k<numBoundaryPoints; k++ )
// poly->GetPoint( k, boundaryPoints[k].getRawPtr() );
// mesh->setBoundaryNodes( boundaryPoints );
// for ( int k=0; k<points.size(); k++ )
// std::cout << boundaryPoints[k] << std::endl;
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// get cells
int globalNumElems = vtkMesh->GetNumberOfCells();
// create an appropriate map
Teuchos::RCP<const Epetra_Map> elemsMap =
Teuchos::rcp ( new Epetra_Map ( globalNumElems, 0, *comm ) );
int localNumElems = elemsMap->NumMyElements();
Teuchos::ArrayRCP<Teuchos::ArrayRCP<int> > elems( localNumElems );
Teuchos::ArrayRCP<Mesh::ElementType> elemTypes( localNumElems );
// For each locally owned element, record its point-connectivity list and
// translate the VTK cell type into the corresponding Mesh::ElementType.
for ( unsigned int k=0; k<localNumElems; k++ )
{
// set the connectivity table
vtkCell * cell = vtkMesh->GetCell( elemsMap->GID(k) );
int numPoints = cell->GetNumberOfPoints();
elems[k] = Teuchos::ArrayRCP<int>( numPoints );
for ( unsigned int l=0; l<numPoints; l++ )
elems[k][l] = cell->GetPointId( l );
// set the element type
switch( cell->GetCellType() )
{
case VTK_LINE:
elemTypes[k] = Mesh::EDGE2;
break;
case VTK_QUADRATIC_EDGE:
elemTypes[k] = Mesh::EDGE3;
break;
case VTK_TRIANGLE:
elemTypes[k] = Mesh::TRI3;
break;
case VTK_QUADRATIC_TRIANGLE:
elemTypes[k] = Mesh::TRI6;
break;
case VTK_QUAD:
elemTypes[k] = Mesh::QUAD4;
break;
case VTK_QUADRATIC_QUAD:
elemTypes[k] = Mesh::QUAD8;
break;
case VTK_BIQUADRATIC_QUAD:
elemTypes[k] = Mesh::QUAD9;
break;
case VTK_TETRA:
elemTypes[k] = Mesh::TET4;
break;
case VTK_QUADRATIC_TETRA:
elemTypes[k] = Mesh::TET10;
break;
case VTK_HEXAHEDRON:
elemTypes[k] = Mesh::HEX8;
break;
case VTK_QUADRATIC_HEXAHEDRON:
elemTypes[k] = Mesh::HEX20;
break;
case VTK_WEDGE:
elemTypes[k] = Mesh::PRISM6;
break;
case VTK_HIGHER_ORDER_WEDGE:
elemTypes[k] = Mesh::PRISM15;
break;
case VTK_PYRAMID:
elemTypes[k] = Mesh::PYRAMID5;
break;
default:
// Any unrecognized VTK cell type is a hard error.
TEST_FOR_EXCEPTION( true,
std::logic_error,
"Unknown type \""<< cell->GetCellType() <<"\"." );
}
}
mesh->setElems( elems );
mesh->setElemTypes( elemTypes );
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
return mesh;
}