This article collects typical usage examples of the el::DistMatrix::DistData method in C++. If you are wondering how the C++ DistMatrix::DistData method is used in practice, how to call DistMatrix::DistData, or where to find examples of DistMatrix::DistData, the curated code example below may help. You can also read more about the enclosing class, el::DistMatrix.
Below, 1 code example of the DistMatrix::DistData method is shown; examples are sorted by popularity by default. You can upvote the examples you like or find useful, and your ratings help the system recommend better C++ code examples.
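Before the full example, a minimal sketch of what DistData() reports may be helpful. The function InspectDistribution and the matrix A below are made up for illustration; the sketch assumes an initialized Elemental environment and the El headers:

#include <El.hpp>
#include <iostream>

// DistData() returns a small struct that describes, at run time, how the
// matrix is distributed over its process grid.
void InspectDistribution( const El::Grid& grid )
{
    // hypothetical 8x4 matrix with a [VC,STAR] distribution
    El::DistMatrix<double,El::VC,El::STAR> A( 8, 4, grid );
    auto d = A.DistData();
    // For a [VC,STAR] matrix, colDist is El::VC and rowDist is El::STAR.
    if( d.colDist == El::VC && d.rowDist == El::STAR )
        std::cout << "A is distributed as [VC,STAR]" << std::endl;
}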
Example 1: elemental2vec
int elemental2vec(const El::DistMatrix<El::Complex<double>,El::VC,El::STAR> &Y, std::vector<double> &vec){
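// DistData() reports the distribution of Y at run time; for a [VC,STAR]
// matrix, colDist is El::VC and rowDist is El::STAR.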
assert((Y.DistData().colDist == El::VC) and (Y.DistData().rowDist == El::STAR));
int data_dof=2;
int SCAL_EXP = 1;
//double *pt_array,*pt_perm_array;
int r,q,ll,rq; // el vec info
int nbigs; //Number of large recv (i.e. recv 1 extra data point)
int pstart; // p_id of nstart
int rank = El::mpi::WorldRank(); //p_id
int recv_size; // base recv size
bool print = (rank == -1);
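// debug output below is disabled (no rank equals -1); change -1 to a real rank id to enable it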
// Get el vec info
ll = Y.Height();
const El::Grid* g = &(Y.Grid());
r = g->Height();
q = g->Width();
MPI_Comm comm = (g->Comm()).comm;
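// raw MPI communicator underlying the Elemental grid (El::mpi::Comm stores it in its .comm member)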
int cheb_deg = InvMedTree<FMM_Mat_t>::cheb_deg;
int omp_p=omp_get_max_threads();
size_t n_coeff3=(cheb_deg+1)*(cheb_deg+2)*(cheb_deg+3)/6;
// Get petsc vec params
//VecGetLocalSize(pt_vec,&nlocal);
int nlocal = (vec.size())/data_dof;
if(print) std::cout << "m: " << std::endl;
int nstart = 0;
//VecGetArray(pt_vec,&pt_array);
//VecGetOwnershipRange(pt_vec,&nstart,NULL);
MPI_Exscan(&nlocal,&nstart,1,MPI_INT,MPI_SUM,comm);
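// exclusive scan of the local sizes: nstart becomes the global index of this rank's
// first entry; on rank 0, MPI_Exscan leaves the output undefined, so the explicit
// 0 initialization above is what is actually used there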
// Determine who owns the first element we want
rq = r * q;
pstart = nstart % rq;
nbigs = nlocal % rq;
recv_size = nlocal / rq;
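// With the default alignment, row i of a [VC,STAR] matrix lives on the process with
// VC rank i % (r*q). This rank's nlocal consecutive entries [nstart, nstart+nlocal)
// are therefore spread round-robin over all rq processes: pstart owns entry nstart,
// the nbigs processes starting at pstart each contribute one extra entry, and every
// process contributes at least recv_size entries.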
if(print){
std::cout << "r: " << r << " q: " << q <<std::endl;
std::cout << "nstart: " << nstart << std::endl;
std::cout << "ps: " << pstart << std::endl;
std::cout << "nbigs: " << nbigs << std::endl;
std::cout << "recv_size: " << recv_size << std::endl;
}
// Make recv sizes
std::vector<int> recv_lengths(rq);
std::fill(recv_lengths.begin(),recv_lengths.end(),recv_size);
if(nbigs >0){
for(int i=0;i<nbigs;i++){
recv_lengths[(pstart + i) % rq] += 1;
}
}
// Make recv disps
std::vector<int> recv_disps = exscan(recv_lengths);
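// exscan is a helper defined elsewhere in this project; it is assumed to return the
// exclusive prefix sum of its argument, i.e. the receive displacement of each block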
// All2all to get send sizes
std::vector<int> send_lengths(rq);
MPI_Alltoall(&recv_lengths[0], 1, MPI_INT, &send_lengths[0], 1, MPI_INT,comm);
// Scan to get send_disps
std::vector<int> send_disps = exscan(send_lengths);
// Do all2allv to get data on correct processor
std::vector<El::Complex<double>> recv_data(nlocal);
std::vector<El::Complex<double>> recv_data_ordered(nlocal);
//MPI_Alltoallv(el_vec.Buffer(),&send_lengths[0],&send_disps[0],MPI_DOUBLE,
//              &recv_data[0],&recv_lengths[0],&recv_disps[0],MPI_DOUBLE,comm);
El::mpi::AllToAll(Y.LockedBuffer(), &send_lengths[0], &send_disps[0], &recv_data[0],&recv_lengths[0],&recv_disps[0],comm);
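// El::mpi::AllToAll is Elemental's typed wrapper around MPI_Alltoallv; it supplies
// the MPI datatype for El::Complex<double>, with counts and displacements in elements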
if(print){
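// note: streaming std::vector with operator<< relies on an overload defined elsewhere in this project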
//std::cout << "Send data: " <<std::endl << *el_vec.Buffer() <<std::endl;
std::cout << "Send lengths: " <<std::endl << send_lengths <<std::endl;
std::cout << "Send disps: " <<std::endl << send_disps <<std::endl;
std::cout << "Recv data: " <<std::endl << recv_data <<std::endl;
std::cout << "Recv lengths: " <<std::endl << recv_lengths <<std::endl;
std::cout << "Recv disps: " <<std::endl << recv_disps <<std::endl;
}
// Reorder the data so that it is in the right order for the fmm tree
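// The i-th element received from process p carries global index
// nstart + ((p - pstart) % rq) + rq*i, so it belongs at local position
// base_idx + rq*i of the ordered buffer.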
for(int p=0;p<rq;p++){
int base_idx = (p - pstart + rq) % rq;
int offset = recv_disps[p];
for(int i=0;i<recv_lengths[p];i++){
recv_data_ordered[base_idx + rq*i] = recv_data[offset + i];
}
}
// loop through and put the data into the vector
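// data_dof == 2: each El::Complex<double> is unpacked into an interleaved (real, imaginary) pair of doubles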
#pragma omp parallel for
for(int i=0;i<nlocal; i++){
vec[2*i] = El::RealPart(recv_data_ordered[i]);
vec[2*i+1] = El::ImagPart(recv_data_ordered[i]);
}
//.........the rest of the code is omitted here.........