This article collects typical usage examples of the C++ method el::DistMatrix::Height. If you are unsure what DistMatrix::Height does, how to call it, or where it is used in practice, the curated examples below may help. You can also read further about the enclosing class el::DistMatrix.
Seven code examples of DistMatrix::Height are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
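Before the examples, here is a minimal, self-contained sketch of the method itself (a sketch assuming Elemental is installed and MPI-enabled; the matrix sizes are illustrative). Height() returns the global row count of a distributed matrix, while LocalHeight() returns only the rows stored on the calling process.

#include <El.hpp>

int main(int argc, char* argv[]) {
    // RAII wrapper that initializes and finalizes Elemental/MPI.
    El::Environment env(argc, argv);
    El::Grid grid(El::mpi::COMM_WORLD);
    // A 100 x 50 distributed matrix with the default [MC,MR] distribution.
    El::DistMatrix<double> A(100, 50, grid);
    if (grid.Rank() == 0)
        El::Output("global height = ", A.Height(),
                   ", local height on rank 0 = ", A.LocalHeight());
    return 0;
}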
Example 1: Gemv
template <typename T>
inline void Gemv(El::Orientation oA,
        T alpha, const El::DistMatrix<T, El::VC, El::STAR>& A,
        const El::DistMatrix<T, El::STAR, El::STAR>& x,
        El::DistMatrix<T, El::VC, El::STAR>& y) {
    // The output length depends on whether A is applied normally or transposed.
    int y_height = (oA == El::NORMAL ? A.Height() : A.Width());
    El::Zeros(y, y_height, 1);
    base::Gemv(oA, alpha, A, x, T(0), y);
}
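A hedged usage sketch for the wrapper above (base::Gemv comes from the surrounding library, which is not shown here; the sizes and the use of El::Uniform to generate test data are illustrative assumptions):

El::Grid grid(El::mpi::COMM_WORLD);
El::DistMatrix<double, El::VC, El::STAR> A(grid), y(grid);
El::DistMatrix<double, El::STAR, El::STAR> x(grid);
El::Uniform(A, 1000, 20);        // 1000 x 20 random matrix
El::Uniform(x, 20, 1);           // conforming 20 x 1 input vector
Gemv(El::NORMAL, 1.0, A, x, y);  // y is zeroed and sized to A.Height() x 1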
Example 2: outer_panel_mixed_gemm_impl_nn
// index_type, value_type, and col_d are parameters of the surrounding
// (elided) context; the template declaration below is an assumption.
template <typename index_type, typename value_type, El::Dist col_d>
inline void outer_panel_mixed_gemm_impl_nn(
        const double alpha,
        const SpParMat<index_type, value_type, SpDCCols<index_type, value_type> > &A,
        const El::DistMatrix<value_type, El::STAR, El::STAR> &S,
        const double beta,
        El::DistMatrix<value_type, col_d, El::STAR> &C) {
utility::combblas_slab_view_t<index_type, value_type> cbview(A, false);
//FIXME: factor
size_t slab_size = 2 * C.Grid().Height();
for(size_t cur_row_idx = 0; cur_row_idx < cbview.nrows();
cur_row_idx += slab_size) {
size_t cur_slab_size =
std::min(slab_size, cbview.nrows() - cur_row_idx);
        // materialize the next slab of rows of A as an Elemental matrix
El::DistMatrix<value_type, col_d, El::STAR>
A_row(cur_slab_size, S.Height());
cbview.extract_elemental_row_slab_view(A_row, cur_slab_size);
        // copy the slab's locally owned entries out of the CombBLAS view
for(size_t l_col_idx = 0; l_col_idx < A_row.LocalWidth();
l_col_idx++) {
size_t g_col_idx = l_col_idx * A_row.RowStride()
+ A_row.RowShift();
for(size_t l_row_idx = 0; l_row_idx < A_row.LocalHeight();
++l_row_idx) {
size_t g_row_idx = l_row_idx * A_row.ColStride()
+ A_row.ColShift() + cur_row_idx;
A_row.SetLocal(l_row_idx, l_col_idx,
cbview(g_row_idx, g_col_idx));
}
}
El::DistMatrix<value_type, col_d, El::STAR>
C_slice(cur_slab_size, C.Width());
El::View(C_slice, C, cur_row_idx, 0, cur_slab_size, C.Width());
El::LocalGemm(El::NORMAL, El::NORMAL, alpha, A_row, S,
beta, C_slice);
}
}
Example 3: L1DistanceMatrixTU
template <typename T>
void L1DistanceMatrixTU(El::UpperOrLower uplo,
        direction_t dirA, direction_t dirB, T alpha,
        const El::DistMatrix<T, El::STAR, El::MC> &A,
        const El::DistMatrix<T, El::STAR, El::MR> &B,
        T beta, El::DistMatrix<T> &C) {
// TODO verify sizes
const T *a = A.LockedBuffer();
El::Int ldA = A.LDim();
const T *b = B.LockedBuffer();
El::Int ldB = B.LDim();
T *c = C.Buffer();
El::Int ldC = C.LDim();
El::Int d = A.Height();
/* Not the most efficient way... but mimicking BLAS is too much work! */
    if (dirA == base::COLUMNS && dirB == base::COLUMNS) {
        El::Int n = C.LocalWidth();
        El::Int m = C.LocalHeight();
        for (El::Int j = 0; j < n; j++) {
            // Only touch the requested (upper or lower) triangle of C.
            El::Int i_begin = (uplo == El::UPPER) ?
                0 : C.LocalRowOffset(A.GlobalCol(j));
            El::Int i_end = (uplo == El::UPPER) ?
                C.LocalRowOffset(A.GlobalCol(j) + 1) : m;
            for (El::Int i = i_begin; i < i_end; i++) {
                T v = 0.0;
                for (El::Int k = 0; k < d; k++)
                    v += std::abs(b[j * ldB + k] - a[i * ldA + k]);
                c[j * ldC + i] = beta * c[j * ldC + i] + alpha * v;
            }
        }
    }
// TODO the rest of the cases.
}
Example 4: outer_panel_mixed_gemm_impl_tn
// As in Example 2, index_type, value_type, and col_d come from the elided
// surrounding context; the template declaration below is an assumption.
template <typename index_type, typename value_type, El::Dist col_d>
inline void outer_panel_mixed_gemm_impl_tn(
        const double alpha,
        const SpParMat<index_type, value_type, SpDCCols<index_type, value_type> > &A,
        const El::DistMatrix<value_type, col_d, El::STAR> &S,
        const double beta,
        El::DistMatrix<value_type, El::STAR, El::STAR> &C) {
El::DistMatrix<value_type, El::STAR, El::STAR>
tmp_C(C.Height(), C.Width());
El::Zero(tmp_C);
utility::combblas_slab_view_t<index_type, value_type> cbview(A, false);
//FIXME: factor
size_t slab_size = 2 * S.Grid().Height();
for(size_t cur_row_idx = 0; cur_row_idx < cbview.ncols();
cur_row_idx += slab_size) {
size_t cur_slab_size =
std::min(slab_size, cbview.ncols() - cur_row_idx);
        // view the next slab of columns of A (rows of the transposed operand)
El::DistMatrix<value_type, El::STAR, El::STAR>
A_row(cur_slab_size, S.Height());
        // the transpose is taken column-wise, so pull a full slab view
        //cbview.extract_elemental_column_slab_view(A_row, cur_slab_size);
        cbview.extract_full_slab_view(cur_slab_size);
// matrix mult (FIXME only iter nz)
for(size_t l_row_idx = 0; l_row_idx < A_row.LocalHeight();
++l_row_idx) {
size_t g_row_idx = l_row_idx * A_row.ColStride()
+ A_row.ColShift() + cur_row_idx;
for(size_t l_col_idx = 0; l_col_idx < A_row.LocalWidth();
l_col_idx++) {
//XXX: should be the same as l_col_idx
size_t g_col_idx = l_col_idx * A_row.RowStride()
+ A_row.RowShift();
// continue if we don't own values in S in this row
if(!S.IsLocalRow(g_col_idx))
continue;
//get transposed value
value_type val = alpha * cbview(g_col_idx, g_row_idx);
for(size_t s_col_idx = 0; s_col_idx < S.LocalWidth();
s_col_idx++) {
tmp_C.UpdateLocal(g_row_idx, s_col_idx,
val * S.GetLocal(S.LocalRow(g_col_idx), s_col_idx));
}
}
}
}
//FIXME: scaling
if(A.getcommgrid()->GetRank() == 0) {
for(size_t col_idx = 0; col_idx < C.Width(); col_idx++)
for(size_t row_idx = 0; row_idx < C.Height(); row_idx++)
tmp_C.UpdateLocal(row_idx, col_idx,
beta * C.GetLocal(row_idx, col_idx));
}
//FIXME: Use utility getter
boost::mpi::communicator world(
A.getcommgrid()->GetWorld(), boost::mpi::comm_duplicate);
    boost::mpi::all_reduce(world,
        tmp_C.LockedBuffer(),
        C.Height() * C.Width(),
        C.Buffer(),
        std::plus<value_type>());
}
Example 5: Height
template <typename T, El::Dist U, El::Dist V>
int Height(const El::DistMatrix<T, U, V>& A) {
    return A.Height();
}
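A hedged usage sketch for this wrapper (distributions and sizes are illustrative): because the function is templated over the element type and both distributions, it forwards Height() uniformly for any DistMatrix.

El::Grid grid(El::mpi::COMM_WORLD);
El::DistMatrix<double, El::MC, El::MR> A(64, 32, grid);
El::DistMatrix<float, El::VC, El::STAR> B(128, 8, grid);
int hA = Height(A);  // 64
int hB = Height(B);  // 128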
Example 6: elemental2vec
int elemental2vec(const El::DistMatrix<El::Complex<double>,El::VC,El::STAR> &Y, std::vector<double> &vec){
    // Y is declared [VC,STAR]: column distribution VC, row distribution STAR.
    assert((Y.DistData().colDist == El::VC) and (Y.DistData().rowDist == El::STAR));
int data_dof=2;
int SCAL_EXP = 1;
//double *pt_array,*pt_perm_array;
int r,q,ll,rq; // el vec info
int nbigs; //Number of large recv (i.e. recv 1 extra data point)
int pstart; // p_id of nstart
int rank = El::mpi::WorldRank(); //p_id
int recv_size; // base recv size
bool print = (rank == -1);
// Get el vec info
ll = Y.Height();
const El::Grid* g = &(Y.Grid());
r = g->Height();
q = g->Width();
MPI_Comm comm = (g->Comm()).comm;
int cheb_deg = InvMedTree<FMM_Mat_t>::cheb_deg;
int omp_p=omp_get_max_threads();
size_t n_coeff3=(cheb_deg+1)*(cheb_deg+2)*(cheb_deg+3)/6;
// Get petsc vec params
//VecGetLocalSize(pt_vec,&nlocal);
int nlocal = (vec.size())/data_dof;
    if(print) std::cout << "nlocal: " << nlocal << std::endl;
int nstart = 0;
//VecGetArray(pt_vec,&pt_array);
//VecGetOwnershipRange(pt_vec,&nstart,NULL);
MPI_Exscan(&nlocal,&nstart,1,MPI_INT,MPI_SUM,comm);
// Determine who owns the first element we want
rq = r * q;
pstart = nstart % rq;
nbigs = nlocal % rq;
recv_size = nlocal / rq;
if(print){
std::cout << "r: " << r << " q: " << q <<std::endl;
std::cout << "nstart: " << nstart << std::endl;
std::cout << "ps: " << pstart << std::endl;
std::cout << "nbigs: " << nbigs << std::endl;
std::cout << "recv_size: " << recv_size << std::endl;
}
// Make recv sizes
std::vector<int> recv_lengths(rq);
std::fill(recv_lengths.begin(),recv_lengths.end(),recv_size);
if(nbigs >0){
for(int i=0;i<nbigs;i++){
recv_lengths[(pstart + i) % rq] += 1;
}
}
// Make recv disps
std::vector<int> recv_disps = exscan(recv_lengths);
// All2all to get send sizes
std::vector<int> send_lengths(rq);
MPI_Alltoall(&recv_lengths[0], 1, MPI_INT, &send_lengths[0], 1, MPI_INT,comm);
// Scan to get send_disps
std::vector<int> send_disps = exscan(send_lengths);
// Do all2allv to get data on correct processor
std::vector<El::Complex<double>> recv_data(nlocal);
std::vector<El::Complex<double>> recv_data_ordered(nlocal);
    //MPI_Alltoallv(el_vec.Buffer(),&send_lengths[0],&send_disps[0],MPI_DOUBLE,
    //              &recv_data[0],&recv_lengths[0],&recv_disps[0],MPI_DOUBLE,comm);
El::mpi::AllToAll(Y.LockedBuffer(), &send_lengths[0], &send_disps[0], &recv_data[0],&recv_lengths[0],&recv_disps[0],comm);
if(print){
//std::cout << "Send data: " <<std::endl << *el_vec.Buffer() <<std::endl;
std::cout << "Send lengths: " <<std::endl << send_lengths <<std::endl;
std::cout << "Send disps: " <<std::endl << send_disps <<std::endl;
std::cout << "Recv data: " <<std::endl << recv_data <<std::endl;
std::cout << "Recv lengths: " <<std::endl << recv_lengths <<std::endl;
std::cout << "Recv disps: " <<std::endl << recv_disps <<std::endl;
}
    // Reorder the data so that it is in the right order for the FMM tree
for(int p=0;p<rq;p++){
int base_idx = (p - pstart + rq) % rq;
int offset = recv_disps[p];
for(int i=0;i<recv_lengths[p];i++){
recv_data_ordered[base_idx + rq*i] = recv_data[offset + i];
}
}
// loop through and put the data into the vector
#pragma omp parallel for
for(int i=0;i<nlocal; i++){
vec[2*i] = El::RealPart(recv_data_ordered[i]);
vec[2*i+1] = El::ImagPart(recv_data_ordered[i]);
}
//......... part of this example's code is omitted here .........
Example 7: El2Petsc_vec
void El2Petsc_vec(El::DistMatrix<double,El::VC,El::STAR>& el_vec, Vec& pt_vec){
PetscInt nlocal, nstart; // petsc vec info
    PetscScalar *pt_array; // direct access to the local PETSc vector storage
int r,q,ll,rq; // el vec info
int nbigs; //Number of large recv (i.e. recv 1 extra data point)
int pstart; // p_id of nstart
int p = El::mpi::WorldRank(); //p_id
int recv_size; // base recv size
bool print = p == -1;
// Get el vec info
ll = el_vec.Height();
const El::Grid* g = &(el_vec.Grid());
r = g->Height();
q = g->Width();
MPI_Comm comm = (g->Comm()).comm;
// Get petsc vec params
VecGetLocalSize(pt_vec,&nlocal);
VecGetArray(pt_vec,&pt_array);
VecGetOwnershipRange(pt_vec,&nstart,NULL);
// Determine who owns the first element we want
rq = r * q;
pstart = nstart % rq;
nbigs = nlocal % rq;
recv_size = nlocal / rq;
if(print){
std::cout << "r: " << r << " q: " << q <<std::endl;
std::cout << "nstart: " << nstart << std::endl;
std::cout << "ps: " << pstart << std::endl;
std::cout << "nbigs: " << nbigs << std::endl;
std::cout << "recv_size: " << recv_size << std::endl;
}
// Make recv sizes
std::vector<int> recv_lengths(rq);
std::fill(recv_lengths.begin(),recv_lengths.end(),recv_size);
if(nbigs >0){
for(int i=0;i<nbigs;i++){
recv_lengths[(pstart + i) % rq] += 1;
}
}
// Make recv disps
std::vector<int> recv_disps = exscan(recv_lengths);
// All2all to get send sizes
std::vector<int> send_lengths(rq);
MPI_Alltoall(&recv_lengths[0], 1, MPI_INT, &send_lengths[0], 1, MPI_INT,comm);
// Scan to get send_disps
std::vector<int> send_disps = exscan(send_lengths);
// Do all2allv to get data on correct processor
std::vector<double> recv_data(nlocal);
    MPI_Alltoallv(el_vec.Buffer(),&send_lengths[0],&send_disps[0],MPI_DOUBLE,
                  &recv_data[0],&recv_lengths[0],&recv_disps[0],MPI_DOUBLE,comm);
if(print){
//std::cout << "Send data: " <<std::endl << *el_vec.Buffer() <<std::endl;
std::cout << "Send lengths: " <<std::endl << send_lengths <<std::endl;
std::cout << "Send disps: " <<std::endl << send_disps <<std::endl;
std::cout << "Recv data: " <<std::endl << recv_data <<std::endl;
std::cout << "Recv lengths: " <<std::endl << recv_lengths <<std::endl;
std::cout << "Recv disps: " <<std::endl << recv_disps <<std::endl;
}
    // Reorder into PETSc's natural ordering
    for(int proc = 0; proc < rq; proc++){
        int base_idx = (proc - pstart + rq) % rq;
        int offset = recv_disps[proc];
        for(int i = 0; i < recv_lengths[proc]; i++){
            pt_array[base_idx + rq*i] = recv_data[offset + i];
        }
    }
// Copy into array
VecRestoreArray(pt_vec,&pt_array);
}
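A hedged driver sketch for El2Petsc_vec (a sketch assuming PETSc and Elemental are initialized on the same communicator, and that the exscan helper used inside the function is in scope; sizes are illustrative):

El::Grid grid(El::mpi::COMM_WORLD);
El::DistMatrix<double, El::VC, El::STAR> x(1000, 1, grid);
El::Fill(x, 1.0);                // distributed source vector
Vec px;                          // PETSc destination vector, same global size
VecCreateMPI(PETSC_COMM_WORLD, PETSC_DECIDE, 1000, &px);
El2Petsc_vec(x, px);             // redistribute from [VC,STAR] to PETSc layout
VecDestroy(&px);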