

C++ VectorXd::dot Method Code Examples

This article collects typical usage examples of the C++ method VectorXd::dot. If you are wondering how VectorXd::dot is used in practice, or are looking for concrete examples of calling it, the hand-picked code samples below should help. You can also explore further usage examples of the VectorXd class that this method belongs to.


Below are 15 code examples of the VectorXd::dot method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
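
Before the examples, here is a minimal, self-contained sketch of what VectorXd::dot computes (the standard Euclidean inner product). The vector values are made up purely for illustration:

#include <iostream>
#include <Eigen/Dense>

using Eigen::VectorXd;

int main() {
  // Two 3-dimensional vectors with arbitrary example values.
  VectorXd a(3), b(3);
  a << 1.0, 2.0, 3.0;
  b << 4.0, -5.0, 6.0;

  // dot() returns the scalar inner product: sum_i a(i) * b(i).
  double d = a.dot(b);          // 1*4 + 2*(-5) + 3*6 = 12
  std::cout << "a.dot(b) = " << d << "\n";

  // A pattern that recurs in the examples below: squared norm via dot.
  double sq = a.dot(a);         // same value as a.squaredNorm()
  std::cout << "a.dot(a) = " << sq << "\n";
  return 0;
}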

Example 1: convex

ConvexObjectivePtr CostFromFunc::convex(const vector<double>& xin) {
  VectorXd x = getVec(xin, vars_);

  ConvexObjectivePtr out(new ConvexObjective());
  QuadExpr quad;
  if (!full_hessian_) {
    double val;
    VectorXd grad,hess;
    calcGradAndDiagHess(*f_, x, epsilon_, val, grad, hess);
    hess = hess.cwiseMax(VectorXd::Zero(hess.size()));
    //QuadExpr& quad = out->quad_;
    quad.affexpr.constant = val - grad.dot(x) + .5*x.dot(hess.cwiseProduct(x));
    quad.affexpr.vars = vars_;
    quad.affexpr.coeffs = toDblVec(grad - hess.cwiseProduct(x));
    quad.vars1 = vars_;
    quad.vars2 = vars_;
    quad.coeffs = toDblVec(hess*.5);
  }
  else {
    double val;
    VectorXd grad;
    MatrixXd hess;
    calcGradHess(f_, x, epsilon_, val, grad, hess);

    MatrixXd pos_hess = MatrixXd::Zero(x.size(), x.size());
    Eigen::SelfAdjointEigenSolver<MatrixXd> es(hess);
    VectorXd eigvals = es.eigenvalues();
    MatrixXd eigvecs = es.eigenvectors();
    for (size_t i=0, end = x.size(); i != end; ++i) { //tricky --- eigen size() is signed
      if (eigvals(i) > 0) pos_hess += eigvals(i) * eigvecs.col(i) * eigvecs.col(i).transpose();
    }

    //QuadExpr& quad = out->quad_;
    quad.affexpr.constant = val - grad.dot(x) + .5*x.dot(pos_hess * x);
    quad.affexpr.vars = vars_;
    quad.affexpr.coeffs = toDblVec(grad - pos_hess * x);

    int nquadterms = (x.size() * (x.size()-1))/2;
    quad.coeffs.reserve(nquadterms);
    quad.vars1.reserve(nquadterms);
    quad.vars2.reserve(nquadterms);
    for (size_t i=0, end = x.size(); i != end; ++i) { //tricky --- eigen size() is signed
      quad.vars1.push_back(vars_[i]);
      quad.vars2.push_back(vars_[i]);
      quad.coeffs.push_back(pos_hess(i,i)/2);
      for (size_t j=i+1; j != end; ++j) {  //tricky --- eigen size() is signed
        quad.vars1.push_back(vars_[i]);
        quad.vars2.push_back(vars_[j]);
        quad.coeffs.push_back(pos_hess(i,j));
      }
    }
  }
  out->addQuadExpr(quad);

  return out;
}
Developer ID: panjia1983, Project: channel_backward, Lines: 56, Source: modeling_utils.cpp

Example 2: improve_energy

bool ConjugateGradientType::improve_energy(bool verbose) {
  iter++;
  //printf("I am running ConjugateGradient::improve_energy\n");
  const double E0 = energy();
  if (E0 != E0) {
    // There is no point continuing, since we're starting with a NaN!
    // So we may as well quit here.
    if (verbose) {
      printf("The initial energy is a NaN, so I'm quitting early from ConjugateGradientType::improve_energy.\n");
      f.print_summary("has nan:", E0);
      fflush(stdout);
    }
    return false;
  }
  double gdotd;
  {
    const VectorXd g = -grad();
    // Let's immediately free the cached gradient stored internally!
    invalidate_cache();

    // Note: my notation vaguely follows that of
    // [wikipedia](http://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method).
    // I use the Polak-Ribiere method, with automatic direction reset.
    // Note that we could save some memory by using Fletcher-Reeves, and
    // it seems worth implementing that as an option for
    // memory-constrained problems (then we wouldn't need to store oldgrad).
    double beta = g.dot(g - oldgrad)/oldgradsqr;
    oldgrad = g;
    if (beta < 0 || beta != beta || oldgradsqr == 0) beta = 0;
    oldgradsqr = oldgrad.dot(oldgrad);
    direction = g + beta*direction;
    gdotd = oldgrad.dot(direction);
    if (gdotd < 0) {
      direction = oldgrad; // If our direction is uphill, reset to gradient.
      if (verbose) printf("reset to gradient...\n");
      gdotd = oldgrad.dot(direction);
    }
  }

  Minimizer lm = linmin(f, gd, kT, x, direction, -gdotd, &step);
  for (int i=0; i<100 && lm.improve_energy(verbose); i++) {
    if (verbose) lm.print_info("\t");
  }
  if (verbose) {
    //lm->print_info();
    print_info();
    printf("grad*dir/oldgrad*dir = %g\n", grad().dot(direction)/gdotd);
  }
  return (energy() < E0);
}
Developer ID: droundy, Project: deft, Lines: 50, Source: ConjugateGradient.cpp
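
The comments in the example above reference the Polak-Ribière update with an automatic reset. As a stand-alone sketch (not the deft project's API, and with made-up gradient values), the core of that update can be written in isolation like this:

#include <cmath>
#include <iostream>
#include <Eigen/Dense>

using Eigen::VectorXd;

// Sketch of the Polak-Ribiere beta used in improve_energy above:
// beta = g.dot(g - g_old) / g_old.dot(g_old), reset to 0 if negative or invalid.
static double polak_ribiere_beta(const VectorXd& g, const VectorXd& g_old) {
  double denom = g_old.dot(g_old);
  double beta = g.dot(g - g_old) / denom;
  if (beta < 0 || std::isnan(beta) || denom == 0) beta = 0;  // automatic reset
  return beta;
}

int main() {
  VectorXd g_old(2), g(2), direction(2);
  g_old << 1.0, 0.0;               // made-up previous (negative) gradient
  g << 0.2, 0.8;                   // made-up current (negative) gradient
  direction = g_old;               // previous search direction

  double beta = polak_ribiere_beta(g, g_old);  // 0.48 for these values
  direction = g + beta * direction;            // new conjugate direction
  std::cout << "beta = " << beta << ", direction = "
            << direction.transpose() << "\n";
  return 0;
}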

Example 3: predict

void SOGP::predict(const VectorXd &state, VectorXd &prediction,
                   VectorXd &prediction_variance) {
    //check if we have initialised the system
    if (!this->initialized) {
        throw OTLException("SOGP not yet initialised");
    }

    double kstar = kernel->eval(state,state);

    //check if we not been trained
    if (this->current_size == 0) {
        prediction = VectorXd::Zero(this->output_dim);
        prediction_variance = VectorXd::Ones(this->output_dim)*
                (kstar + this->noise);
        return;
    }

    VectorXd k;
    kernel->eval(state, this->basis_vectors, k);
    //std::cout << "K: \n" << k << std::endl;
    //std::cout << "alpha: \n" << this->alpha.block(0,0,this->current_size, this->output_dim) << std::endl;

    prediction = k.transpose() *this->alpha.block(0,0,this->current_size, this->output_dim);
    prediction_variance = VectorXd::Ones(this->output_dim)*
            (k.dot(this->C.block(0,0, this->current_size, this->current_size)*k)
             + kstar + this->noise);

    return;
}
Developer ID: farhanrahman, Project: nice, Lines: 29, Source: otl_sogp.cpp

Example 4: cosangle

double cosangle(VectorXd a, VectorXd b) {
    if (a.norm() < 1e-6 || b.norm() < 1e-6) {
        return 1;
    } else {
        return a.dot(b) / (a.norm() * b.norm());
    }
}
Developer ID: panjia1983, Project: channel_backward, Lines: 7, Source: constraints.cpp

Example 5: compute_dog_leg

VectorXd Optimizer::compute_dog_leg(double alpha, const VectorXd& h_sd,
    const VectorXd& h_gn, double delta, double& gain_ratio_denominator) {
  if (h_gn.norm() <= delta) {
    gain_ratio_denominator = current_SSE_at_linpoint;
    return h_gn;
  }

  double h_sd_norm = h_sd.norm();

  if ((alpha * h_sd_norm) >= delta) {
    gain_ratio_denominator = delta * (2 * alpha * h_sd_norm - delta)
        / (2 * alpha);
    return (delta / h_sd_norm) * h_sd;
  } else {
    // complicated case: calculate intersection of trust region with
    // line between Gauss-Newton and steepest descent solutions
    VectorXd a = alpha * h_sd;
    VectorXd b = h_gn;
    double c = a.dot(b - a);
    double b_a_norm2 = (b - a).squaredNorm();
    double a_norm2 = a.squaredNorm();
    double delta2 = delta * delta;
    double sqrt_term = sqrt(c * c + b_a_norm2 * (delta2 - a_norm2));
    double beta;
    if (c <= 0) {
      beta = (-c + sqrt_term) / b_a_norm2;
    } else {
      beta = (delta2 - a_norm2) / (c + sqrt_term);
    }

    gain_ratio_denominator = .5 * alpha * (1 - beta) * (1 - beta) * h_sd_norm
        * h_sd_norm + beta * (2 - beta) * current_SSE_at_linpoint;
    return (alpha * h_sd + beta * (h_gn - alpha * h_sd));
  }
}
Developer ID: Duckietown-NCTU, Project: Software, Lines: 35, Source: Optimizer.cpp
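
In the "complicated case" above, beta is chosen so that the blended step a + beta*(b - a) lands exactly on the trust-region boundary, i.e. it solves |a + beta*(b - a)|^2 = delta^2. The following stand-alone sketch (with made-up vectors, not the project's code) reproduces that computation and checks it with VectorXd::dot and norm():

#include <cmath>
#include <iostream>
#include <Eigen/Dense>

using Eigen::VectorXd;

int main() {
  // Made-up scaled steepest-descent and Gauss-Newton steps, with a trust
  // radius chosen so that |a| < delta < |b| (the "complicated case").
  VectorXd a(2), b(2);
  a << 0.3, 0.4;        // a = alpha * h_sd, |a| = 0.5
  b << 3.0, 4.0;        // b = h_gn,         |b| = 5.0
  double delta = 2.0;

  // Numerically stable solve of |a + beta*(b - a)|^2 = delta^2 for beta,
  // following the branch on the sign of c used in the example above.
  double c = a.dot(b - a);
  double b_a_norm2 = (b - a).squaredNorm();
  double a_norm2 = a.squaredNorm();
  double sqrt_term = std::sqrt(c * c + b_a_norm2 * (delta * delta - a_norm2));
  double beta = (c <= 0) ? (-c + sqrt_term) / b_a_norm2
                         : (delta * delta - a_norm2) / (c + sqrt_term);

  VectorXd step = a + beta * (b - a);
  std::cout << "beta = " << beta                  // 1/3 for these values
            << ", |step| = " << step.norm()       // equals delta = 2
            << "\n";
  return 0;
}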

Example 6: zoom

  bool NewtonMinimizerGradHessian::zoom(const double& wolfe1, const double& wolfe2, double& lo, double& hi, VectorXd& try_grad, VectorXd& direction, double& grad0_dir, double& val0, double& val_lo, VectorXd& init_params, VectorXd& try_params, unsigned int max_iter, double& result)
  {
    double tryval = val0;
    double alpha = tryval;
    double temp1 = tryval;
    double temp2 = tryval;
    double temp3 = tryval;
    bool bounds = true;
    
    unsigned int counter = 1;
    while(true)
    {
      alpha = lo + hi;
      alpha*=0.5;
      try_params = direction;
      try_params *= alpha;
      try_params += init_params;
      bounds = function->calcValGrad(try_params, tryval, try_grad);
      for(unsigned int i=0;i<fixparameter.size();++i)
      {
        if(fixparameter[i] != 0)
        {
          try_grad[i] = 0.;
        }
      }
      temp1 = wolfe1*alpha;
      temp1 *= grad0_dir;
      temp1 += val0;
      
      if( ( tryval > temp1 ) || ( tryval >= val_lo ) || (bounds == false) )
      {
//         if( (fabs((tryval - val_lo)/(tryval)) < 1.0e-4) && (tryval < val_lo) ){result = alpha;return true;}
        if( (fabs((tryval - val_lo)/(tryval)) < 1.0e-4) ){result = alpha;return true;}
        hi = alpha;
      }
      else
      {
        temp1 = try_grad.dot(direction);
        temp2 = -wolfe2*grad0_dir;
        temp3 = fabs(temp1);
        
        if( temp3 <= fabs(temp2) )
        {
          result = alpha;
          return bounds;
        }
        temp3 = hi - lo;
        temp1 *= temp3;
        if( temp1 >= 0.)
        {
          hi = lo;
        }
        lo = alpha;
        val_lo = tryval;
      }
      counter++;
      if(counter > max_iter){return false;}
    }
  }
Developer ID: Chongk, Project: coresoftware, Lines: 59, Source: NewtonMinimizerGradHessian.cpp

Example 7: affFromValGrad

AffExpr affFromValGrad(double y, const VectorXd& x, const VectorXd& dydx, const VarVector& vars) {
    AffExpr aff;
    aff.constant = y - dydx.dot(x);
    aff.coeffs = toDblVec(dydx);
    aff.vars = vars;
    aff = cleanupAff(aff);
    return aff;
}
Developer ID: asbroad, Project: trajopt, Lines: 8, Source: modeling_utils.cpp

Example 8: eval

double ObjectiveMLS::eval(const VectorXd& x) const
{ 
  double obj = 0.0;
  for(int i = 0; i < a_.cols(); ++i)
    obj -= logsig(-x.dot(a_.col(i)) - b_(i));
  
  obj /= (double)a_.cols();
  return obj;
}
Developer ID: chen0510566, Project: asp, Lines: 9, Source: common_functions.cpp

Example 9:

double Triangle<ConcreteShape>::integrateField(const VectorXd &field) {

  double val = 0;
  Matrix<double,2,2> inverse_Jacobian;
  double detJ;
  std::tie(inverse_Jacobian,detJ) = ConcreteShape::inverseJacobian(mVtxCrd);
  val = detJ*field.dot(mIntegrationWeights);
  return val;

}
Developer ID: SalvusHub, Project: salvus, Lines: 10, Source: Triangle.cpp

Example 10: kernel

double Spectral::kernel(const VectorXd& a, const VectorXd& b){

	switch(kernel_type){
	    case 2  :
	    	return(pow(a.dot(b)+constant,order));
	    default : 
	    	return(exp(-gamma*((a-b).squaredNorm())));
	}

}
Developer ID: zbxzc35, Project: spectral, Lines: 10, Source: spectral.cpp

Example 11: bounceP

void denseFisherMetric::bounceP(const VectorXd& normal)
{
    
    mAuxVector = mGL.solve(normal);
    
    double C = -2.0 * mP.dot(mAuxVector);
    C /= normal.dot(mAuxVector);
    
    mP += C * normal;
    
}
Developer ID: betanalpha, Project: jamon, Lines: 11, Source: denseFisherMetric.cpp

Example 12: fillObjGrad

 void PositionConstraint::fillObjGrad(std::vector<double>& dG) {
     VectorXd dP = evalCon();
     for(int dofIndex = 0; dofIndex < mNode->getNumDependentDofs(); dofIndex++) {
         int i = mNode->getDependentDof(dofIndex);            
         const Var* v = mVariables[i];
         double w = v->mWeight;
         VectorXd J = xformHom(mNode->getDerivWorldTransform(dofIndex), mOffset);
         J /= w;
         dG[i] += 2 * dP.dot(J);
     }
 }
Developer ID: Tarrasch, Project: dart, Lines: 11, Source: PositionConstraint.cpp

Example 13: dEda

double dEda(const VectorXd &XY,
            const VectorXd &s0,
            const vector<spring> &springlist,
            const vector<vector<int>> &springpairs,
            double kappa,
            const double g11,
            const double g12,
            const double g22)
{  
    double out;
    out=s0.dot(HarmonicGradient(springlist,XY,g11,g12,g22)+BendingGrad(springpairs,springlist,XY,kappa,g11,g12,g22));
    return out;
}
Developer ID: mrquantum, Project: calculateclusters, Lines: 13, Source: EnergyandGradients.cpp

Example 14: z

NaiveBayesClassifier::NaiveBayesClassifier(const vector<VectorXd>& x,
                                           const vector<int>& y) :
k(0), d(0), p(), mu(), var() {
    // n is the number of points
    unsigned n = x.size();
    assert(n > 0);
    assert(y.size() == n);

    // d is the dimensionality
    d = x[0].size();
    for (const VectorXd& v : x)
        assert(v.size() == d);

    // number of classes
    k = *(std::max_element(y.cbegin(), y.cend())) + 1;

    for (int i = 0; i < k; ++i) {
        // find all points in class i
        vector<VectorXd> xi;
        for (unsigned j = 0; j < n; ++j)
            if (y[j] == i)
                xi.push_back(x[j]);

        // ni is the number of points in class i
        int ni = xi.size();
        assert(ni > 0);

        // prior probability
        p.push_back((double)ni / (double)n);

        // class mean
        VectorXd m = VectorXd::Zero(d);
        for (const VectorXd& v : xi)
            m += v;
        m /= ni;
        mu.push_back(m);

        // centered data matrix
        MatrixXd z(d, ni);
        for (int j = 0; j < ni; ++j)
            z.col(j) = xi[j] - m;

        // class-specific attribute variances
        VectorXd variance(d);
        for (int j = 0; j < d; ++j) {
            VectorXd zj = z.row(j);
            variance(j) = (1.0/ni) * zj.dot(zj);
        }
        var.push_back(variance);
    }
}
Developer ID: lfritz, Project: data-mining-and-analysis, Lines: 51, Source: naivebayesclassifier.cpp

Example 15: step_size

double BacktrackingLineSearch::step_size(std::function<double (const VectorXd&)> f,
                                         const VectorXd &dfx,
                                         const VectorXd &x,
                                         const VectorXd &direction) const {
  auto m = direction.dot(dfx);
  auto t = -_c * m;
  auto fx = f(x);
  auto step = _alpha;
  for (unsigned int i = 0; i < _niter; i++) {
    VectorXd new_x = x + direction*step;
    if ((fx - f(new_x)) > step * t) {
      break;
    }
    step = step * _tau;
  }
  return step;
}
Developer ID: topsframework, Project: optimus, Lines: 17, Source: BacktrackingLineSearch.cpp


Note: The VectorXd::dot method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; the copyright of the source code belongs to the original authors, and its distribution and use are subject to the license of the corresponding project. Please do not reproduce without permission.