This page collects typical usage examples of the Golang function Dot from the package github.com/gonum/floats. If you are unsure what Dot does, how to call it, or want to see it in real code, the curated examples below may help.
A total of 15 Dot code examples are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code samples.
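Before diving into the examples, here is a minimal, self-contained sketch of the function itself (the slice values below are arbitrary). floats.Dot returns the dot product of two equal-length []float64 slices and panics if their lengths differ:

package main

import (
    "fmt"

    "github.com/gonum/floats"
)

func main() {
    a := []float64{1, 2, 3}
    b := []float64{4, 5, 6}
    fmt.Println(floats.Dot(a, b)) // 1*4 + 2*5 + 3*6 = 32
}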
Example 1: NextDirection
func (l *LBFGS) NextDirection(loc *Location, dir []float64) (stepSize float64) {
    // Uses the two-loop recursion described in
    // Nocedal, J., Wright, S.: Numerical Optimization (2nd ed.). Springer (2006), chapter 7, page 178.
    if len(loc.X) != l.dim {
        panic("lbfgs: unexpected size mismatch")
    }
    if len(loc.Gradient) != l.dim {
        panic("lbfgs: unexpected size mismatch")
    }
    if len(dir) != l.dim {
        panic("lbfgs: unexpected size mismatch")
    }

    // Store the new secant pair s = x_{k+1} - x_k, y = ∇f_{k+1} - ∇f_k.
    y := l.y[l.oldest]
    floats.SubTo(y, loc.Gradient, l.grad)
    s := l.s[l.oldest]
    floats.SubTo(s, loc.X, l.x)
    sDotY := floats.Dot(s, y)
    l.rho[l.oldest] = 1 / sDotY
    l.oldest = (l.oldest + 1) % l.Store

    copy(l.x, loc.X)
    copy(l.grad, loc.Gradient)
    copy(dir, loc.Gradient)

    // First loop: start with the most recent element and go backward.
    for i := 0; i < l.Store; i++ {
        idx := l.oldest - i - 1
        if idx < 0 {
            idx += l.Store
        }
        l.a[idx] = l.rho[idx] * floats.Dot(l.s[idx], dir)
        floats.AddScaled(dir, -l.a[idx], l.y[idx])
    }

    // Scale by the initial inverse Hessian approximation, gamma = s·y / y·y.
    gamma := sDotY / floats.Dot(y, y)
    floats.Scale(gamma, dir)

    // Second loop: start with the oldest element and go forward.
    for i := 0; i < l.Store; i++ {
        idx := i + l.oldest
        if idx >= l.Store {
            idx -= l.Store
        }
        beta := l.rho[idx] * floats.Dot(l.y[idx], dir)
        floats.AddScaled(dir, l.a[idx]-beta, l.s[idx])
    }

    // dir now contains H^{-1} * g, so flip the sign for minimization.
    floats.Scale(-1, dir)
    return 1
}
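A note on the scaling step above: gamma = (s·y)/(y·y) is the standard choice for the initial inverse Hessian approximation in L-BFGS (Nocedal & Wright, chapter 7). A minimal sketch of that computation, with made-up secant vectors:

package main

import (
    "fmt"

    "github.com/gonum/floats"
)

func main() {
    // Toy secant pair: s = x_{k+1} - x_k, y = ∇f_{k+1} - ∇f_k.
    s := []float64{0.1, -0.2}
    y := []float64{0.3, 0.1}
    gamma := floats.Dot(s, y) / floats.Dot(y, y)
    fmt.Println(gamma) // (0.03 - 0.02) / 0.10 = 0.1
}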
Example 2: NextDirection
func (l *LBFGS) NextDirection(loc *Location, dir []float64) (stepSize float64) {
    if len(loc.X) != l.dim {
        panic("lbfgs: unexpected size mismatch")
    }
    if len(loc.Gradient) != l.dim {
        panic("lbfgs: unexpected size mismatch")
    }
    if len(dir) != l.dim {
        panic("lbfgs: unexpected size mismatch")
    }

    // Update the direction using the two-loop recursion described in
    // Nocedal, Wright (2006), Numerical Optimization (2nd ed.), chapter 7, page 178.
    copy(dir, loc.Gradient)
    floats.SubTo(l.y, loc.Gradient, l.grad)
    floats.SubTo(l.s, loc.X, l.x)
    copy(l.sHist[l.oldest], l.s)
    copy(l.yHist[l.oldest], l.y)
    sDotY := floats.Dot(l.y, l.s)
    l.rhoHist[l.oldest] = 1 / sDotY
    l.oldest = (l.oldest + 1) % l.Store

    copy(l.x, loc.X)
    copy(l.grad, loc.Gradient)

    // Two-loop update. The first loop starts with the most recent element
    // and goes backward; the second starts with the oldest element and goes
    // forward. At the end dir holds H^{-1} * g, so the sign is flipped for
    // minimization.
    for i := 0; i < l.Store; i++ {
        idx := l.oldest - i - 1
        if idx < 0 {
            idx += l.Store
        }
        l.a[idx] = l.rhoHist[idx] * floats.Dot(l.sHist[idx], dir)
        floats.AddScaled(dir, -l.a[idx], l.yHist[idx])
    }

    // Scale by the initial inverse Hessian approximation.
    gamma := sDotY / floats.Dot(l.y, l.y)
    floats.Scale(gamma, dir)

    for i := 0; i < l.Store; i++ {
        idx := i + l.oldest
        if idx >= l.Store {
            idx -= l.Store
        }
        beta := l.rhoHist[idx] * floats.Dot(l.yHist[idx], dir)
        floats.AddScaled(dir, l.a[idx]-beta, l.sHist[idx])
    }
    floats.Scale(-1, dir)
    return 1
}
Example 3: initNextLinesearch
func (ls *LinesearchMethod) initNextLinesearch(loc *Location, xNext []float64) (EvaluationType, IterationType, error) {
    copy(ls.x, loc.X)

    var stepSize float64
    if ls.first {
        stepSize = ls.NextDirectioner.InitDirection(loc, ls.dir)
        ls.first = false
    } else {
        stepSize = ls.NextDirectioner.NextDirection(loc, ls.dir)
    }

    projGrad := floats.Dot(loc.Gradient, ls.dir)
    if projGrad >= 0 {
        ls.evalType = NoEvaluation
        ls.iterType = NoIteration
        return ls.evalType, ls.iterType, ErrNonNegativeStepDirection
    }

    ls.evalType = ls.Linesearcher.Init(loc.F, projGrad, stepSize)

    floats.AddScaledTo(xNext, ls.x, stepSize, ls.dir)
    // Compare the starting point for the current iteration with the next
    // evaluation point to make sure that rounding errors do not prevent progress.
    if floats.Equal(ls.x, xNext) {
        ls.evalType = NoEvaluation
        ls.iterType = NoIteration
        return ls.evalType, ls.iterType, ErrNoProgress
    }

    ls.iterType = MinorIteration
    return ls.evalType, ls.iterType, nil
}
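The projGrad check above is the standard descent-direction test: the linesearch can proceed only when the gradient projected onto the search direction is negative. A small self-contained illustration (all values made up):

package main

import (
    "fmt"

    "github.com/gonum/floats"
)

func main() {
    grad := []float64{1, -2}
    dir := []float64{-1, 2} // the negative gradient, a descent direction
    projGrad := floats.Dot(grad, dir)
    fmt.Println(projGrad, projGrad < 0) // -5 true
}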
Example 4: initNextLinesearch
// initNextLinesearch initializes the next linesearch using the previous
// complete location stored in loc. It fills loc.X and returns an evaluation
// to be performed at loc.X.
func (ls *LinesearchMethod) initNextLinesearch(loc *Location) (Operation, error) {
    copy(ls.x, loc.X)

    var step float64
    if ls.first {
        ls.first = false
        step = ls.NextDirectioner.InitDirection(loc, ls.dir)
    } else {
        step = ls.NextDirectioner.NextDirection(loc, ls.dir)
    }

    projGrad := floats.Dot(loc.Gradient, ls.dir)
    if projGrad >= 0 {
        return ls.error(ErrNonNegativeStepDirection)
    }

    op := ls.Linesearcher.Init(loc.F, projGrad, step)
    if !op.isEvaluation() {
        panic("linesearch: Linesearcher returned invalid operation")
    }

    floats.AddScaledTo(loc.X, ls.x, step, ls.dir)
    if floats.Equal(ls.x, loc.X) {
        // Step size is so small that the next evaluation point is
        // indistinguishable from the starting point for the current iteration
        // due to rounding errors.
        return ls.error(ErrNoProgress)
    }

    ls.lastStep = step
    ls.eval = NoOperation // Invalidate all fields of loc.

    ls.lastOp = op
    return ls.lastOp, nil
}
Example 5: Combine
// Combine takes a weighted sum of the inputs, with the weights set by
// parameters. The last element of parameters is the bias term, so
// len(parameters) == len(inputs) + 1.
func (s SumNeuron) Combine(parameters []float64, inputs []float64) (combination float64) {
    // Equivalent to the explicit loop:
    //	for i, val := range inputs {
    //		combination += parameters[i] * val
    //	}
    combination = floats.Dot(inputs, parameters[:len(inputs)])
    combination += parameters[len(parameters)-1]
    return combination
}
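To see Combine in isolation, here is a hypothetical standalone version of the same computation with sample weights and inputs (the helper name combine and all values are made up for illustration):

package main

import (
    "fmt"

    "github.com/gonum/floats"
)

// combine mirrors SumNeuron.Combine: a weighted sum of the inputs plus a
// bias, which is the last element of parameters.
func combine(parameters, inputs []float64) float64 {
    return floats.Dot(inputs, parameters[:len(inputs)]) + parameters[len(parameters)-1]
}

func main() {
    params := []float64{0.5, -1.0, 2.0} // two weights and a bias
    inputs := []float64{3.0, 4.0}
    fmt.Println(combine(params, inputs)) // 0.5*3 - 1*4 + 2 = -0.5
}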
Example 6: Iterate
func (ls *LinesearchMethod) Iterate(loc *Location, xNext []float64) (EvaluationType, IterationType, error) {
    if ls.iterType == SubIteration {
        // We needed to evaluate invalid fields of Location. Now we have them
        // and can announce MajorIteration.
        copy(xNext, loc.X)
        ls.evalType = NoEvaluation
        ls.iterType = MajorIteration
        return ls.evalType, ls.iterType, nil
    }

    if ls.iterType == MajorIteration {
        // The linesearch previously signaled MajorIteration. Since we're here,
        // it means that the previous location is not good enough to converge,
        // so start the next linesearch.
        return ls.initNextLinesearch(loc, xNext)
    }

    projGrad := floats.Dot(loc.Gradient, ls.dir)
    if ls.Linesearcher.Finished(loc.F, projGrad) {
        copy(xNext, loc.X)
        // Check if the last evaluation evaluated all fields of Location.
        ls.evalType = complementEval(loc, ls.evalType)
        if ls.evalType == NoEvaluation {
            // Location is complete and MajorIteration can be announced directly.
            ls.iterType = MajorIteration
        } else {
            // Location is not complete, evaluate its invalid fields in SubIteration.
            ls.iterType = SubIteration
        }
        return ls.evalType, ls.iterType, nil
    }

    // Line search not done, just iterate.
    stepSize, evalType, err := ls.Linesearcher.Iterate(loc.F, projGrad)
    if err != nil {
        ls.evalType = NoEvaluation
        ls.iterType = NoIteration
        return ls.evalType, ls.iterType, err
    }

    floats.AddScaledTo(xNext, ls.x, stepSize, ls.dir)
    // Compare the starting point for the current iteration with the next
    // evaluation point to make sure that rounding errors do not prevent progress.
    if floats.Equal(ls.x, xNext) {
        ls.evalType = NoEvaluation
        ls.iterType = NoIteration
        return ls.evalType, ls.iterType, ErrNoProgress
    }

    ls.evalType = evalType
    ls.iterType = MinorIteration
    return ls.evalType, ls.iterType, nil
}
Example 7: Iterate
func (cg *CG) Iterate(ctx *Context) Operation {
    switch cg.resume {
    case 1:
        cg.resume = 2
        // Request the caller to solve M z = r_{i-1}.
        return SolvePreconditioner
    case 2:
        // ρ_i = r_{i-1} · z
        cg.rho = floats.Dot(ctx.Residual, ctx.Z)
        if !cg.first {
            // β = ρ_i / ρ_{i-1}
            beta := cg.rho / cg.rho1
            // z = z + β p_{i-1}
            floats.AddScaled(ctx.Z, beta, ctx.P)
        }
        cg.first = false
        // p_i = z
        copy(ctx.P, ctx.Z)
        cg.resume = 3
        // Request the caller to compute Ap.
        return ComputeAp
    case 3:
        // α = ρ_i / (p_i · Ap_i)
        alpha := cg.rho / floats.Dot(ctx.P, ctx.Ap)
        // x_i = x_{i-1} + α p_i
        floats.AddScaled(ctx.X, alpha, ctx.P)
        // r_i = r_{i-1} - α Ap_i
        floats.AddScaled(ctx.Residual, -alpha, ctx.Ap)
        cg.rho1 = cg.rho
        cg.resume = 1
        return CheckConvergence
    }
    panic("unreachable")
}
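The same dot products drive a complete conjugate gradient solve. As a sanity check, here is a self-contained, unpreconditioned CG loop for a tiny symmetric positive definite system, written directly with floats (the matrix, tolerance, and iteration cap are made up; this is a sketch, not gonum's iterative API):

package main

import (
    "fmt"

    "github.com/gonum/floats"
)

// matVec computes dst = A*x for a dense row-major matrix.
func matVec(dst []float64, a [][]float64, x []float64) {
    for i, row := range a {
        dst[i] = floats.Dot(row, x)
    }
}

func main() {
    a := [][]float64{{4, 1}, {1, 3}}
    b := []float64{1, 2}

    x := make([]float64, 2)           // x_0 = 0
    r := append([]float64(nil), b...) // r_0 = b - A*x_0 = b
    p := append([]float64(nil), r...) // p_0 = r_0
    ap := make([]float64, 2)

    for i := 0; i < 10 && floats.Norm(r, 2) > 1e-12; i++ {
        matVec(ap, a, p)
        rho := floats.Dot(r, r)
        alpha := rho / floats.Dot(p, ap) // α = ρ / (p·Ap)
        floats.AddScaled(x, alpha, p)    // x += α p
        floats.AddScaled(r, -alpha, ap)  // r -= α Ap
        beta := floats.Dot(r, r) / rho   // β = ρ_new / ρ_old
        floats.Scale(beta, p)
        floats.Add(p, r) // p = r + β p
    }
    fmt.Println(x) // ≈ [0.0909 0.6364], the solution of A x = b
}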
Example 8: isOrthogonal
func isOrthogonal(a *Dense) bool {
    rows, cols := a.Dims()
    col1 := make([]float64, rows)
    col2 := make([]float64, rows)
    for i := 0; i < cols-1; i++ {
        for j := i + 1; j < cols; j++ {
            a.Col(col1, i)
            a.Col(col2, j)
            dot := floats.Dot(col1, col2)
            if math.Abs(dot) > 1e-14 {
                return false
            }
        }
    }
    return true
}
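The per-pair test is just a dot product of columns compared against a small tolerance. For instance, the columns of a 2D rotation matrix pass it (the angle is chosen arbitrarily):

package main

import (
    "fmt"
    "math"

    "github.com/gonum/floats"
)

func main() {
    theta := math.Pi / 3
    col1 := []float64{math.Cos(theta), math.Sin(theta)}
    col2 := []float64{-math.Sin(theta), math.Cos(theta)}
    dot := floats.Dot(col1, col2)
    fmt.Println(math.Abs(dot) <= 1e-14) // true: the columns are orthogonal
}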
Example 9: LogProb
// LogProb computes the log of the pdf of the point x.
func (n *Normal) LogProb(x []float64) float64 {
    dim := n.dim
    if len(x) != dim {
        panic(badSizeMismatch)
    }
    // Compute the normalization constant.
    c := -0.5*float64(dim)*logTwoPi - n.logSqrtDet

    // Compute (x-mu)' Sigma^-1 (x-mu).
    xMinusMu := make([]float64, dim)
    floats.SubTo(xMinusMu, x, n.mu)
    d := mat64.NewVector(dim, xMinusMu)
    tmp := make([]float64, dim)
    tmpVec := mat64.NewVector(dim, tmp)
    tmpVec.SolveCholeskyVec(n.chol, d)
    return c - 0.5*floats.Dot(tmp, xMinusMu)
}
Example 10: Mean
// Mean returns the Gaussian process prediction of the mean at the location x.
func (g *GP) Mean(x []float64) float64 {
    // y_mean = k_*^T K^-1 y
    // where k_* is the vector of the kernel between the new location and all
    // of the data points, y are the outputs at all the data points, and
    // K^-1 is the inverse of the full covariance of the data points
    // (K^-1 y is stored).
    if len(x) != g.inputDim {
        panic(badInputLength)
    }
    nSamples, _ := g.inputs.Dims()

    covariance := make([]float64, nSamples)
    for i := range covariance {
        covariance[i] = g.kernel.Distance(x, g.inputs.RawRowView(i))
    }
    y := floats.Dot(g.sigInvY.RawVector().Data, covariance)
    return y*g.std + g.mean
}
Example 11: ObjGrad
func (l *linesearchFun) ObjGrad(step float64) (f float64, g float64, err error) {
    // Take the step (need to add back in the scaling).
    for i, val := range l.direction {
        l.currLoc[i] = val*step + l.initLoc[i]
    }

    // Copy the location (in case the user-defined function modifies it).
    copy(l.currLocCopy, l.currLoc)
    f, gVec, err := l.fun.ObjGrad(l.currLocCopy)
    if err != nil {
        return f, g, errors.New("linesearch: error during user defined function")
    }

    // Copy the gradient vector (in case the user-defined function modifies it).
    n := copy(l.currGrad, gVec)
    if n != len(l.currLocCopy) {
        return f, g, errors.New("linesearch: user defined function returned incorrect gradient length")
    }

    // Find the gradient in the direction of the search vector.
    g = floats.Dot(l.direction, l.currGrad)
    l.wolfe.SetCurrState(f, g, step)
    return f, g, nil
}
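The projection g = ∇f · d computed above is the directional derivative of the objective along the search direction. A quick self-contained check against a central finite difference, using f(x) = x·x (all values made up):

package main

import (
    "fmt"

    "github.com/gonum/floats"
)

func main() {
    x := []float64{1, 2}
    d := []float64{0.6, 0.8}              // unit-length direction
    grad := []float64{2 * x[0], 2 * x[1]} // ∇f for f(x) = x·x
    analytic := floats.Dot(grad, d)

    // Central finite difference of f along d.
    h := 1e-6
    xp := floats.AddScaledTo(make([]float64, 2), x, h, d)
    xm := floats.AddScaledTo(make([]float64, 2), x, -h, d)
    numeric := (floats.Dot(xp, xp) - floats.Dot(xm, xm)) / (2 * h)
    fmt.Println(analytic, numeric) // both ≈ 4.4
}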
Example 12: cosCorrMultiNaive
// cosCorrMultiNaive explicitly forms vectors and computes their normalized
// dot product at every offset.
func cosCorrMultiNaive(f, g *rimg64.Multi) *rimg64.Image {
    h := rimg64.New(f.Width-g.Width+1, f.Height-g.Height+1)
    n := g.Width * g.Height * g.Channels
    a := make([]float64, n)
    b := make([]float64, n)
    for i := 0; i < h.Width; i++ {
        for j := 0; j < h.Height; j++ {
            a = a[:0]
            b = b[:0]
            // Gather the window of f and the whole of g as flat vectors.
            for u := 0; u < g.Width; u++ {
                for v := 0; v < g.Height; v++ {
                    for p := 0; p < g.Channels; p++ {
                        a = append(a, f.At(i+u, j+v, p))
                        b = append(b, g.At(u, v, p))
                    }
                }
            }
            // Normalize both vectors, then take their dot product.
            floats.Scale(1/floats.Norm(a, 2), a)
            floats.Scale(1/floats.Norm(b, 2), b)
            h.Set(i, j, floats.Dot(a, b))
        }
    }
    return h
}
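Stripped of the image plumbing, the quantity computed at each offset is the cosine similarity: the dot product of two unit-normalized vectors. In isolation (vector values made up):

package main

import (
    "fmt"

    "github.com/gonum/floats"
)

func main() {
    a := []float64{1, 2, 3}
    b := []float64{2, 4, 6} // parallel to a, so the cosine is 1
    floats.Scale(1/floats.Norm(a, 2), a)
    floats.Scale(1/floats.Norm(b, 2), b)
    fmt.Println(floats.Dot(a, b)) // ≈ 1 up to rounding
}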
Example 13: Linesearch
// Linesearch performs a linesearch along searchVector starting from initLoc.
// The optimizer should disable all non-Wolfe convergence checks on the
// gradient and step.
func Linesearch(multifun common.MultiObjGrad, method LinesearchMethod, settings univariate.GradSettings, wolfe WolfeConditioner, searchVector []float64, initLoc []float64, initObj float64, initGrad []float64) (*LinesearchResult, error) {
    // Linesearch modifies the values of the slices, but should revert the
    // changes by the end.

    // Find the norm of the search direction.
    normSearchVector := floats.Norm(searchVector, 2)

    // Find the unit search direction (replace this with an input to avoid the make?).
    direction := make([]float64, len(searchVector))
    copy(direction, searchVector)
    floats.Scale(1/normSearchVector, direction)

    // Find the initial projection of the gradient onto the search direction.
    initDirectionalGrad := floats.Dot(direction, initGrad)
    if initDirectionalGrad > 0 {
        return &LinesearchResult{}, errors.New("initial directional gradient must be negative")
    }

    // Set the Wolfe constants.
    wolfe.SetInitState(initObj, initDirectionalGrad)
    wolfe.SetCurrState(initObj, initDirectionalGrad, 1.0)
    fun := &linesearchFun{
        fun:         multifun,
        wolfe:       wolfe,
        direction:   direction,
        initLoc:     initLoc,
        currLoc:     make([]float64, len(initLoc)),
        currLocCopy: make([]float64, len(initLoc)),
        currGrad:    make([]float64, len(initLoc)),
    }
    settings.Gradient.Initial = initDirectionalGrad
    settings.Objective.Initial = initObj
    stepSettings := method.GetStepSettings()
    stepSettings.InitialStepSize = normSearchVector

    // Run the optimization; the initial location is zero.
    optVal, optLoc, result, err := univariate.OptimizeGrad(fun, 0, settings, method)

    // Regenerate the results structure (do this before returning the error, in
    // case the optimizer can recover from it). alpha_k must be rescaled
    // because the linesearch is x_k + alpha_k p_k.
    r := &LinesearchResult{
        Loc:  fun.currLoc,
        Obj:  optVal,
        Grad: fun.currGrad,
        Step: optLoc / normSearchVector,
    }
    if err != nil {
        fmt.Println("Error in linesearch")
        return r, errors.New("linesearch: error during linesearch optimization: " + err.Error())
    }

    // Check that the optimization stopped because the Wolfe conditions were met.
    stat := result.Status
    if stat != common.WolfeConditionsMet {
        // If the status wasn't because of the Wolfe conditions, see if they
        // are met anyway.
        c := wolfe.Status()
        if c == common.WolfeConditionsMet {
            // Conditions met, no problem.
            return r, nil
        }
        // Conditions not met.
        return r, errors.New("linesearch: status not because of wolfe conditions")
    }
    return r, nil
}
Example 14: computeZ
// computeZ computes the value of z for the given feature vector and b value.
// sqrt2OverD should equal math.Sqrt(2.0 / float64(nFeatures)).
func computeZ(featurizedInput, feature []float64, b float64, sqrt2OverD float64) float64 {
    dot := floats.Dot(featurizedInput, feature)
    return sqrt2OverD * math.Cos(dot+b)
}
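This has the shape of the random Fourier features construction z = sqrt(2/D) * cos(w·x + b). A hypothetical call with D = 1 and made-up values for the feature w and the offset b might look like:

x := []float64{1.0, 2.0}
w := []float64{0.5, -0.25}
b := 0.1
z := computeZ(x, w, b, math.Sqrt(2.0/1.0)) // sqrt(2) * cos(0 + 0.1) ≈ 1.407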
Example 15: Iterate
func (ls *LinesearchMethod) Iterate(loc *Location) (Operation, error) {
    switch ls.lastOp {
    case NoOperation:
        // TODO(vladimir-ch): Either Init has not been called, or the caller is
        // trying to resume the optimization run after Iterate previously
        // returned with an error. Decide what is the proper thing to do. See also #125.

    case MajorIteration:
        // The previously updated location did not converge the full
        // optimization. Initialize a new linesearch.
        return ls.initNextLinesearch(loc)

    default:
        // Update the indicator of valid fields of loc.
        ls.eval |= ls.lastOp

        if ls.nextMajor {
            ls.nextMajor = false

            // The Linesearcher previously finished, and the invalid fields of
            // loc have now been validated. Announce MajorIteration.
            ls.lastOp = MajorIteration
            return ls.lastOp, nil
        }
    }

    // Continue the linesearch.

    f := math.NaN()
    if ls.eval&FuncEvaluation != 0 {
        f = loc.F
    }
    projGrad := math.NaN()
    if ls.eval&GradEvaluation != 0 {
        projGrad = floats.Dot(loc.Gradient, ls.dir)
    }
    op, step, err := ls.Linesearcher.Iterate(f, projGrad)
    if err != nil {
        return ls.error(err)
    }

    switch op {
    case MajorIteration:
        // The linesearch has been finished.
        ls.lastOp = complementEval(loc, ls.eval)
        if ls.lastOp == NoOperation {
            // loc is complete, MajorIteration can be declared directly.
            ls.lastOp = MajorIteration
        } else {
            // Declare MajorIteration on the next call to Iterate.
            ls.nextMajor = true
        }

    case FuncEvaluation, GradEvaluation, FuncEvaluation | GradEvaluation:
        if step != ls.lastStep {
            // We are moving to a new location, and not, say, evaluating extra
            // information at the current location.

            // Compute the next evaluation point and store it in loc.X.
            floats.AddScaledTo(loc.X, ls.x, step, ls.dir)
            if floats.Equal(ls.x, loc.X) {
                // The step size has become so small that the next evaluation
                // point is indistinguishable from the starting point for the
                // current iteration due to rounding errors.
                return ls.error(ErrNoProgress)
            }
            ls.lastStep = step
            ls.eval = NoOperation // Invalidate all fields of loc.
        }
        ls.lastOp = op

    default:
        panic("linesearch: Linesearcher returned invalid operation")
    }

    return ls.lastOp, nil
}