This page collects typical usage examples of the Golang Scale function from github.com/gonum/floats. If you are wondering what floats.Scale does, how to call it, and what idiomatic usage looks like, the curated examples below should help.
It presents 15 code examples of the Scale function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
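Before diving into the examples, here is a minimal, self-contained sketch of what floats.Scale does: it multiplies every element of a slice by a constant, in place. The slice values below are made up for illustration.

package main

import (
	"fmt"

	"github.com/gonum/floats"
)

func main() {
	v := []float64{1, 2, 4}
	// Multiply every element of v by 0.5, in place.
	floats.Scale(0.5, v)
	fmt.Println(v) // [0.5 1 2]
	// Scaling by -1 negates in place, a pattern several examples below rely on.
	floats.Scale(-1, v)
	fmt.Println(v) // [-0.5 -1 -2]
}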
Example 1: NextDirection
func (l *LBFGS) NextDirection(loc *Location, dir []float64) (stepSize float64) {
// Uses two-loop correction as described in
// Nocedal, J., Wright, S.: Numerical Optimization (2nd ed.). Springer (2006), chapter 7, page 178.
if len(loc.X) != l.dim {
panic("lbfgs: unexpected size mismatch")
}
if len(loc.Gradient) != l.dim {
panic("lbfgs: unexpected size mismatch")
}
if len(dir) != l.dim {
panic("lbfgs: unexpected size mismatch")
}
y := l.y[l.oldest]
floats.SubTo(y, loc.Gradient, l.grad)
s := l.s[l.oldest]
floats.SubTo(s, loc.X, l.x)
sDotY := floats.Dot(s, y)
l.rho[l.oldest] = 1 / sDotY
l.oldest = (l.oldest + 1) % l.Store
copy(l.x, loc.X)
copy(l.grad, loc.Gradient)
copy(dir, loc.Gradient)
// Start with the most recent element and go backward.
for i := 0; i < l.Store; i++ {
idx := l.oldest - i - 1
if idx < 0 {
idx += l.Store
}
l.a[idx] = l.rho[idx] * floats.Dot(l.s[idx], dir)
floats.AddScaled(dir, -l.a[idx], l.y[idx])
}
// Scale the initial Hessian.
gamma := sDotY / floats.Dot(y, y)
floats.Scale(gamma, dir)
// Start with the oldest element and go forward.
for i := 0; i < l.Store; i++ {
idx := i + l.oldest
if idx >= l.Store {
idx -= l.Store
}
beta := l.rho[idx] * floats.Dot(l.y[idx], dir)
floats.AddScaled(dir, l.a[idx]-beta, l.s[idx])
}
// dir contains H^{-1} * g, so flip the direction for minimization.
floats.Scale(-1, dir)
return 1
}
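The two-loop recursion above is assembled entirely from in-place floats primitives. The standalone sketch below, with made-up numbers, isolates that pattern: AddScaled folds a scaled vector into dir, Scale applies the initial-Hessian factor gamma, and Scale(-1, dir) flips the sign at the end, just as the example does to turn H^{-1} * g into a descent direction.

package main

import (
	"fmt"

	"github.com/gonum/floats"
)

func main() {
	dir := []float64{1, 2}
	y := []float64{1, 0}
	// dir <- dir + (-0.5)*y, the shape of each first-loop update.
	floats.AddScaled(dir, -0.5, y)
	// dir <- gamma*dir, the initial-Hessian scaling step (gamma made up here).
	floats.Scale(0.25, dir)
	// Flip the sign to get a descent direction.
	floats.Scale(-1, dir)
	fmt.Println(dir) // [-0.125 -0.5]
}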
Example 2: NextDirection
func (l *LBFGS) NextDirection(loc *Location, dir []float64) (stepSize float64) {
if len(loc.X) != l.dim {
panic("lbfgs: unexpected size mismatch")
}
if len(loc.Gradient) != l.dim {
panic("lbfgs: unexpected size mismatch")
}
if len(dir) != l.dim {
panic("lbfgs: unexpected size mismatch")
}
// Update direction. Uses two-loop correction as described in
// Nocedal, Wright (2006), Numerical Optimization (2nd ed.). Chapter 7, page 178.
copy(dir, loc.Gradient)
floats.SubTo(l.y, loc.Gradient, l.grad)
floats.SubTo(l.s, loc.X, l.x)
copy(l.sHist[l.oldest], l.s)
copy(l.yHist[l.oldest], l.y)
sDotY := floats.Dot(l.y, l.s)
l.rhoHist[l.oldest] = 1 / sDotY
l.oldest++
l.oldest = l.oldest % l.Store
copy(l.x, loc.X)
copy(l.grad, loc.Gradient)
// Two-loop update. The first loop starts with the most recent element
// and goes backward; the second starts with the oldest element and goes
// forward. At the end, dir holds H^{-1} * g, so flip the sign for
// minimization.
for i := 0; i < l.Store; i++ {
idx := l.oldest - i - 1
if idx < 0 {
idx += l.Store
}
l.a[idx] = l.rhoHist[idx] * floats.Dot(l.sHist[idx], dir)
floats.AddScaled(dir, -l.a[idx], l.yHist[idx])
}
// Scale the initial Hessian.
gamma := sDotY / floats.Dot(l.y, l.y)
floats.Scale(gamma, dir)
for i := 0; i < l.Store; i++ {
idx := i + l.oldest
if idx >= l.Store {
idx -= l.Store
}
beta := l.rhoHist[idx] * floats.Dot(l.yHist[idx], dir)
floats.AddScaled(dir, l.a[idx]-beta, l.sHist[idx])
}
floats.Scale(-1, dir)
return 1
}
Example 3: NextDirection
func (n *Newton) NextDirection(loc *Location, dir []float64) (stepSize float64) {
// This method implements Algorithm 3.3 (Cholesky with Added Multiple of
// the Identity) from Nocedal, Wright (2006), 2nd edition.
dim := len(loc.X)
n.hess.CopySym(loc.Hessian)
// Find the smallest diagonal entry of the Hessian.
minA := n.hess.At(0, 0)
for i := 1; i < dim; i++ {
a := n.hess.At(i, i)
if a < minA {
minA = a
}
}
// If the smallest diagonal entry is positive, the Hessian may be positive
// definite, and so first attempt to apply the Cholesky factorization to
// the un-modified Hessian. If the smallest entry is negative, use the
// final tau from the last iteration if regularization was needed,
// otherwise guess an appropriate value for tau.
if minA > 0 {
n.tau = 0
} else if n.tau == 0 {
n.tau = -minA + 0.001
}
for k := 0; k < maxNewtonModifications; k++ {
if n.tau != 0 {
// Add a multiple of identity to the Hessian.
for i := 0; i < dim; i++ {
n.hess.SetSym(i, i, loc.Hessian.At(i, i)+n.tau)
}
}
// Try to apply the Cholesky factorization.
pd := n.chol.Factorize(n.hess)
if pd {
d := mat64.NewVector(dim, dir)
// Store the solution in d's backing array, dir.
d.SolveCholeskyVec(&n.chol, mat64.NewVector(dim, loc.Gradient))
floats.Scale(-1, dir)
return 1
}
// Modified Hessian is not PD, so increase tau.
n.tau = math.Max(n.Increase*n.tau, 0.001)
}
// Hessian modification failed to get a PD matrix. Return the negative
// gradient as the descent direction.
copy(dir, loc.Gradient)
floats.Scale(-1, dir)
return 1
}
Example 4: InitDirection
func (b *BFGS) InitDirection(loc *Location, dir []float64) (stepSize float64) {
dim := len(loc.X)
b.dim = dim
b.x = resize(b.x, dim)
copy(b.x, loc.X)
b.grad = resize(b.grad, dim)
copy(b.grad, loc.Gradient)
b.y = resize(b.y, dim)
b.s = resize(b.s, dim)
b.tmp = resize(b.tmp, dim)
b.yVec = mat64.NewVector(dim, b.y)
b.sVec = mat64.NewVector(dim, b.s)
b.tmpVec = mat64.NewVector(dim, b.tmp)
if b.invHess == nil || cap(b.invHess.RawSymmetric().Data) < dim*dim {
b.invHess = mat64.NewSymDense(dim, nil)
} else {
b.invHess = mat64.NewSymDense(dim, b.invHess.RawSymmetric().Data[:dim*dim])
}
// The values of the inverse Hessian are initialized in the first call to
// NextDirection. The initial direction is just the negative of the gradient
// because the Hessian is initialized to be the identity.
copy(dir, loc.Gradient)
floats.Scale(-1, dir)
b.first = true
return 1 / floats.Norm(dir, 2)
}
Example 5: locationsAsy
// locationsAsy returns the node locations and weights of a Hermite quadrature rule
// with len(x) points.
func (h Hermite) locationsAsy(x, w []float64) {
// A. Townsend, T. Trogdon, and S. Olver, Fast computation of Gauss quadrature
// nodes and weights on the whole real line, IMA J. Numer. Anal.,
// 36: 337–358, 2016. http://arxiv.org/abs/1410.5286
// Find the positive locations and weights.
n := len(x)
l := n / 2
xa := x[l:]
wa := w[l:]
for i := range xa {
xa[i], wa[i] = h.locationsAsy0(i, n)
}
// Flip around zero -- copy the negative x locations with the corresponding
// weights.
if n%2 == 0 {
l--
}
for i, v := range xa {
x[l-i] = -v
}
for i, v := range wa {
w[l-i] = v
}
sumW := floats.Sum(w)
c := math.SqrtPi / sumW
floats.Scale(c, w)
}
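The final two lines above rescale the weights so they sum to sqrt(pi), the value of the integral of exp(-x^2) over the real line. The sketch below shows the same rescale-to-a-target-sum idiom on made-up weights.

package main

import (
	"fmt"
	"math"

	"github.com/gonum/floats"
)

func main() {
	// Made-up weights; the real rule computes them asymptotically.
	w := []float64{0.3, 0.9, 0.3}
	// Rescale in place so the weights sum to sqrt(pi).
	floats.Scale(math.SqrtPi/floats.Sum(w), w)
	fmt.Println(floats.Sum(w), math.SqrtPi) // equal up to rounding
}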
Example 6: TestCategoricalCDF
func TestCategoricalCDF(t *testing.T) {
for _, test := range [][]float64{
{1, 2, 3, 0, 4},
} {
c := make([]float64, len(test))
copy(c, test)
floats.Scale(1/floats.Sum(c), c)
sum := make([]float64, len(test))
floats.CumSum(sum, c)
dist := NewCategorical(test, nil)
cdf := dist.CDF(-0.5)
if cdf != 0 {
t.Errorf("CDF of negative number not zero")
}
for i := range c {
cdf := dist.CDF(float64(i))
if math.Abs(cdf-sum[i]) > 1e-14 {
t.Errorf("CDF mismatch %v. Want %v, got %v.", float64(i), sum[i], cdf)
}
cdfp := dist.CDF(float64(i) + 0.5)
if cdfp != cdf {
t.Errorf("CDF mismatch for non-integer input")
}
}
}
}
Example 7: TestCategoricalProb
func TestCategoricalProb(t *testing.T) {
for _, test := range [][]float64{
{1, 2, 3, 0},
} {
dist := NewCategorical(test, nil)
// Normalize a copy of the weights to get the expected probabilities.
norm := make([]float64, len(test))
copy(norm, test)
floats.Scale(1/floats.Sum(norm), norm)
for i, v := range norm {
p := dist.Prob(float64(i))
if math.Abs(p-v) > 1e-14 {
t.Errorf("Probability mismatch element %d", i)
}
p = dist.Prob(float64(i) + 0.5)
if p != 0 {
t.Errorf("Non-zero probability for non-integer x")
}
}
p := dist.Prob(-1)
if p != 0 {
t.Errorf("Non-zero probability for -1")
}
p = dist.Prob(float64(len(test)))
if p != 0 {
t.Errorf("Non-zero probability for len(test)")
}
}
}
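Examples 6 and 7 both lean on the same idiom: dividing a slice of nonnegative weights by their sum turns it into a probability distribution. A minimal sketch with made-up weights chosen so the arithmetic is exact:

package main

import (
	"fmt"

	"github.com/gonum/floats"
)

func main() {
	p := []float64{1, 1, 2}
	// Normalize in place so the elements sum to 1.
	floats.Scale(1/floats.Sum(p), p)
	fmt.Println(p) // [0.25 0.25 0.5]
}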
Example 8: FuncGrad
// FuncGrad must not be called in parallel because the batch structures are reused between calls.
func (g *GradOptimizable) FuncGrad(params []float64, deriv []float64) float64 {
inds := g.Sampler.Iterate()
total := len(inds)
var totalLoss float64
for i := range deriv {
deriv[i] = 0
}
// Send the regularizer
g.batches[0].parameters = params
g.regularizeChan <- g.batches[0]
// Send initial batches out
var initBatches int
var lastSent int
for i := 0; i < g.NumWorkers; i++ {
if lastSent == total {
break
}
add := g.grainSize
if lastSent+add >= total {
add = total - lastSent
}
initBatches++
g.batches[i+1].idxs = inds[lastSent : lastSent+add]
g.batches[i+1].parameters = params
g.sendWork <- g.batches[i+1]
lastSent += add
}
// Collect the batches and resend out
for lastSent < total {
batch := <-g.receiveWork
totalLoss += batch.loss
floats.Add(deriv, batch.deriv)
add := g.grainSize
if lastSent+add >= total {
add = total - lastSent
}
batch.idxs = inds[lastSent : lastSent+add]
g.sendWork <- batch
lastSent += add
}
// All indices have been sent, so just wait for the remaining results.
for i := 0; i < initBatches; i++ {
batch := <-g.receiveWork
totalLoss += batch.loss
floats.Add(deriv, batch.deriv)
}
batch := <-g.regDone
totalLoss += batch.loss
floats.Add(deriv, batch.deriv)
totalLoss /= float64(len(inds))
floats.Scale(1/float64(len(inds)), deriv)
return totalLoss
}
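FuncGrad accumulates the per-batch gradients with floats.Add and then divides by the total sample count with floats.Scale, which yields the mean gradient. A minimal sketch of that accumulate-then-average pattern, with made-up per-batch gradients:

package main

import (
	"fmt"

	"github.com/gonum/floats"
)

func main() {
	// Made-up gradients from four batches of one sample each.
	batches := [][]float64{{1, 2}, {3, 2}, {0, 4}, {4, 0}}
	deriv := make([]float64, 2)
	for _, g := range batches {
		floats.Add(deriv, g) // accumulate
	}
	// Divide by the number of samples to get the mean gradient.
	floats.Scale(1/float64(len(batches)), deriv)
	fmt.Println(deriv) // [2 2]
}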
Example 9: returnNext
// returnNext updates the location based on the iteration type and the current
// simplex, and returns the next operation.
func (n *NelderMead) returnNext(iter nmIterType, loc *Location) (Operation, error) {
n.lastIter = iter
switch iter {
case nmMajor:
// Fill loc with the current best point and value,
// and command a convergence check.
copy(loc.X, n.vertices[0])
loc.F = n.values[0]
return MajorIteration, nil
case nmReflected, nmExpanded, nmContractedOutside, nmContractedInside:
// x_new = x_centroid + scale * (x_centroid - x_worst)
var scale float64
switch iter {
case nmReflected:
scale = n.reflection
case nmExpanded:
scale = n.reflection * n.expansion
case nmContractedOutside:
scale = n.reflection * n.contraction
case nmContractedInside:
scale = -n.contraction
}
dim := len(loc.X)
floats.SubTo(loc.X, n.centroid, n.vertices[dim])
floats.Scale(scale, loc.X)
floats.Add(loc.X, n.centroid)
if iter == nmReflected {
copy(n.reflectedPoint, loc.X)
}
return FuncEvaluation, nil
case nmShrink:
// x_shrink = x_best + delta * (x_i - x_best)
floats.SubTo(loc.X, n.vertices[n.fillIdx], n.vertices[0])
floats.Scale(n.shrink, loc.X)
floats.Add(loc.X, n.vertices[0])
return FuncEvaluation, nil
default:
panic("unreachable")
}
}
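Each reflection-family update above is three floats calls: SubTo writes x_centroid - x_worst into the output slice, Scale multiplies it by the step coefficient, and Add shifts it by the centroid. The standalone sketch below reproduces that pattern with made-up points and scale = 1, i.e. a pure reflection:

package main

import (
	"fmt"

	"github.com/gonum/floats"
)

func main() {
	centroid := []float64{1, 1}
	worst := []float64{3, 0}
	x := make([]float64, len(centroid))
	// x = centroid + 1*(centroid - worst)
	floats.SubTo(x, centroid, worst) // x = centroid - worst
	floats.Scale(1, x)               // apply the step coefficient
	floats.Add(x, centroid)          // shift by the centroid
	fmt.Println(x) // [-1 2]
}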
Example 10: ObjGrad
// ObjGrad computes the objective value and stores the derivative in place.
func (g *BatchGradBased) ObjGrad(parameters []float64, derivative []float64) (loss float64) {
c := make(chan lossDerivStruct, 10)
// Set the channel for parallel for
f := func(start, end int) {
g.lossDerivFunc(start, end, c, parameters)
}
go func() {
wg := &sync.WaitGroup{}
// Compute the losses and the derivatives all in parallel
wg.Add(2)
go func() {
common.ParallelFor(g.nTrain, g.grainSize, f)
wg.Done()
}()
// Compute the regularization
go func() {
deriv := make([]float64, g.nParameters)
loss := g.regularizer.LossDeriv(parameters, deriv)
//fmt.Println("regularizer loss = ", loss)
//fmt.Println("regularizer deriv = ", deriv)
c <- lossDerivStruct{
loss: loss,
deriv: deriv,
}
wg.Done()
}()
// Wait for all of the results to be sent on the channel
wg.Wait()
// Close the channel
close(c)
}()
// zero the derivative
for i := range derivative {
derivative[i] = 0
}
// Range over the channel, incrementing the loss and derivative
// as they come in
for l := range c {
loss += l.loss
floats.Add(derivative, l.deriv)
}
//fmt.Println("nTrain", g.nTrain)
//fmt.Println("final deriv", derivative)
// Normalize by the number of training samples
loss /= float64(g.nTrain)
floats.Scale(1/float64(g.nTrain), derivative)
return loss
}
Example 11: UpdateOne
// UpdateOne updates sufficient statistics using one observation.
func (g *Model) UpdateOne(o model.Obs, w float64) {
glog.V(6).Infof("gaussian update, name:%s, obs:%v, weight:%e", g.ModelName, o, w)
/* Update sufficient statistics. */
obs, _, _ := model.ObsToF64(o)
floatx.Apply(floatx.ScaleFunc(w), obs, g.tmpArray)
floats.Add(g.Sumx, g.tmpArray)
floatx.Sq(g.tmpArray, obs)
floats.Scale(w, g.tmpArray)
floats.Add(g.Sumxsq, g.tmpArray)
g.NSamples += w
}
Example 12: sampleCategorical
func sampleCategorical(t *testing.T, dist Categorical, nSamples int) []float64 {
counts := make([]float64, dist.Len())
for i := 0; i < nSamples; i++ {
v := dist.Rand()
if float64(int(v)) != v {
t.Fatalf("Random number is not an integer")
}
counts[int(v)]++
}
sum := floats.Sum(counts)
floats.Scale(1/sum, counts)
return counts
}
Example 13: TestJensenShannon
func TestJensenShannon(t *testing.T) {
for i, test := range []struct {
p []float64
q []float64
}{
{
p: []float64{0.5, 0.1, 0.3, 0.1},
q: []float64{0.1, 0.4, 0.25, 0.25},
},
{
p: []float64{0.4, 0.6, 0.0},
q: []float64{0.2, 0.2, 0.6},
},
{
p: []float64{0.1, 0.1, 0.0, 0.8},
q: []float64{0.6, 0.3, 0.0, 0.1},
},
{
p: []float64{0.5, 0.1, 0.3, 0.1},
q: []float64{0.5, 0, 0.25, 0.25},
},
{
p: []float64{0.5, 0.1, 0, 0.4},
q: []float64{0.1, 0.4, 0.25, 0.25},
},
} {
m := make([]float64, len(test.p))
p := test.p
q := test.q
floats.Add(m, p)
floats.Add(m, q)
floats.Scale(0.5, m)
js1 := 0.5*KullbackLeibler(p, m) + 0.5*KullbackLeibler(q, m)
js2 := JensenShannon(p, q)
if math.IsNaN(js2) {
t.Errorf("In case %v, JS distance is NaN", i)
}
if math.Abs(js1-js2) > 1e-14 {
t.Errorf("JS mismatch case %v. Expected %v, found %v.", i, js1, js2)
}
}
if !Panics(func() { JensenShannon(make([]float64, 3), make([]float64, 2)) }) {
t.Errorf("JensenShannon did not panic with p, q length mismatch")
}
}
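The test builds the midpoint distribution m = (p + q) / 2 with two calls to floats.Add followed by a single floats.Scale. A minimal sketch with made-up distributions chosen so the arithmetic is exact:

package main

import (
	"fmt"

	"github.com/gonum/floats"
)

func main() {
	p := []float64{0.5, 0.25, 0.25}
	q := []float64{0.25, 0.25, 0.5}
	// m = (p + q) / 2, accumulated into a zeroed slice.
	m := make([]float64, len(p))
	floats.Add(m, p)
	floats.Add(m, q)
	floats.Scale(0.5, m)
	fmt.Println(m) // [0.375 0.25 0.375]
}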
Example 14: cosCorrMultiNaive
// cosCorrMultiNaive explicitly forms the vectors for each window and computes their normalized dot product.
func cosCorrMultiNaive(f, g *rimg64.Multi) *rimg64.Image {
h := rimg64.New(f.Width-g.Width+1, f.Height-g.Height+1)
n := g.Width * g.Height * g.Channels
a := make([]float64, n)
b := make([]float64, n)
for i := 0; i < h.Width; i++ {
for j := 0; j < h.Height; j++ {
a = a[:0]
b = b[:0]
for u := 0; u < g.Width; u++ {
for v := 0; v < g.Height; v++ {
for p := 0; p < g.Channels; p++ {
a = append(a, f.At(i+u, j+v, p))
b = append(b, g.At(u, v, p))
}
}
}
floats.Scale(1/floats.Norm(a, 2), a)
floats.Scale(1/floats.Norm(b, 2), b)
h.Set(i, j, floats.Dot(a, b))
}
}
return h
}
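Scaling each vector by the reciprocal of its Euclidean norm, as the inner loop above does, makes the subsequent dot product a cosine similarity. A minimal sketch with made-up vectors:

package main

import (
	"fmt"

	"github.com/gonum/floats"
)

func main() {
	a := []float64{3, 4}
	b := []float64{4, 3}
	// Normalize each vector to unit Euclidean length, in place.
	floats.Scale(1/floats.Norm(a, 2), a)
	floats.Scale(1/floats.Norm(b, 2), b)
	// The dot product of unit vectors is the cosine similarity.
	fmt.Println(floats.Dot(a, b)) // ≈ 0.96
}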
Example 15: returnNext
// returnNext finds the next location to evaluate, stores the location in xNext,
// and returns the evaluation and iteration types for that location.
func (n *NelderMead) returnNext(iter nmIterType, xNext []float64) (EvaluationType, IterationType, error) {
dim := len(xNext)
n.lastIter = iter
switch iter {
case nmReflected, nmExpanded, nmContractedOutside, nmContractedInside:
// x_new = x_centroid + scale * (x_centroid - x_worst)
var scale float64
switch iter {
case nmReflected:
scale = n.reflection
case nmExpanded:
scale = n.reflection * n.expansion
case nmContractedOutside:
scale = n.reflection * n.contraction
case nmContractedInside:
scale = -n.contraction
}
floats.SubTo(xNext, n.centroid, n.vertices[dim])
floats.Scale(scale, xNext)
floats.Add(xNext, n.centroid)
if iter == nmReflected {
copy(n.reflectedPoint, xNext)
// Nelder-Mead iterations begin with the reflection step, so mark it as a major iteration.
return FuncEvaluation, MajorIteration, nil
}
return FuncEvaluation, MinorIteration, nil
case nmShrink:
// x_shrink = x_best + delta * (x_i - x_best)
floats.SubTo(xNext, n.vertices[n.fillIdx], n.vertices[0])
floats.Scale(n.shrink, xNext)
floats.Add(xNext, n.vertices[0])
return FuncEvaluation, SubIteration, nil
default:
panic("unreachable")
}
}