本文整理汇总了Golang中github.com/gonum/matrix/mat64.NewVector函数的典型用法代码示例。如果您正苦于以下问题:Golang NewVector函数的具体用法?Golang NewVector怎么用?Golang NewVector使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了NewVector函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: InitDirection
// InitDirection initializes the BFGS state at the starting location loc,
// writes the first search direction into dir, and returns the initial
// step size.
func (b *BFGS) InitDirection(loc *Location, dir []float64) (stepSize float64) {
	dim := len(loc.X)
	b.dim = dim
	// Resize the cached slices (reusing backing storage where possible)
	// and copy in the current location and gradient.
	b.x = resize(b.x, dim)
	copy(b.x, loc.X)
	b.grad = resize(b.grad, dim)
	copy(b.grad, loc.Gradient)
	// y, s and tmp are scratch space for the BFGS update; the *Vec fields
	// are mat64 views aliasing the same backing slices.
	b.y = resize(b.y, dim)
	b.s = resize(b.s, dim)
	b.tmp = resize(b.tmp, dim)
	b.yVec = mat64.NewVector(dim, b.y)
	b.sVec = mat64.NewVector(dim, b.s)
	b.tmpVec = mat64.NewVector(dim, b.tmp)
	// Reuse the inverse-Hessian backing array when it is already large
	// enough; otherwise allocate a fresh dim×dim symmetric matrix.
	if b.invHess == nil || cap(b.invHess.RawSymmetric().Data) < dim*dim {
		b.invHess = mat64.NewSymDense(dim, nil)
	} else {
		b.invHess = mat64.NewSymDense(dim, b.invHess.RawSymmetric().Data[:dim*dim])
	}
	// The values of the Hessian are initialized in the first call to
	// NextDirection. The initial direction is just the negative of the
	// gradient because the Hessian is treated as the identity.
	copy(dir, loc.Gradient)
	floats.Scale(-1, dir)
	b.first = true
	return 1 / floats.Norm(dir, 2)
}
示例2: GradientDescent
// GradientDescent fits parameters theta minimizing the squared error of
// X*theta against y using batch gradient descent.
//
// X is the m×n design matrix (m training examples, n features) and y the
// m-vector of targets. alpha is the learning rate, tolerance the gradient
// norm below which iteration stops early, and maxIters the iteration cap.
// It returns the fitted n-vector theta.
func GradientDescent(X *mat64.Dense, y *mat64.Vector, alpha, tolerance float64, maxIters int) *mat64.Vector {
	// m = Number of Training Examples
	// n = Number of Features
	m, n := X.Dims()
	h := mat64.NewVector(m, nil)
	partials := mat64.NewVector(n, nil)
	newTheta := mat64.NewVector(n, nil)
	for i := 0; i < maxIters; i++ {
		// h = (X*theta - y)/m: per-example residuals scaled by 1/m.
		h.MulVec(X, newTheta)
		for el := 0; el < m; el++ {
			val := (h.At(el, 0) - y.At(el, 0)) / float64(m)
			h.SetVec(el, val)
		}
		// partials = Xᵀh, the gradient of the cost w.r.t. theta.
		partials.MulVec(X.T(), h)
		// Gradient step: theta_j -= alpha * partial_j.
		for el := 0; el < n; el++ {
			newVal := newTheta.At(el, 0) - (alpha * partials.At(el, 0))
			newTheta.SetVec(el, newVal)
		}
		// Check the "distance" to the local minimum; stop when the
		// gradient norm is within tolerance.
		dist := math.Sqrt(mat64.Dot(partials, partials))
		if dist <= tolerance {
			break
		}
	}
	return newTheta
}
示例3: Init
// Init prepares the BiCG solver state in ctx and returns the first
// operation the caller must perform (solving the preconditioner system).
func (bicg *BiCG) Init(ctx *Context) Operation {
	if bicg.BreakdownTolerance == 0 {
		bicg.BreakdownTolerance = 1e-6
	}
	bicg.rho = math.NaN()

	dim := ctx.X.Len()
	// ensure returns v unchanged when it already has length dim,
	// otherwise a freshly allocated zero vector of that length.
	ensure := func(v *mat64.Vector) *mat64.Vector {
		if v == nil || v.Len() != dim {
			return mat64.NewVector(dim, nil)
		}
		return v
	}
	ctx.P = ensure(ctx.P)
	ctx.Ap = ensure(ctx.Ap)
	ctx.Q = ensure(ctx.Q)
	ctx.Aq = ensure(ctx.Aq)
	ctx.Z = ensure(ctx.Z)

	bicg.resume = 2
	// First step of the iteration: solve M z = r_{i-1}.
	return SolvePreconditioner
}
示例4: Fit
// Fit runs batch gradient descent, updating lr.Theta in place until the
// gradient norm drops below lr.tolerance or lr.maxIters iterations elapse.
func (lr *LinearRegression) Fit() {
	h := mat64.NewVector(lr.m, nil)
	partials := mat64.NewVector(lr.n, nil)
	// Fold the 1/m normalization into the learning rate once, outside the loop.
	alphaM := lr.alpha / float64(lr.m)
	for i := 0; i < lr.maxIters; i++ {
		// h = X*theta - y: per-example residuals.
		h.MulVec(lr.x, lr.Theta)
		for x := 0; x < lr.m; x++ {
			h.SetVec(x, h.At(x, 0)-lr.y.At(x, 0))
		}
		// partials = Xᵀh, the (unnormalized) cost gradient.
		partials.MulVec(lr.x.T(), h)
		// Update theta values with the precalculated partials:
		// theta_j -= (alpha/m) * partial_j.
		for x := 0; x < lr.n; x++ {
			thetaJ := lr.Theta.At(x, 0) - alphaM*partials.At(x, 0)
			lr.Theta.SetVec(x, thetaJ)
		}
		// Check the "distance" to the local minimum; stop when the
		// gradient norm is within tolerance.
		dist := math.Sqrt(mat64.Dot(partials, partials))
		if dist <= lr.tolerance {
			break
		}
	}
}
示例5: TestHypothesis
// TestHypothesis checks Hypothesis against hand-computed dot products of
// theta and x.
func TestHypothesis(t *testing.T) {
	cases := []struct {
		theta *mat64.Vector
		x     *mat64.Vector
		y     float64
	}{
		{
			theta: mat64.NewVector(2, []float64{0, 2}),
			x:     mat64.NewVector(2, []float64{0, 1}),
			y:     2.0,
		},
		{
			theta: mat64.NewVector(2, []float64{0, 2}),
			x:     mat64.NewVector(2, []float64{0, 2}),
			y:     4.0,
		},
		{
			theta: mat64.NewVector(2, []float64{0, 2}),
			x:     mat64.NewVector(2, []float64{0, 10}),
			y:     20.0,
		},
		{
			theta: mat64.NewVector(2, []float64{1, 2}),
			x:     mat64.NewVector(2, []float64{1, 10}),
			y:     21.0,
		},
		{
			theta: mat64.NewVector(3, []float64{1, 2.5, 5}),
			x:     mat64.NewVector(3, []float64{10, 20, 0}),
			y:     60.0,
		},
	}
	for _, tc := range cases {
		got := Hypothesis(tc.x, tc.theta)
		if got != tc.y {
			t.Errorf("Hypothesis(%v,%v) is expected to be equal to %v, found %v", tc.x, tc.theta, tc.y, got)
		}
	}
}
示例6: InitDirection
// InitDirection initializes the BFGS state at the starting location loc,
// writes the first search direction into dir, and returns the initial
// step size.
func (b *BFGS) InitDirection(loc *Location, dir []float64) (stepSize float64) {
	dim := len(loc.X)
	b.dim = dim
	b.first = true
	// Cache the current location and gradient; CloneVec copies the data
	// so later mutations of loc do not affect the stored state.
	x := mat64.NewVector(dim, loc.X)
	grad := mat64.NewVector(dim, loc.Gradient)
	b.x.CloneVec(x)
	b.grad.CloneVec(grad)
	// Clear the update scratch vectors from any previous run.
	b.y.Reset()
	b.s.Reset()
	b.tmp.Reset()
	// Reuse the inverse-Hessian backing array when it is already large
	// enough; otherwise allocate a fresh dim×dim symmetric matrix.
	if b.invHess == nil || cap(b.invHess.RawSymmetric().Data) < dim*dim {
		b.invHess = mat64.NewSymDense(dim, nil)
	} else {
		b.invHess = mat64.NewSymDense(dim, b.invHess.RawSymmetric().Data[:dim*dim])
	}
	// The values of the inverse Hessian are initialized in the first call to
	// NextDirection.
	// Initial direction is just negative of the gradient because the Hessian
	// is an identity matrix.
	d := mat64.NewVector(dim, dir)
	d.ScaleVec(-1, grad)
	return 1 / mat64.Norm(d, 2)
}
示例7: transformNormal
// transformNormal performs the same operation as TransformNormal except no
// safety checks are performed and both input slices must be non-nil.
func (n *Normal) transformNormal(dst, normal []float64) []float64 {
	out := mat64.NewVector(n.dim, dst)
	in := mat64.NewVector(n.dim, normal)
	// dst = lower * normal, then shift by the mean vector.
	out.MulVec(&n.lower, in)
	floats.Add(dst, n.mu)
	return dst
}
示例8: LinearLeastSquares
// LinearLeastSquares computes the least squares fit for the function
//
//	f(x) = β₀terms₀(x) + β₁terms₁(x) + ...
//
// to the data (xs[i], ys[i]). It returns the parameters β₀, β₁, ...
// that minimize the sum of the squares of the residuals of f:
//
//	∑ (ys[i] - f(xs[i]))²
//
// If weights is non-nil, it is used to weight these residuals:
//
//	∑ weights[i] × (ys[i] - f(xs[i]))²
//
// The function f is specified by one Go function for each linear
// term. For efficiency, the Go function is vectorized: it will be
// passed a slice of x values in xs and must fill the slice termOut
// with the value of the term for each value in xs.
func LinearLeastSquares(xs, ys, weights []float64, terms ...func(xs, termOut []float64)) (params []float64) {
	// The optimal parameters are found by solving for β̂ in the
	// "normal equations":
	//
	//	(XᵀWX)β̂ = XᵀWy
	//
	// where W is a diagonal weight matrix (or the identity matrix
	// for the unweighted case).

	// TODO: Consider using orthogonal decomposition.

	if len(xs) != len(ys) {
		panic("len(xs) != len(ys)")
	}
	if weights != nil && len(xs) != len(weights) {
		panic("len(xs) != len(weights)")
	}

	// Construct Xᵀ. This is the more convenient representation
	// for efficiently calling the term functions.
	xTVals := make([]float64, len(terms)*len(xs))
	for i, term := range terms {
		term(xs, xTVals[i*len(xs):i*len(xs)+len(xs)])
	}
	XT := mat64.NewDense(len(terms), len(xs), xTVals)
	X := XT.T()

	// Construct XᵀW.
	var XTW *mat64.Dense
	if weights == nil {
		// W is the identity matrix.
		XTW = XT
	} else {
		// Since W is a diagonal matrix, we do this directly.
		XTW = mat64.DenseCopyOf(XT)
		WDiag := mat64.NewVector(len(weights), weights)
		for row := 0; row < len(terms); row++ {
			rowView := XTW.RowView(row)
			rowView.MulElemVec(rowView, WDiag)
		}
	}

	// Construct y.
	y := mat64.NewVector(len(ys), ys)

	// Compute β̂ by solving the normal equations.
	lhs := mat64.NewDense(len(terms), len(terms), nil)
	lhs.Mul(XTW, X)
	rhs := mat64.NewVector(len(terms), nil)
	rhs.MulVec(XTW, y)
	BVals := make([]float64, len(terms))
	B := mat64.NewVector(len(terms), BVals)
	B.SolveVec(lhs, rhs)
	return BVals
}
示例9: TestVectorDistance
// Calculates the distance between to vectors
func TestVectorDistance(t *testing.T) {
vec1 := mat.NewVector(3, []float64{4, 6, 2})
vec2 := mat.NewVector(3, []float64{1, 9, 3})
expectedAns := float64(19)
if expectedAns != vectorDistance(vec1, vec2) {
t.Errorf("Expected %f, got %f", expectedAns, vectorDistance(vec1, vec2))
}
}
示例10: NextDirection
// NextDirection computes the next Newton search direction into dir and
// returns the step size, regularizing the Hessian when it is not positive
// definite.
func (n *Newton) NextDirection(loc *Location, dir []float64) (stepSize float64) {
	// This method implements Algorithm 3.3 (Cholesky with Added Multiple of
	// the Identity) from Nocedal, Wright (2006), 2nd edition.
	dim := len(loc.X)
	n.hess.CopySym(loc.Hessian)

	// Find the smallest diagonal entry of the Hessian.
	minA := n.hess.At(0, 0)
	for i := 1; i < dim; i++ {
		a := n.hess.At(i, i)
		if a < minA {
			minA = a
		}
	}
	// If the smallest diagonal entry is positive, the Hessian may be positive
	// definite, and so first attempt to apply the Cholesky factorization to
	// the un-modified Hessian. If the smallest entry is negative, use the
	// final tau from the last iteration if regularization was needed,
	// otherwise guess an appropriate value for tau.
	if minA > 0 {
		n.tau = 0
	} else if n.tau == 0 {
		n.tau = -minA + 0.001
	}

	// Retry factorization with increasing regularization until it succeeds
	// or the attempt budget is exhausted.
	for k := 0; k < maxNewtonModifications; k++ {
		if n.tau != 0 {
			// Add a multiple of identity to the Hessian.
			for i := 0; i < dim; i++ {
				n.hess.SetSym(i, i, loc.Hessian.At(i, i)+n.tau)
			}
		}
		// Try to apply the Cholesky factorization.
		pd := n.chol.Factorize(n.hess)
		if pd {
			d := mat64.NewVector(dim, dir)
			// Store the solution in d's backing array, dir.
			d.SolveCholeskyVec(&n.chol, mat64.NewVector(dim, loc.Gradient))
			// Negate to get a descent direction (Newton step solves
			// H d = -g).
			floats.Scale(-1, dir)
			return 1
		}
		// Modified Hessian is not PD, so increase tau.
		n.tau = math.Max(n.Increase*n.tau, 0.001)
	}
	// Hessian modification failed to get a PD matrix. Return the negative
	// gradient as the descent direction.
	copy(dir, loc.Gradient)
	floats.Scale(-1, dir)
	return 1
}
示例11: newMargLikeMemory
// newMargLikeMemory allocates the scratch storage used while evaluating
// the marginal likelihood for hyper hyperparameters and outputs outputs.
func newMargLikeMemory(hyper, outputs int) *margLikeMemory {
	// One symmetric derivative matrix per hyperparameter.
	derivs := make([]*mat64.SymDense, hyper)
	for i := range derivs {
		derivs[i] = mat64.NewSymDense(outputs, nil)
	}
	return &margLikeMemory{
		lastX:    make([]float64, hyper),
		k:        mat64.NewSymDense(outputs, nil),
		chol:     &mat64.Cholesky{},
		alpha:    mat64.NewVector(outputs, nil),
		tmp:      mat64.NewVector(1, nil),
		dKdTheta: derivs,
		kInvDK:   mat64.NewDense(outputs, outputs, nil),
	}
}
示例12: Solve
// Solve runs the iterative method on the system a x = b, starting from
// xInit (or the zero vector when xInit is nil), and returns the result
// together with run statistics. It panics on dimension mismatches.
func Solve(a sparse.Matrix, b, xInit *mat64.Vector, settings *Settings, method Method) (result Result, err error) {
	stats := Stats{
		StartTime: time.Now(),
	}

	dim, c := a.Dims()
	if dim != c {
		panic("iterative: matrix is not square")
	}
	if xInit != nil && dim != xInit.Len() {
		panic("iterative: mismatched size of the initial guess")
	}
	if b.Len() != dim {
		panic("iterative: mismatched size of the right-hand side vector")
	}

	if xInit == nil {
		xInit = mat64.NewVector(dim, nil)
	}
	if settings == nil {
		settings = DefaultSettings(dim)
	}

	ctx := Context{
		X:        mat64.NewVector(dim, nil),
		Residual: mat64.NewVector(dim, nil),
	}
	// X = xInit
	ctx.X.CopyVec(xInit)
	if mat64.Norm(ctx.X, math.Inf(1)) > 0 {
		// Nonzero initial guess, so the residual needs the A*x term.
		// Residual = Ax
		sparse.MulMatVec(ctx.Residual, 1, false, a, ctx.X)
		stats.MatVecMultiplies++
	}
	// Residual = Ax - b
	ctx.Residual.SubVec(ctx.Residual, b)

	// Only iterate when the initial guess is not already within tolerance.
	if mat64.Norm(ctx.Residual, 2) >= settings.Tolerance {
		err = iterate(method, a, b, settings, &ctx, &stats)
	}

	result = Result{
		X:       ctx.X,
		Stats:   stats,
		Runtime: time.Since(stats.StartTime),
	}
	return result, err
}
示例13: findLinearlyIndependent
// findLinearlyIndependent finds a set of linearly independent columns of A, and
// returns the column indexes of the linearly independent columns.
func findLinearlyIndependent(A mat64.Matrix) []int {
	m, n := A.Dims()
	idxs := make([]int, 0, m)
	// columns accumulates the independent columns found so far; only its
	// first len(idxs) columns are meaningful at any point.
	columns := mat64.NewDense(m, m, nil)
	newCol := make([]float64, m)
	// Walk in reverse order because slack variables are typically the last columns
	// of A.
	for i := n - 1; i >= 0; i-- {
		// At most m columns of an m×n matrix can be independent.
		if len(idxs) == m {
			break
		}
		mat64.Col(newCol, i, A)
		if len(idxs) == 0 {
			// A column is linearly independent from the null set.
			// This is what needs to be changed if zero columns are allowed, as
			// a column of all zeros is not linearly independent from itself.
			columns.SetCol(len(idxs), newCol)
			idxs = append(idxs, i)
			continue
		}
		if linearlyDependent(mat64.NewVector(m, newCol), columns.View(0, 0, m, len(idxs))) {
			continue
		}
		columns.SetCol(len(idxs), newCol)
		idxs = append(idxs, i)
	}
	return idxs
}
示例14: TestGather
// TestGather checks that Gather extracts the requested dense-vector
// elements into a sparse Vector with matching dimension and indices.
func TestGather(t *testing.T) {
	cases := []struct {
		y       []float64
		indices []int
		want    []float64
	}{
		{
			y:       []float64{1, 2, 3, 4},
			indices: []int{0, 2, 3},
			want:    []float64{1, 3, 4},
		},
		{
			y:       []float64{1, 2, 3, 4, 5, 6, 7, 8},
			indices: []int{0, 2, 3, 6},
			want:    []float64{1, 3, 4, 7},
		},
	}
	for i, test := range cases {
		dense := mat64.NewVector(len(test.y), test.y)
		var x Vector
		Gather(&x, dense, test.indices)
		if x.N != dense.Len() {
			t.Errorf("%d: wrong dimension, want = %v, got = %v ", i, dense.Len(), x.N)
		}
		if !reflect.DeepEqual(x.Data, test.want) {
			t.Errorf("%d: data not equal, want = %v, got %v\n", i, test.want, x.Data)
		}
		if !reflect.DeepEqual(x.Indices, test.indices) {
			t.Errorf("%d: indices not equal, want = %v, got %v\n", i, test.indices, x.Indices)
		}
	}
}
示例15: TestLeastSquares
func TestLeastSquares(t *testing.T) {
matA := mat64.NewDense(5, 3, []float64{
1, -2, 4,
1, -1, 1,
1, 0, 0,
1, 1, 1,
1, 2, 4,
})
vecb := mat64.NewVector(5, []float64{
0,
0,
1,
0,
0,
})
x := vec3(linalg.LeastSquares(matA, vecb))
expected := Vec3{34.0 / 70.0, 0.0, -10.0 / 70.0}
if x != expected {
t.Errorf("expected %v, got %v", expected, x)
}
}