本文整理汇总了Golang中github.com/gonum/matrix/mat64.Dense.Mul方法的典型用法代码示例。如果您正苦于以下问题:Golang Dense.Mul方法的具体用法?Golang Dense.Mul怎么用?Golang Dense.Mul使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类github.com/gonum/matrix/mat64.Dense
的用法示例。
在下文中一共展示了Dense.Mul方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: Cov
// Cov returns the covariance between a set of data points based on the current
// GP fit.
//
// The joint covariance is K(x*, x*) - K(x*, x) K(x, x)^-1 K(x, x*), with the
// fitted noise added on the diagonal. m must currently be nil; reusing a
// preallocated SymDense is not yet implemented.
func (g *GP) Cov(m *mat64.SymDense, x mat64.Matrix) *mat64.SymDense {
	if m != nil {
		// TODO(btracey): Make this k**
		panic("reusing m not coded")
	}
	// The joint covariance matrix is
	//  K(x_*, k_*) - k(x_*, x) k(x,x)^-1 k(x, x*)
	nSamp, nDim := x.Dims()
	if nDim != g.inputDim {
		panic(badInputLength)
	}
	// Compute K(x_*, x) K(x, x)^-1 K(x, x_*) using the stored Cholesky
	// factorization of the training covariance.
	kstar := g.formKStar(x)
	var tmp mat64.Dense
	tmp.SolveCholesky(g.cholK, kstar)
	var tmp2 mat64.Dense
	tmp2.Mul(kstar.T(), &tmp)
	// Compute k(x_*, x_*) and perform the subtraction. Only the upper
	// triangle is set; SymDense mirrors it.
	kstarstar := mat64.NewSymDense(nSamp, nil)
	for i := 0; i < nSamp; i++ {
		for j := i; j < nSamp; j++ {
			v := g.kernel.Distance(mat64.Row(nil, i, x), mat64.Row(nil, j, x))
			if i == j {
				// Observation noise appears only on the diagonal.
				v += g.noise
			}
			kstarstar.SetSym(i, j, v-tmp2.At(i, j))
		}
	}
	return kstarstar
}
示例2: ExampleCholesky
// ExampleCholesky demonstrates factorizing a symmetric positive definite
// matrix, computing its determinant, solving a linear system with it, and
// reconstructing the original matrix from the factor.
func ExampleCholesky() {
	// Construct a symmetric positive definite matrix.
	tmp := mat64.NewDense(4, 4, []float64{
		2, 6, 8, -4,
		1, 8, 7, -2,
		2, 2, 1, 7,
		8, -2, -2, 1,
	})
	var a mat64.SymDense
	a.SymOuterK(1, tmp)
	fmt.Printf("a = %0.4v\n", mat64.Formatted(&a, mat64.Prefix(" ")))
	// Compute the cholesky factorization.
	var chol mat64.Cholesky
	if ok := chol.Factorize(&a); !ok {
		fmt.Println("a matrix is not positive semi-definite.")
	}
	// Find the determinant.
	fmt.Printf("\nThe determinant of a is %0.4g\n\n", chol.Det())
	// Use the factorization to solve the system of equations a * x = b.
	b := mat64.NewVector(4, []float64{1, 2, 3, 4})
	var x mat64.Vector
	if err := x.SolveCholeskyVec(&chol, b); err != nil {
		fmt.Println("Matrix is near singular: ", err)
	}
	fmt.Println("Solve a * x = b")
	fmt.Printf("x = %0.4v\n", mat64.Formatted(&x, mat64.Prefix(" ")))
	// Extract the factorization and check that it equals the original matrix.
	var t mat64.TriDense
	t.LFromCholesky(&chol)
	var test mat64.Dense
	test.Mul(&t, t.T())
	fmt.Println()
	// Print the reconstructed product L * L^T (previously this printed the
	// original matrix a, so the check never exercised the factorization).
	fmt.Printf("L * L^T = %0.4v\n", mat64.Formatted(&test, mat64.Prefix(" ")))
	// Output:
	// a = ⎡120 114 -4 -16⎤
	// ⎢114 118 11 -24⎥
	// ⎢ -4 11 58 17⎥
	// ⎣-16 -24 17 73⎦
	//
	// The determinant of a is 1.543e+06
	//
	// Solve a * x = b
	// x = ⎡ -0.239⎤
	// ⎢ 0.2732⎥
	// ⎢-0.04681⎥
	// ⎣ 0.1031⎦
	//
	// L * L^T = ⎡120 114 -4 -16⎤
	// ⎢114 118 11 -24⎥
	// ⎢ -4 11 58 17⎥
	// ⎣-16 -24 17 73⎦
}
示例3: forward
// forward runs the forward pass of the two-layer network:
// yHat = sigmoid(sigmoid(x * w1) * w2).
func forward(x *mat64.Dense) (yHat mat64.Dense) {
	var hiddenIn, hiddenOut, outIn mat64.Dense
	hiddenIn.Mul(x, w1) // 3x3 pre-activation of the hidden layer
	hiddenOut.Apply(sigmoidApplyable, &hiddenIn)
	outIn.Mul(&hiddenOut, w2) // 3x1 pre-activation of the output layer
	yHat.Apply(sigmoidApplyable, &outIn)
	return
}
示例4: predictFeaturized
// predictFeaturized multiplies the featureWeights by the featurized input and stores the value. It assumes
// that inMat and outMat already have the correct shape, but will replace the data in them.
//
// The original did not compile: it referenced an undefined `rmin` and
// assigned a *Dense to the raw Data slice. The raw-matrix data of inMat is
// pointed at featurizedInput and that of outMat at output, so the
// multiplication writes its result directly into the output slice.
func predictFeaturized(featurizedInput []float64, output []float64, featureWeights *mat64.Dense, inMat *mat64.Dense, outMat *mat64.Dense) {
	rm := inMat.RawMatrix()
	rm.Data = featurizedInput
	inMat.LoadRawMatrix(rm)
	rm = outMat.RawMatrix()
	rm.Data = output
	outMat.LoadRawMatrix(rm)
	// Multiply the feature weights by the featurized input and store.
	outMat.Mul(inMat, featureWeights)
}
示例5: Update
// Update applies the accumulated error deltas to the layer's weights,
// scaled by the learning rate and optionally penalized by weight decay.
func (l *Layer) Update(learningConfiguration LearningConfiguration) {
	var grad mat64.Dense
	grad.Mul(l.Deltas, l.Input)
	rows, cols := l.Weight.Dims()
	// Exclude the final (bias) row from the decayed/updated view.
	weight := l.Weight.View(0, 0, rows-1, cols).(*mat64.Dense)
	if *learningConfiguration.Decay > 0 {
		var penalty mat64.Dense
		penalty.Scale(*learningConfiguration.Decay, weight)
		grad.Sub(&grad, penalty.T())
	}
	grad.Scale(*learningConfiguration.Rate, &grad)
	weight.Sub(weight, grad.T())
}
示例6: mulMulti
// mulMulti computes the linear combination b[0]*a[:,0] + b[1]*a[:,1]
// element-wise over the first rows entries, returned as a slice.
func mulMulti(a *mat64.Dense, b []float64, rows int) (r []float64) {
	scale0 := mat64.NewDense(1, 1, []float64{b[0]})
	scale1 := mat64.NewDense(1, 1, []float64{b[1]})
	var col0, col1 mat64.Dense
	col0.Mul(a.ColView(0), scale0)
	col1.Mul(a.ColView(1), scale1)
	for i := 0; i < rows; i++ {
		sum := col0.ColView(0).At(i, 0) + col1.ColView(0).At(i, 0)
		r = append(r, sum)
	}
	return r
}
示例7: LinearSolve
// LinearSolve trains a Linear algorithm.
// Assumes inputs and outputs are already scaled
// If features is nil will call featurize
// Will return nil if regularizer is not a linear regularizer
// Is destructive if any of the weights are zero
// Losser is always the two-norm
// Does not set the value of the parameters (in case this is called in parallel with a different routine)
func LinearSolve(linearTrainable LinearTrainable, features *mat64.Dense, inputs, trueOutputs common.RowMatrix,
	weights []float64, regularizer regularize.Regularizer) (parameters []float64) {
	// TODO: Allow tikhonov regularization
	// TODO: Add test for weights
	// TODO: Need to do something about returning a []float64
	if !IsLinearSolveRegularizer(regularizer) {
		return nil
	}
	if features == nil {
		features = FeaturizeTrainable(linearTrainable, inputs, features)
	}
	_, nFeatures := features.Dims()
	var weightedFeatures, weightedOutput *mat64.Dense
	if weights != nil {
		// Weighted least squares: scaling each sample (row) by sqrt(w_i)
		// makes the unweighted two-norm loss of the scaled system equal the
		// weighted loss of the original system.
		scaledWeight := make([]float64, len(weights))
		for i, weight := range weights {
			scaledWeight[i] = math.Sqrt(weight)
		}
		nSamples, outputDim := trueOutputs.Dims()
		// The diagonal scaling matrix multiplies from the left, so it is
		// nSamples x nSamples and holds the sqrt-scaled weights.
		// (Previously it was sized by nFeatures and used the unscaled
		// weights, leaving scaledWeight unused.)
		diagWeight := diagonal.NewDiagonal(nSamples, scaledWeight)
		weightedOutput = mat64.NewDense(nSamples, outputDim, nil)
		weightedFeatures = mat64.NewDense(nSamples, nFeatures, nil)
		weightedOutput.Mul(diagWeight, trueOutputs)
		weightedFeatures.Mul(diagWeight, features)
	}
	switch regularizer.(type) {
	case nil:
	case regularize.None:
	default:
		panic("Shouldn't be here. Must be error in IsLinearRegularizer")
	}
	if weights == nil {
		parameterMat := mat64.Solve(features, trueOutputs)
		return parameterMat.RawMatrix().Data
	}
	parameterMat := mat64.Solve(weightedFeatures, weightedOutput)
	return parameterMat.RawMatrix().Data
}
示例8: main
// main multiplies a 2x4 matrix by a 4x3 matrix and prints the 2x3 product.
func main() {
	left := mat64.NewDense(2, 4, []float64{
		1, 2, 3, 4,
		5, 6, 7, 8,
	})
	right := mat64.NewDense(4, 3, []float64{
		1, 2, 3,
		4, 5, 6,
		7, 8, 9,
		10, 11, 12,
	})
	var product mat64.Dense
	product.Mul(left, right)
	fmt.Println(mat64.Formatted(&product))
}
示例9: Fit
// Fit estimates the linear-regression coefficients from the training
// instances using a QR decomposition and back-substitution.
//
// The class attribute is the dependent variable; every attribute becomes an
// explanatory variable, with a constant intercept column prepended as column
// 0. Returns NotEnoughDataError when there are fewer rows than attributes.
func (lr *LinearRegression) Fit(inst *base.Instances) error {
	// The system is underdetermined with fewer observations than unknowns.
	if inst.Rows < inst.GetAttributeCount() {
		return NotEnoughDataError
	}
	// Split into two matrices, observed results (dependent variable y)
	// and the explanatory variables (X) - see http://en.wikipedia.org/wiki/Linear_regression
	observed := mat64.NewDense(inst.Rows, 1, nil)
	explVariables := mat64.NewDense(inst.Rows, inst.GetAttributeCount(), nil)
	for i := 0; i < inst.Rows; i++ {
		observed.Set(i, 0, inst.Get(i, inst.ClassIndex)) // Set observed data
		for j := 0; j < inst.GetAttributeCount(); j++ {
			if j == 0 {
				// Set intercepts to 1.0
				// Could / should be done better: http://www.theanalysisfactor.com/interpret-the-intercept/
				explVariables.Set(i, 0, 1.0)
			} else {
				// Attribute j-1 is shifted right one column by the intercept.
				explVariables.Set(i, j, inst.Get(i, j-1))
			}
		}
	}
	n := inst.GetAttributeCount()
	// Factorize X = Q R; solving R b = Q^T y avoids forming the normal
	// equations explicitly (better conditioned than X^T X).
	qr := mat64.QR(explVariables)
	q := qr.Q()
	reg := qr.R()
	var transposed, qty mat64.Dense
	transposed.TCopy(q)
	qty.Mul(&transposed, observed) // qty = Q^T y
	// Back-substitution: R is upper triangular, so solve from the last
	// coefficient upwards.
	regressionCoefficients := make([]float64, n)
	for i := n - 1; i >= 0; i-- {
		regressionCoefficients[i] = qty.At(i, 0)
		for j := i + 1; j < n; j++ {
			regressionCoefficients[i] -= regressionCoefficients[j] * reg.At(i, j)
		}
		regressionCoefficients[i] /= reg.At(i, i)
	}
	// Coefficient 0 corresponds to the intercept column.
	lr.disturbance = regressionCoefficients[0]
	lr.regressionCoefficients = regressionCoefficients[1:]
	lr.fitted = true
	return nil
}
示例10: randmatmul
// randmatmul multiplies two n x n matrices filled with uniform random
// values and returns the product.
func randmatmul(n int) *mat64.Dense {
	// randomData draws k uniform samples from the package source rnd.
	randomData := func(k int) []float64 {
		d := make([]float64, k)
		for i := range d {
			d[i] = rnd.Float64()
		}
		return d
	}
	a := mat64.NewDense(n, n, randomData(n*n))
	b := mat64.NewDense(n, n, randomData(n*n))
	var c mat64.Dense
	c.Mul(a, b)
	return &c
}
示例11: ExamplePrincipalComponents
// ExamplePrincipalComponents performs PCA on a small sample of the iris
// dataset and projects it onto the first two principal components.
func ExamplePrincipalComponents() {
	// iris is a truncated sample of the Fisher's Iris dataset.
	const (
		rows = 10
		cols = 4
	)
	iris := mat64.NewDense(rows, cols, []float64{
		5.1, 3.5, 1.4, 0.2,
		4.9, 3.0, 1.4, 0.2,
		4.7, 3.2, 1.3, 0.2,
		4.6, 3.1, 1.5, 0.2,
		5.0, 3.6, 1.4, 0.2,
		5.4, 3.9, 1.7, 0.4,
		4.6, 3.4, 1.4, 0.3,
		5.0, 3.4, 1.5, 0.2,
		4.4, 2.9, 1.4, 0.2,
		4.9, 3.1, 1.5, 0.1,
	})
	// Calculate the principal component direction vectors
	// and variances.
	directions, variances, ok := stat.PrincipalComponents(iris, nil)
	if !ok {
		return
	}
	fmt.Printf("variances = %.4f\n\n", variances)
	// Project the data onto the first 2 principal components.
	const keep = 2
	var projected mat64.Dense
	projected.Mul(iris, directions.View(0, 0, cols, keep))
	fmt.Printf("proj = %.4f", mat64.Formatted(&projected, mat64.Prefix(" ")))
	// Output:
	// variances = [0.1666 0.0207 0.0079 0.0019]
	//
	// proj = ⎡-6.1686 1.4659⎤
	// ⎢-5.6767 1.6459⎥
	// ⎢-5.6699 1.3642⎥
	// ⎢-5.5643 1.3816⎥
	// ⎢-6.1734 1.3309⎥
	// ⎢-6.7278 1.4021⎥
	// ⎢-5.7743 1.1498⎥
	// ⎢-6.0466 1.4714⎥
	// ⎢-5.2709 1.3570⎥
	// ⎣-5.7533 1.6207⎦
}
示例12: Activate
// Activate propagates the given input matrix (with) across the network
// a certain number of times (up to maxIterations).
//
// The with matrix should be size * size elements, with only the values
// of input neurons set (everything else should be zero).
//
// If the network is conceptually organised into layers, maxIterations
// should be set to the number of layers.
//
// This function overwrites whatever's stored in its first argument.
func (n *Network) Activate(with *mat64.Dense, maxIterations int) {
	// biasFunc adds each neuron's bias to its row.
	biasFunc := func(r, c int, v float64) float64 {
		return v + n.biases[r]
	}
	// activFunc applies each neuron's activation function.
	activFunc := func(r, c int, v float64) float64 {
		return n.funcs[r].Forward(v)
	}
	// Main loop: weight multiplication, bias, activation each iteration.
	// mat64.Dense.Mul copies internally when the receiver aliases an
	// argument, so no scratch clone of `with` is needed. (The previous
	// version cloned `with` into a temporary that was never read.)
	for i := 0; i < maxIterations; i++ {
		with.Mul(n.weights, with)
		with.Apply(biasFunc, with)
		with.Apply(activFunc, with)
	}
}
示例13: Fit
// Fit estimates the linear-regression coefficients from the training data
// using a QR decomposition followed by back-substitution.
//
// Exactly one class attribute is required; every FloatAttribute that is not
// a class attribute becomes an explanatory variable, with a constant
// intercept column prepended. Returns NotEnoughDataError when there are
// fewer rows than unknowns.
func (lr *LinearRegression) Fit(inst base.FixedDataGrid) error {
	// Retrieve row size
	_, rows := inst.Size()
	// Validate class Attribute count
	classAttrs := inst.AllClassAttributes()
	if len(classAttrs) != 1 {
		return fmt.Errorf("Only 1 class variable is permitted")
	}
	classAttrSpecs := base.ResolveAttributes(inst, classAttrs)
	// Retrieve relevant Attributes — only float attributes can be used as
	// explanatory variables.
	allAttrs := base.NonClassAttributes(inst)
	attrs := make([]base.Attribute, 0)
	for _, a := range allAttrs {
		if _, ok := a.(*base.FloatAttribute); ok {
			attrs = append(attrs, a)
		}
	}
	// +1 accounts for the intercept column.
	cols := len(attrs) + 1
	if rows < cols {
		return NotEnoughDataError
	}
	// Retrieve relevant Attribute specifications
	attrSpecs := base.ResolveAttributes(inst, attrs)
	// Split into two matrices, observed results (dependent variable y)
	// and the explanatory variables (X) - see http://en.wikipedia.org/wiki/Linear_regression
	observed := mat64.NewDense(rows, 1, nil)
	explVariables := mat64.NewDense(rows, cols, nil)
	// Build the observed matrix
	inst.MapOverRows(classAttrSpecs, func(row [][]byte, i int) (bool, error) {
		val := base.UnpackBytesToFloat(row[0])
		observed.Set(i, 0, val)
		return true, nil
	})
	// Build the explainatory variables
	inst.MapOverRows(attrSpecs, func(row [][]byte, i int) (bool, error) {
		// Set intercepts to 1.0
		explVariables.Set(i, 0, 1.0)
		for j, r := range row {
			// Attribute values are shifted one column right of the intercept.
			explVariables.Set(i, j+1, base.UnpackBytesToFloat(r))
		}
		return true, nil
	})
	n := cols
	// Factorize X = Q R; solving R b = Q^T y avoids the normal equations.
	qr := new(mat64.QR)
	qr.Factorize(explVariables)
	var q, reg mat64.Dense
	q.QFromQR(qr)
	reg.RFromQR(qr)
	var transposed, qty mat64.Dense
	transposed.Clone(q.T())
	qty.Mul(&transposed, observed) // qty = Q^T y
	// Back-substitution: R is upper triangular, so solve from the last
	// coefficient upwards.
	regressionCoefficients := make([]float64, n)
	for i := n - 1; i >= 0; i-- {
		regressionCoefficients[i] = qty.At(i, 0)
		for j := i + 1; j < n; j++ {
			regressionCoefficients[i] -= regressionCoefficients[j] * reg.At(i, j)
		}
		regressionCoefficients[i] /= reg.At(i, i)
	}
	// Coefficient 0 corresponds to the intercept column.
	lr.disturbance = regressionCoefficients[0]
	lr.regressionCoefficients = regressionCoefficients[1:]
	lr.fitted = true
	lr.attrs = attrs
	lr.cls = classAttrs[0]
	return nil
}
示例14: Factors
// Factors returns matrices W and H that are non-negative factors of V within the
// specified tolerance and computation limits given initial non-negative solutions Wo
// and Ho.
func Factors(V, Wo, Ho *mat64.Dense, c Config) (W, H *mat64.Dense, ok bool) {
to := time.Now()
W = Wo
H = Ho
var (
wr, wc = W.Dims()
hr, hc = H.Dims()
tmp mat64.Dense
)
var vhT mat64.Dense
gW := mat64.NewDense(wr, wc, nil)
tmp.Mul(H, H.T())
gW.Mul(W, &tmp)
vhT.Mul(V, H.T())
gW.Sub(gW, &vhT)
var wTv mat64.Dense
gH := mat64.NewDense(hr, hc, nil)
tmp.Reset()
tmp.Mul(W.T(), W)
gH.Mul(&tmp, H)
wTv.Mul(W.T(), V)
gH.Sub(gH, &wTv)
var gHT, gWHT mat64.Dense
gHT.Clone(gH.T())
gWHT.Stack(gW, &gHT)
grad := mat64.Norm(&gWHT, 2)
tolW := math.Max(0.001, c.Tolerance) * grad
tolH := tolW
var (
_ok bool
iter int
)
decFiltW := func(r, c int, v float64) float64 {
// decFiltW is applied to gW, so v = gW.At(r, c).
if v < 0 || W.At(r, c) > 0 {
return v
}
return 0
}
decFiltH := func(r, c int, v float64) float64 {
// decFiltH is applied to gH, so v = gH.At(r, c).
if v < 0 || H.At(r, c) > 0 {
return v
}
return 0
}
var vT, hT, wT mat64.Dense
for i := 0; i < c.MaxIter; i++ {
gW.Apply(decFiltW, gW)
gH.Apply(decFiltH, gH)
var proj float64
for _, v := range gW.RawMatrix().Data {
proj += v * v
}
for _, v := range gH.RawMatrix().Data {
proj += v * v
}
proj = math.Sqrt(proj)
if proj < c.Tolerance*grad || time.Now().Sub(to) > c.Limit {
break
}
vT.Clone(V.T())
hT.Clone(H.T())
wT.Clone(W.T())
W, gW, iter, ok = nnlsSubproblem(&vT, &hT, &wT, tolW, c.MaxOuterSub, c.MaxInnerSub)
if iter == 0 {
tolW *= 0.1
}
wT.Reset()
wT.Clone(W.T())
W = &wT
var gWT mat64.Dense
gWT.Clone(gW.T())
*gW = gWT
H, gH, iter, _ok = nnlsSubproblem(V, W, H, tolH, c.MaxOuterSub, c.MaxInnerSub)
ok = ok && _ok
if iter == 0 {
tolH *= 0.1
}
}
//.........这里部分代码省略.........
示例15: nnlsSubproblem
// nnlsSubproblem improves the non-negative factor H of V ≈ W H by projected
// gradient descent with an adaptive step size and a backtracking/expanding
// line search — this appears to follow the inner iteration of
// projected-gradient NMF (cf. C.-J. Lin, "Projected Gradient Methods for
// Non-negative Matrix Factorization"); confirm against the caller.
//
// It returns the improved factor H, the final gradient G, the number of
// outer iterations performed, and whether a sufficient decrease was ever
// achieved.
func nnlsSubproblem(V, W, Ho *mat64.Dense, tol float64, outer, inner int) (H, G *mat64.Dense, i int, ok bool) {
	H = new(mat64.Dense)
	H.Clone(Ho)
	// Precompute W^T V and W^T W, the constant parts of the gradient
	// G = W^T W H - W^T V.
	var WtV, WtW mat64.Dense
	WtV.Mul(W.T(), V)
	WtW.Mul(W.T(), W)
	// alpha is the current step size; beta is the shrink/grow factor.
	alpha, beta := 1., 0.1
	decFilt := func(r, c int, v float64) float64 {
		// decFilt is applied to G, so v = G.At(r, c).
		// Zero gradient entries that cannot decrease the objective: a
		// non-negative gradient at an entry of H already on the boundary (0).
		if v < 0 || H.At(r, c) > 0 {
			return v
		}
		return 0
	}
	G = new(mat64.Dense)
	for i = 0; i < outer; i++ {
		G.Mul(&WtW, H)
		G.Sub(G, &WtV)
		G.Apply(decFilt, G)
		// Converged once the projected gradient norm is below tolerance.
		if mat64.Norm(G, 2) < tol {
			break
		}
		var (
			reduce bool
			Hp     *mat64.Dense
			d, dQ  mat64.Dense
		)
		// Line search: trial step Hn = max(H - alpha*G, 0), testing a
		// sufficient-decrease condition on the quadratic model.
		for j := 0; j < inner; j++ {
			var Hn mat64.Dense
			Hn.Scale(alpha, G)
			Hn.Sub(H, &Hn)
			Hn.Apply(posFilt, &Hn) // project onto the non-negative orthant
			d.Sub(&Hn, H)          // step direction d = Hn - H
			dQ.Mul(&WtW, &d)
			dQ.MulElem(&dQ, &d)
			d.MulElem(G, &d)
			// Sufficient decrease: 0.99*<G,d> + 0.5*<d, WtW d> < 0.
			sufficient := 0.99*mat64.Sum(&d)+0.5*mat64.Sum(&dQ) < 0
			if j == 0 {
				// The first trial decides the search direction: shrink
				// alpha if the step was too large, otherwise grow it.
				reduce = !sufficient
				Hp = H
			}
			if reduce {
				if sufficient {
					H = &Hn
					ok = true
					break
				} else {
					alpha *= beta
				}
			} else {
				// Growing phase: stop at the last acceptable step Hp when
				// the condition fails or the iterate stops changing.
				if !sufficient || mat64.Equal(Hp, &Hn) {
					H = Hp
					break
				} else {
					alpha /= beta
					Hp = &Hn
				}
			}
		}
	}
	return H, G, i, ok
}