本文整理汇总了Golang中github.com/gonum/matrix/mat64.Dense.Copy方法的典型用法代码示例。如果您正苦于以下问题:Golang Dense.Copy方法的具体用法?Golang Dense.Copy怎么用?Golang Dense.Copy使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类github.com/gonum/matrix/mat64.Dense
的用法示例。
在下文中一共展示了Dense.Copy方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: TestChebyshev
func TestChebyshev(t *testing.T) {
var vectorX, vectorY *mat64.Dense
chebyshev := NewChebyshev()
Convey("Given two vectors", t, func() {
vectorX = mat64.NewDense(4, 1, []float64{1, 2, 3, 4})
vectorY = mat64.NewDense(4, 1, []float64{-5, -6, 7, 8})
Convey("When calculating distance with two vectors", func() {
result := chebyshev.Distance(vectorX, vectorY)
Convey("The result should be 8", func() {
So(result, ShouldEqual, 8)
})
})
Convey("When calculating distance with row vectors", func() {
vectorX.Copy(vectorX.T())
vectorY.Copy(vectorY.T())
result := chebyshev.Distance(vectorX, vectorY)
Convey("The result should be 8", func() {
So(result, ShouldEqual, 8)
})
})
Convey("When calculating distance with different dimension matrices", func() {
vectorX.Clone(vectorX.T())
So(func() { chebyshev.Distance(vectorX, vectorY) }, ShouldPanic)
})
})
}
示例2: TestCranberrra
func TestCranberrra(t *testing.T) {
var vectorX, vectorY *mat64.Dense
cranberra := NewCranberra()
Convey("Given two vectors that are same", t, func() {
vec := mat64.NewDense(7, 1, []float64{0, 1, -2, 3.4, 5, -6.7, 89})
distance := cranberra.Distance(vec, vec)
Convey("The result should be 0", func() {
So(distance, ShouldEqual, 0)
})
})
Convey("Given two vectors", t, func() {
vectorX = mat64.NewDense(5, 1, []float64{1, 2, 3, 4, 9})
vectorY = mat64.NewDense(5, 1, []float64{-5, -6, 7, 4, 3})
Convey("When calculating distance with two vectors", func() {
result := cranberra.Distance(vectorX, vectorY)
Convey("The result should be 2.9", func() {
So(result, ShouldEqual, 2.9)
})
})
Convey("When calculating distance with row vectors", func() {
vectorX.Copy(vectorX.T())
vectorY.Copy(vectorY.T())
result := cranberra.Distance(vectorX, vectorY)
Convey("The result should be 2.9", func() {
So(result, ShouldEqual, 2.9)
})
})
Convey("When calculating distance with different dimension matrices", func() {
vectorX.Clone(vectorX.T())
So(func() { cranberra.Distance(vectorX, vectorY) }, ShouldPanic)
})
})
}
示例3: TestManhattan
func TestManhattan(t *testing.T) {
var vectorX, vectorY *mat64.Dense
manhattan := NewManhattan()
Convey("Given two vectors that are same", t, func() {
vec := mat64.NewDense(7, 1, []float64{0, 1, -2, 3.4, 5, -6.7, 89})
distance := manhattan.Distance(vec, vec)
Convey("The result should be 0", func() {
So(distance, ShouldEqual, 0)
})
})
Convey("Given two vectors", t, func() {
vectorX = mat64.NewDense(3, 1, []float64{2, 2, 3})
vectorY = mat64.NewDense(3, 1, []float64{1, 4, 5})
Convey("When calculating distance with column vectors", func() {
result := manhattan.Distance(vectorX, vectorY)
Convey("The result should be 5", func() {
So(result, ShouldEqual, 5)
})
})
Convey("When calculating distance with row vectors", func() {
vectorX.Copy(vectorX.T())
vectorY.Copy(vectorY.T())
result := manhattan.Distance(vectorX, vectorY)
Convey("The result should be 5", func() {
So(result, ShouldEqual, 5)
})
})
Convey("When calculating distance with different dimension matrices", func() {
vectorX.Clone(vectorX.T())
So(func() { manhattan.Distance(vectorX, vectorY) }, ShouldPanic)
})
})
}
示例4: LinearSolve
// LinearSolve trains a Linear algorithm.
// Assumes inputs and outputs are already scaled.
// If features is nil will call featurize.
// Will return nil if regularizer is not a linear regularizer.
// Is destructive if any of the weights are zero.
// Losser is always the two-norm.
// Does not set the value of the parameters (in case this is called in parallel
// with a different routine).
//
// Weighted solves are not yet implemented: a non-nil weights slice panics.
func LinearSolve(linearTrainable LinearTrainable, features *mat64.Dense, inputs, trueOutputs common.RowMatrix,
	weights []float64, regularizer regularize.Regularizer) (parameters []float64) {
	// TODO: Allow tikhonov regularization
	// TODO: Add test for weights
	// TODO: Need to do something about returning a []float64
	if !IsLinearSolveRegularizer(regularizer) {
		return nil
	}
	if features == nil {
		features = FeaturizeTrainable(linearTrainable, inputs, features)
	}
	if weights != nil {
		// Weighted least squares would scale features and outputs by
		// sqrt(weight) via a diagonal matrix; that needs banded/diagonal
		// support in the matrix package (or a hand-rolled multiply), so it
		// is deliberately unimplemented for now.
		panic("Need functionality to be better. Either banded special case in matrix or do the mulitplication by hand")
	}
	// Only the nil and None regularizers admit a plain least-squares solve;
	// anything else slipping through indicates a bug in the predicate above.
	switch regularizer.(type) {
	case nil:
	case regularize.None:
	default:
		panic("Shouldn't be here. Must be error in IsLinearRegularizer")
	}
	parameterMat, err := mat64.Solve(features, trueOutputs)
	if err != nil {
		panic(err)
	}
	return parameterMat.RawMatrix().Data
}