本文整理汇总了Golang中github.com/gonum/matrix/mat64.NewDense函数的典型用法代码示例。如果您正苦于以下问题:Golang NewDense函数的具体用法?Golang NewDense怎么用?Golang NewDense使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了NewDense函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: TestUpdate
// TestUpdate verifies one weight-update step of the simple two-layer
// network built by CreateSimpleNetwork: after a single forward/backward
// pass and Update, both layers' weights must match the hand-computed
// reference values to within 1e-4.
func TestUpdate(t *testing.T) {
	net := CreateSimpleNetwork(t)
	net.Forward(mat64.NewDense(1, 2, []float64{0.05, 0.10}))
	net.Backward(mat64.NewDense(1, 2, []float64{0.01, 0.99}))
	config := neural.LearningConfiguration{
		Epochs:    proto.Int32(1),
		Rate:      proto.Float64(0.5),
		Decay:     proto.Float64(0),
		BatchSize: proto.Int32(1),
	}
	net.Update(config)
	// Expected post-update weights for layer 0 (3x2, bias row included).
	wantWeights0 := mat64.NewDense(3, 2, []float64{
		0.149780716, 0.24975114,
		0.19956143, 0.29950229,
		0.35, 0.35,
	})
	if !mat64.EqualApprox(net.Layers[0].Weight, wantWeights0, 0.0001) {
		t.Errorf("weights 0 unexpected:\n%v",
			mat64.Formatted(net.Layers[0].Weight))
	}
	// Expected post-update weights for layer 1.
	wantWeights1 := mat64.NewDense(3, 2, []float64{
		0.35891648, 0.51130127,
		0.408666186, 0.561370121,
		0.6, 0.6,
	})
	if !mat64.EqualApprox(net.Layers[1].Weight, wantWeights1, 0.0001) {
		t.Errorf("weights 1 unexpected:\n%v",
			mat64.Formatted(net.Layers[1].Weight))
	}
}
示例2: Train
// Train runs mini-batch gradient descent over datapoints for the number
// of epochs given in learningConfiguration, updating neuralNetwork in
// place. A configured batch size of 0 selects full-batch learning.
func Train(neuralNetwork *Network, datapoints []Datapoint,
	learningConfiguration LearningConfiguration) {
	// Train on some number of iterations of permuted versions of the input.
	batchSize := int(*learningConfiguration.BatchSize)
	// Batch size 0 means do full batch learning.
	if batchSize == 0 {
		batchSize = len(datapoints)
	}
	// Renamed from error_function: Go convention is mixedCaps, not
	// underscores, for multiword names.
	errorFunction := NewErrorFunction(*learningConfiguration.ErrorName)
	// Reusable staging matrices for one batch of inputs and targets.
	features := mat64.NewDense(batchSize, len(datapoints[0].Features), nil)
	values := mat64.NewDense(batchSize, len(datapoints[0].Values), nil)
	for i := 0; i < int(*learningConfiguration.Epochs); i++ {
		// Visit the data in a fresh random order each epoch.
		perm := rand.Perm(len(datapoints))
		// TODO(ariw): This misses the last len(perm) % batchSize examples. Is this
		// okay?
		for j := 0; j <= len(perm)-batchSize; j += batchSize {
			for k := 0; k < batchSize; k++ {
				features.SetRow(k, datapoints[perm[j+k]].Features)
				values.SetRow(k, datapoints[perm[j+k]].Values)
			}
			neuralNetwork.Forward(features)
			neuralNetwork.Backward(values, errorFunction)
			neuralNetwork.Update(learningConfiguration)
		}
	}
}
示例3: AfterConstr
// AfterConstr builds and returns matrices representing equality
// constraints with a parameter multiplier matrix A and upper and lower
// bounds. The constraint expresses that each facility can only be built after
// a certain date.
func (s *Scenario) AfterConstr() (A, target *mat64.Dense) {
nperiods := s.nPeriods()
// count facilities that have build time constraints
n := 0
for _, fac := range s.Facs {
if fac.BuildAfter != 0 {
n++
}
}
A = mat64.NewDense(n*nperiods, s.Nvars(), nil)
target = mat64.NewDense(n*nperiods, 1, nil)
r := 0
for f, fac := range s.Facs {
if fac.BuildAfter == 0 {
continue
}
for t := s.BuildPeriod; t < s.SimDur; t += s.BuildPeriod {
if !fac.Available(t) {
c := f*nperiods + t/s.BuildPeriod - 1
A.Set(r, c, 1)
}
r++
}
}
return A, target
}
示例4: SupportConstr
// SupportConstr builds and returns matrices representing linear inequality
// constraints with a parameter multiplier matrix A and upper and lower
// bounds. The constraint expresses that the total number of support
// facilities (i.e. not reactors) at every timestep must never be more
// than twice the number of deployed reactors.
func (s *Scenario) SupportConstr() (low, A, up *mat64.Dense) {
nperiods := s.nPeriods()
A = mat64.NewDense(nperiods, s.Nvars(), nil)
low = mat64.NewDense(nperiods, 1, nil)
tmp := make([]float64, len(s.MaxPower))
copy(tmp, s.MaxPower)
up = mat64.NewDense(nperiods, 1, tmp)
up.Apply(func(r, c int, v float64) float64 { return 1e200 }, up)
for t := s.BuildPeriod; t < s.SimDur; t += s.BuildPeriod {
for f, fac := range s.Facs {
for n := 0; n < nperiods; n++ {
if !fac.Alive(n*s.BuildPeriod+1, t) {
continue
}
i := f*nperiods + n
if fac.Cap == 0 {
A.Set(t/s.BuildPeriod-1, i, -1)
} else {
A.Set(t/s.BuildPeriod-1, i, 2)
}
}
}
}
return low, A, up
}
示例5: TestSGD
/*
 * Test the Network for a basic XOR gate: train on random XOR samples and
 * evaluate on all four input combinations.
 */
func TestSGD(t *testing.T) {
	layerSizes := []int{2, 3, 1}
	eta := 3.0 // learning rate
	net := Network{}
	net.Init(layerSizes)
	// A prediction counts as correct when within 0.1 of the target.
	net.TestFunc = func(output, desiredOutput *mat64.Dense) bool {
		return math.Abs(output.At(0, 0)-desiredOutput.At(0, 0)) < 0.1
	}
	// BUG FIX: the seed used to be set inside the loop below; reseeding
	// every iteration with UnixNano can repeat the same sequence whenever
	// iterations run faster than the clock resolution. Seed exactly once.
	rand.Seed(time.Now().UTC().UnixNano())
	// 10000 random training pairs: input (x, y) in {0,1}^2, target x XOR y.
	data := make([][]mat64.Dense, 10000)
	for i := 0; i < len(data); i++ {
		data[i] = make([]mat64.Dense, 2)
		x := rand.Intn(2)
		y := rand.Intn(2)
		data[i][0] = *mat64.NewDense(1, 2, []float64{float64(x), float64(y)})
		data[i][1] = *mat64.NewDense(1, 1, []float64{float64(x ^ y)})
	}
	// Exhaustive test set: all four XOR input combinations.
	test := make([][]mat64.Dense, 4)
	for i := 0; i < 4; i++ {
		test[i] = make([]mat64.Dense, 2)
		test[i][0] = *mat64.NewDense(1, 2, []float64{float64(i / 2), float64(i % 2)})
		test[i][1] = *mat64.NewDense(1, 1, []float64{float64((i / 2) ^ (i % 2))})
	}
	net.SGD(data, eta, 3, test)
}
示例6: TestEuclidean
func TestEuclidean(t *testing.T) {
var vectorX, vectorY *mat64.Dense
euclidean := NewEuclidean()
Convey("Given two vectors", t, func() {
vectorX = mat64.NewDense(3, 1, []float64{1, 2, 3})
vectorY = mat64.NewDense(3, 1, []float64{2, 4, 5})
Convey("When doing inner product", func() {
result := euclidean.InnerProduct(vectorX, vectorY)
Convey("The result should be 25", func() {
So(result, ShouldEqual, 25)
})
})
Convey("When calculating distance", func() {
result := euclidean.Distance(vectorX, vectorY)
Convey("The result should be 3", func() {
So(result, ShouldEqual, 3)
})
})
})
}
示例7: main
// main demonstrates two uses of QR decomposition: factoring the classic
// Wikipedia example matrix, then solving a quadratic polynomial
// least-squares fit via the same factorization.
func main() {
	// task 1: show qr decomp of wp example
	m := mat64.NewDense(3, 3, []float64{
		12, -51, 4,
		6, 167, -68,
		-4, 24, -41,
	})
	var decomp mat64.QR
	decomp.Factorize(m)
	var q, r mat64.Dense
	q.QFromQR(&decomp)
	r.RFromQR(&decomp)
	fmt.Printf("q: %.3f\n\n", mat64.Formatted(&q, mat64.Prefix(" ")))
	fmt.Printf("r: %.3f\n\n", mat64.Formatted(&r, mat64.Prefix(" ")))
	// task 2: use qr decomp for polynomial regression example
	xs := []float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	ys := []float64{1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321}
	m = Vandermonde(xs, 2)
	rhs := mat64.NewDense(11, 1, ys)
	decomp.Factorize(m)
	var coeffs mat64.Dense
	coeffs.SolveQR(&decomp, false, rhs)
	fmt.Printf("polyfit: %.3f\n",
		mat64.Formatted(&coeffs, mat64.Prefix(" ")))
}
示例8: TestChebyshev
// TestChebyshev exercises the Chebyshev (max-norm) metric on column
// vectors, on transposed data, and on shape-mismatched operands.
func TestChebyshev(t *testing.T) {
	var vectorX, vectorY *mat64.Dense
	chebyshev := NewChebyshev()
	Convey("Given two vectors", t, func() {
		vectorX = mat64.NewDense(4, 1, []float64{1, 2, 3, 4})
		vectorY = mat64.NewDense(4, 1, []float64{-5, -6, 7, 8})
		Convey("When calculating distance with two vectors", func() {
			result := chebyshev.Distance(vectorX, vectorY)
			Convey("The result should be 8", func() {
				So(result, ShouldEqual, 8)
			})
		})
		Convey("When calculating distance with row vectors", func() {
			// NOTE(review): Copy fills the receiver from the transpose view
			// without changing the receiver's own dimensions — presumably the
			// intent is that the metric is orientation-insensitive; confirm.
			vectorX.Copy(vectorX.T())
			vectorY.Copy(vectorY.T())
			result := chebyshev.Distance(vectorX, vectorY)
			Convey("The result should be 8", func() {
				So(result, ShouldEqual, 8)
			})
		})
		Convey("When calculating distance with different dimension matrices", func() {
			// Clone resizes vectorX to the transposed shape, so the operands
			// no longer match and Distance is expected to panic.
			vectorX.Clone(vectorX.T())
			So(func() { chebyshev.Distance(vectorX, vectorY) }, ShouldPanic)
		})
	})
}
示例9: Mat64
// Mat64 converts the FeatureMatrix into a mat64.Dense of parsed float
// values. With transpose=false the result is cases x features; with
// transpose=true, features become rows. header is forwarded to the
// iterator constructors.
func (fm *FeatureMatrix) Mat64(header, transpose bool) *mat64.Dense {
	var (
		idx   int
		iter  fmIt
		dense *mat64.Dense
	)
	ncol := len(fm.Data)
	nrow := len(fm.CaseLabels)
	if !transpose {
		iter = rowIter(fm, header)
		dense = mat64.NewDense(nrow, ncol, nil)
	} else {
		iter = colIter(fm, header)
		// NOTE(review): the transposed shape uses nrow+1 columns while the
		// untransposed branch uses exactly nrow x ncol — presumably colIter
		// emits one extra leading element per row; confirm against colIter.
		dense = mat64.NewDense(ncol, nrow+1, nil)
	}
	// Drain the iterator one row at a time; values that fail to parse
	// silently become 0 (ParseFloat error is discarded).
	for row, ok := iter(); ok; idx++ {
		for j, val := range row {
			flt, _ := strconv.ParseFloat(val, 64)
			dense.Set(idx, j, flt)
		}
		row, ok = iter()
	}
	return dense
}
示例10: NewTaskGraphStructure
// NewTaskGraphStructure returns an empty TaskGraphStructure: no tasks
// and two zero-sized dense matrices, to be grown as tasks are added.
func NewTaskGraphStructure() *TaskGraphStructure {
	return &TaskGraphStructure{
		make(map[int]*Task, 0),
		mat64.NewDense(0, 0, nil),
		mat64.NewDense(0, 0, nil),
	}
}
示例11: ReadLibsvm
// ReadLibsvm reads libsvm format data from `filepath`. `oneBased` denotes the
// index of data stored in the file starts from 1 (`oneBased=true`) or 0
// (`oneBased=false`). Returned X, y is of dimension (nSamples, nFeatures) and
// (nSamples, 1) respectively.
func ReadLibsvm(filepath string, oneBased bool) (X, y *mat64.Dense) {
	type Data []string
	file, err := os.Open(filepath)
	if err != nil {
		fmt.Println("Got error when trying to open libsvm file")
		panic(err)
	}
	defer file.Close()
	nFeatures := 0
	nSamples := 0
	dataList := []Data{}
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		row := strings.Split(scanner.Text(), " ")
		dataList = append(dataList, row)
		// Track the largest feature index seen. Assumes the last element of
		// each row carries that row's highest index (libsvm rows are
		// conventionally sorted by index — TODO confirm for this dataset).
		if idx, _ := parseLibsvmElem(row[len(row)-1]); idx+1 > nFeatures {
			nFeatures = idx + 1
		}
		nSamples++
	}
	// BUG FIX: the one-based adjustment previously ran once per scanned
	// line, shrinking nFeatures on every iteration. With 1-based indices
	// the max index itself is the feature count, so adjust exactly once.
	if oneBased {
		nFeatures--
	}
	X = mat64.NewDense(nSamples, nFeatures, nil)
	y = mat64.NewDense(nSamples, 1, nil)
	for i, data := range dataList {
		label, err := strconv.Atoi(data[0])
		if err != nil {
			// BUG FIX: fmt.Println does not interpret format verbs; Printf
			// is required for %v to expand.
			fmt.Printf("Got error when trying to set label for %v-th sample\n", i)
			panic(err)
		}
		y.Set(i, 0, float64(label))
		for k := 1; k < len(data); k++ {
			idx, val := parseLibsvmElem(data[k])
			if oneBased {
				X.Set(i, idx-1, float64(val))
			} else {
				X.Set(i, idx, float64(val))
			}
		}
	}
	if err := scanner.Err(); err != nil {
		fmt.Println("Got error when trying to read libsvm file")
		panic(err)
	}
	return
}
示例12: TestPolyKernel
func TestPolyKernel(t *testing.T) {
var vectorX, vectorY *mat64.Dense
polyKernel := NewPolyKernel(3)
Convey("Given two vectors", t, func() {
vectorX = mat64.NewDense(3, 1, []float64{1, 2, 3})
vectorY = mat64.NewDense(3, 1, []float64{2, 4, 5})
Convey("When doing inner product", func() {
result := polyKernel.InnerProduct(vectorX, vectorY)
Convey("The result should be 17576", func() {
So(result, ShouldEqual, 17576)
})
})
Convey("When calculating distance", func() {
result := polyKernel.Distance(vectorX, vectorY)
Convey("The result should alomost equal 31.622776601683793", func() {
So(result, ShouldAlmostEqual, 31.622776601683793)
})
})
})
}
示例13: InstancesTrainTestSplit
// InstancesTrainTestSplit takes a given Instances (src) and a train-test fraction
// (prop) and returns an array of two new Instances, one containing approximately
// that fraction and the other containing what's left.
//
// IMPORTANT: this function is only meaningful when prop is between 0.0 and 1.0.
// Using any other values may result in odd behaviour.
func InstancesTrainTestSplit(src *Instances, prop float64) (*Instances, *Instances) {
	trainingRows := make([]int, 0)
	testingRows := make([]int, 0)
	numAttrs := len(src.attributes)
	// Shuffle first so assignment happens over a randomized row order.
	src.Shuffle()
	for i := 0; i < src.Rows; i++ {
		// Draw in [0, 100]; rows above the 100*prop cutoff go to training,
		// so training receives roughly a (1 - prop) share.
		trainOrTest := rand.Intn(101)
		if trainOrTest > int(100*prop) {
			trainingRows = append(trainingRows, i)
		} else {
			testingRows = append(testingRows, i)
		}
	}
	// NewDense allocates zeroed backing storage itself when passed nil,
	// so the explicit make([]float64, rows*cols) slices were redundant.
	rawTrainMatrix := mat64.NewDense(len(trainingRows), numAttrs, nil)
	rawTestMatrix := mat64.NewDense(len(testingRows), numAttrs, nil)
	for i, row := range trainingRows {
		rawTrainMatrix.SetRow(i, src.storage.RowView(row))
	}
	for i, row := range testingRows {
		rawTestMatrix.SetRow(i, src.storage.RowView(row))
	}
	trainingRet := NewInstancesFromDense(src.attributes, len(trainingRows), rawTrainMatrix)
	testRet := NewInstancesFromDense(src.attributes, len(testingRows), rawTestMatrix)
	return trainingRet, testRet
}
示例14: Project
// Project a point on the torus onto the screen.
// Returns the (x, y) pixel coordinates of world-space point v.
func (ts TorusScreen) Project(v *mat64.Vector) (uint, uint) {
	xUnit, yUnit := ts.pixelSize()
	// Reflection about the x-axis: presumably because screen y grows
	// downward while world y grows upward — TODO confirm.
	reflectComps := []float64{
		1, 0,
		0, -1,
	}
	reflect := mat64.NewDense(2, 2, reflectComps)
	// Translation vector that moves the origin to the screen center.
	trans := Vec2(float64(ts.t.W)/2.0, float64(ts.t.H)/2.0)
	// Scaling matrix
	scaleComps := []float64{
		xUnit, 0,
		0, yUnit,
	}
	scale := mat64.NewDense(2, 2, scaleComps)
	// Apply in order: reflect, then translate, then scale.
	pr := Vec2(0, 0)
	pr.MulVec(reflect, v)
	pr.AddVec(pr, trans)
	pr.MulVec(scale, pr)
	// Truncate to integer pixel coordinates.
	rx := uint(math.Floor(pr.At(0, 0)))
	ry := uint(math.Floor(pr.At(1, 0)))
	return rx, ry
}
示例15: LinearLeastSquares
// LinearLeastSquares computes the least squares fit for the function
//
//	f(x) = b0*terms0(x) + b1*terms1(x) + ...
//
// to the data (xs[i], ys[i]). It returns the parameters b0, b1, ...
// that minimize the sum of the squares of the residuals of f:
//
//	sum_i (ys[i] - f(xs[i]))^2
//
// If weights is non-nil, it is used to weight these residuals:
//
//	sum_i weights[i] * (ys[i] - f(xs[i]))^2
//
// The function f is specified by one Go function for each linear
// term. For efficiency, the Go function is vectorized: it will be
// passed a slice of x values in xs and must fill the slice termOut
// with the value of the term for each value in xs.
//
// (Comments below were reconstructed from mojibake; the math notation
// had been destroyed by an encoding round-trip.)
func LinearLeastSquares(xs, ys, weights []float64, terms ...func(xs, termOut []float64)) (params []float64) {
	// The optimal parameters are found by solving for b in the
	// "normal equations":
	//
	//	(X^T W X) b = X^T W y
	//
	// where W is a diagonal weight matrix (or the identity matrix
	// for the unweighted case).

	// TODO: Consider using orthogonal decomposition.

	if len(xs) != len(ys) {
		panic("len(xs) != len(ys)")
	}
	if weights != nil && len(xs) != len(weights) {
		// BUG FIX: the message previously read "len(xs) != len(weights"
		// (missing closing parenthesis).
		panic("len(xs) != len(weights)")
	}

	// Construct X^T. This is the more convenient representation
	// for efficiently calling the term functions.
	xTVals := make([]float64, len(terms)*len(xs))
	for i, term := range terms {
		term(xs, xTVals[i*len(xs):i*len(xs)+len(xs)])
	}
	XT := mat64.NewDense(len(terms), len(xs), xTVals)
	X := XT.T()

	// Construct X^T W.
	var XTW *mat64.Dense
	if weights == nil {
		// W is the identity matrix.
		XTW = XT
	} else {
		// Since W is a diagonal matrix, we do this directly.
		XTW = mat64.DenseCopyOf(XT)
		WDiag := mat64.NewVector(len(weights), weights)
		for row := 0; row < len(terms); row++ {
			rowView := XTW.RowView(row)
			rowView.MulElemVec(rowView, WDiag)
		}
	}

	// Construct y.
	y := mat64.NewVector(len(ys), ys)

	// Compute b by solving the normal equations.
	lhs := mat64.NewDense(len(terms), len(terms), nil)
	lhs.Mul(XTW, X)
	rhs := mat64.NewVector(len(terms), nil)
	rhs.MulVec(XTW, y)
	BVals := make([]float64, len(terms))
	B := mat64.NewVector(len(terms), BVals)
	B.SolveVec(lhs, rhs)
	return BVals
}