This article summarizes typical usage examples of the Dense type from the Golang package github.com/gonum/matrix/mat64, for anyone wondering what mat64.Dense is for and how to use it.
The 15 code examples of the Dense type below are ordered by popularity.
Example 1: SetScale

// SetScale computes per-column location (Mu) and scale (Sigma) estimates,
// using the mean and standard deviation of the values between the lower
// and upper quantiles of each column.
func (n *InnerNormal) SetScale(data *mat64.Dense) error {
    rows, dim := data.Dims()
    if rows < 2 {
        return errors.New("scale: less than two inputs")
    }
    means := make([]float64, dim)
    stds := make([]float64, dim)
    for i := 0; i < dim; i++ {
        // Filter out the extremes.
        r := data.Col(nil, i)
        if len(r) != rows {
            panic("bad lengths")
        }
        sort.Float64s(r)
        lowerIdx := int(math.Floor(float64(rows) * n.LowerQuantile))
        upperIdx := int(math.Ceil(float64(rows) * n.UpperQuantile))
        trimmed := r[lowerIdx:upperIdx]
        mean, std := stat.MeanStdDev(trimmed, nil)
        means[i] = mean
        stds[i] = std
    }
    n.Mu = means
    n.Sigma = stds
    fmt.Println(n.Mu, n.Sigma)
    n.Dim = dim
    n.Scaled = true
    return nil
}
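A minimal usage sketch (hypothetical data; the quantile fields are set directly on the struct, matching the fields the method reads above):

    data := mat64.NewDense(4, 2, []float64{
        1, 10,
        2, 20,
        3, 30,
        4, 40,
    })
    n := &InnerNormal{LowerQuantile: 0.1, UpperQuantile: 0.9}
    if err := n.SetScale(data); err != nil {
        log.Fatal(err)
    }
    fmt.Println(n.Mu, n.Sigma) // per-column trimmed means and standard deviations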
Example 2: Predict

func (nb *NaiveBayes) Predict(X *mat64.Dense) []Prediction {
    nSamples, _ := X.Dims()
    prediction := []Prediction{}
    for i := 0; i < nSamples; i++ {
        // Score every language for this sample, then take the argmax.
        scores := map[int]float64{}
        for langIdx := range nb.params.LangsCount {
            scores[langIdx] = nb.tokensProba(X.Row(nil, i), langIdx) + nb.langProba(langIdx)
        }
        bestScore := scores[0]
        bestLangIdx := 0
        for langIdx, score := range scores {
            if score > bestScore {
                bestScore = score
                bestLangIdx = langIdx
            }
        }
        prediction = append(prediction, Prediction{
            Label:    bestLangIdx,
            Language: "TODO: PENDING",
            Score:    bestScore,
        })
    }
    return prediction
}
Example 3: GcvInitCameraMatrix2D

// GcvInitCameraMatrix2D takes one 3-by-N matrix and one 2-by-N matrix as input.
// Each column in the input matrices represents a point in the real world
// (objPts) or in the image (imgPts).
// It returns the camera matrix.
func GcvInitCameraMatrix2D(objPts, imgPts *mat64.Dense, dims [2]int,
    aspectRatio float64) (camMat *mat64.Dense) {
    objDim, nObjPts := objPts.Dims()
    imgDim, nImgPts := imgPts.Dims()
    if objDim != 3 || imgDim != 2 || nObjPts != nImgPts {
        panic("Invalid dimensions for objPts and imgPts")
    }
    objPtsVec := NewGcvPoint3f32Vector(int64(nObjPts))
    imgPtsVec := NewGcvPoint2f32Vector(int64(nObjPts))
    for j := 0; j < nObjPts; j++ {
        objPtsVec.Set(j, NewGcvPoint3f32(mat64.Col(nil, j, objPts)...))
        imgPtsVec.Set(j, NewGcvPoint2f32(mat64.Col(nil, j, imgPts)...))
    }
    imgSize := NewGcvSize2i(dims[0], dims[1])
    camMat = GcvMatToMat64(GcvInitCameraMatrix2D_(
        objPtsVec, imgPtsVec, imgSize, aspectRatio))
    return camMat
}
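A hedged usage sketch (hypothetical point data; assumes the gcv bindings above are available): four object points on the Z=0 plane and their image projections, with a 640x480 image.

    objPts := mat64.NewDense(3, 4, []float64{
        0, 1, 1, 0, // X coordinates
        0, 0, 1, 1, // Y coordinates
        0, 0, 0, 0, // Z coordinates (planar scene)
    })
    imgPts := mat64.NewDense(2, 4, []float64{
        100, 200, 200, 100, // u pixel coordinates
        100, 100, 200, 200, // v pixel coordinates
    })
    camMat := GcvInitCameraMatrix2D(objPts, imgPts, [2]int{640, 480}, 1.0)
    fmt.Println(mat64.Formatted(camMat)) // 3x3 camera intrinsics estimate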
Example 4: StackConstr

func StackConstr(low, A, up *mat64.Dense) (stackA, b *mat64.Dense, ranges []float64) {
    neglow := &mat64.Dense{}
    neglow.Scale(-1, low)
    b = &mat64.Dense{}
    b.Stack(up, neglow)

    negA := &mat64.Dense{}
    negA.Scale(-1, A)
    stackA = &mat64.Dense{}
    stackA.Stack(A, negA)

    // Capture the range of each constraint from A because this information is
    // lost when converting from "low <= Ax <= up" via stacking to "Ax <= up".
    m, _ := A.Dims()
    ranges = make([]float64, m, 2*m)
    for i := 0; i < m; i++ {
        ranges[i] = up.At(i, 0) - low.At(i, 0)
        if ranges[i] == 0 {
            if up.At(i, 0) == 0 {
                ranges[i] = 1
            } else {
                ranges[i] = up.At(i, 0)
            }
        }
    }
    ranges = append(ranges, ranges...)
    return stackA, b, ranges
}
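For intuition, a worked sketch of the stacking with hypothetical numbers: the two-sided constraint 1 <= 2x <= 5 becomes the one-sided pair 2x <= 5 and -2x <= -1.

    low := mat64.NewDense(1, 1, []float64{1})
    up := mat64.NewDense(1, 1, []float64{5})
    A := mat64.NewDense(1, 1, []float64{2})
    stackA, b, ranges := StackConstr(low, A, up)
    fmt.Println(mat64.Formatted(stackA)) // [ 2; -2]
    fmt.Println(mat64.Formatted(b))      // [ 5; -1]
    fmt.Println(ranges)                  // [4 4]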
Example 5: toFeatureNodes

func toFeatureNodes(X *mat64.Dense) []*C.struct_feature_node {
    featureNodes := []*C.struct_feature_node{}
    nRows, nCols := X.Dims()
    for i := 0; i < nRows; i++ {
        row := []C.struct_feature_node{}
        for j := 0; j < nCols; j++ {
            val := X.At(i, j)
            if val != 0 {
                row = append(row, C.struct_feature_node{
                    index: C.int(j + 1), // liblinear feature indices are 1-based
                    value: C.double(val),
                })
            }
        }
        // liblinear expects each sparse row to end with a sentinel node
        // whose index is -1.
        row = append(row, C.struct_feature_node{
            index: C.int(-1),
            value: C.double(0),
        })
        featureNodes = append(featureNodes, &row[0])
    }
    return featureNodes
}
Example 6: Mat64

func (fm *FeatureMatrix) Mat64(header, transpose bool) *mat64.Dense {
    var (
        idx   int
        iter  fmIt
        dense *mat64.Dense
    )
    ncol := len(fm.Data)
    nrow := len(fm.CaseLabels)
    if !transpose {
        iter = rowIter(fm, header)
        dense = mat64.NewDense(nrow, ncol, nil)
    } else {
        iter = colIter(fm, header)
        dense = mat64.NewDense(ncol, nrow+1, nil)
    }
    for row, ok := iter(); ok; row, ok = iter() {
        for j, val := range row {
            flt, _ := strconv.ParseFloat(val, 64)
            dense.Set(idx, j, flt)
        }
        idx++
    }
    return dense
}
Example 7: BatchGradientDescent

// BatchGradientDescent finds the local minimum of a function.
// See http://en.wikipedia.org/wiki/Gradient_descent for more details.
func BatchGradientDescent(x, y, theta *mat64.Dense, alpha float64, epoch int) *mat64.Dense {
    m, _ := y.Dims()
    for i := 0; i < epoch; i++ {
        xFlat := mat64.DenseCopyOf(x)
        xFlat.TCopy(xFlat)
        temp := mat64.DenseCopyOf(x)
        // Calculate our best prediction, given theta.
        temp.Mul(temp, theta)
        // Calculate our error from the real values.
        temp.Sub(temp, y)
        xFlat.Mul(xFlat, temp)
        // Temporary hack to get around the fact there is no scalar division in mat64.
        xFlatRow, _ := xFlat.Dims()
        gradient := make([]float64, 0)
        for k := 0; k < xFlatRow; k++ {
            row := xFlat.RowView(k)
            for v := range row {
                divd := row[v] / float64(m) * alpha
                gradient = append(gradient, divd)
            }
        }
        grows := len(gradient)
        grad := mat64.NewDense(grows, 1, gradient)
        theta.Sub(theta, grad)
    }
    return theta
}
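A usage sketch on a toy single-feature problem (hypothetical data; assumes the old mat64 API this example was written against): fitting y = 2x.

    x := mat64.NewDense(4, 1, []float64{1, 2, 3, 4})
    y := mat64.NewDense(4, 1, []float64{2, 4, 6, 8})
    theta := mat64.NewDense(1, 1, []float64{0})
    theta = BatchGradientDescent(x, y, theta, 0.01, 1000)
    fmt.Println(theta.At(0, 0)) // should converge toward 2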
Example 8: FeaturizeTrainable

// FeaturizeTrainable creates the features from the inputs.
// featurizedInputs must be nSamples x nFeatures, or nil.
func FeaturizeTrainable(t Trainable, inputs common.RowMatrix, featurizedInputs *mat64.Dense) *mat64.Dense {
    nSamples, nDim := inputs.Dims()
    if featurizedInputs == nil {
        nFeatures := t.NumFeatures()
        featurizedInputs = mat64.NewDense(nSamples, nFeatures, nil)
    }
    rowViewer, isRowViewer := inputs.(mat64.RowViewer)
    var f func(start, end int)
    if isRowViewer {
        f = func(start, end int) {
            featurizer := t.NewFeaturizer()
            for i := start; i < end; i++ {
                featurizer.Featurize(rowViewer.RowView(i), featurizedInputs.RowView(i))
            }
        }
    } else {
        f = func(start, end int) {
            featurizer := t.NewFeaturizer()
            input := make([]float64, nDim)
            for i := start; i < end; i++ {
                inputs.Row(input, i)
                featurizer.Featurize(input, featurizedInputs.RowView(i))
            }
        }
    }
    common.ParallelFor(nSamples, common.GetGrainSize(nSamples, minGrain, maxGrain), f)
    return featurizedInputs
}
Example 9: GradientDescent

func GradientDescent(X *mat64.Dense, y *mat64.Vector, alpha, tolerance float64, maxIters int) *mat64.Vector {
    // m = number of training examples
    // n = number of features
    m, n := X.Dims()
    h := mat64.NewVector(m, nil)
    partials := mat64.NewVector(n, nil)
    newTheta := mat64.NewVector(n, nil)

Regression:
    for i := 0; i < maxIters; i++ {
        // Calculate the partial derivatives.
        h.MulVec(X, newTheta)
        for el := 0; el < m; el++ {
            val := (h.At(el, 0) - y.At(el, 0)) / float64(m)
            h.SetVec(el, val)
        }
        partials.MulVec(X.T(), h)
        // Update the theta values.
        for el := 0; el < n; el++ {
            newVal := newTheta.At(el, 0) - (alpha * partials.At(el, 0))
            newTheta.SetVec(el, newVal)
        }
        // Check the "distance" to the local minimum.
        dist := math.Sqrt(mat64.Dot(partials, partials))
        if dist <= tolerance {
            break Regression
        }
    }
    return newTheta
}
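A usage sketch with hypothetical data: the first column of X is the bias term, so fitting y = 3 + 2x should recover theta close to (3, 2).

    X := mat64.NewDense(4, 2, []float64{
        1, 1,
        1, 2,
        1, 3,
        1, 4,
    })
    y := mat64.NewVector(4, []float64{5, 7, 9, 11})
    theta := GradientDescent(X, y, 0.05, 1e-6, 100000)
    fmt.Println(theta.At(0, 0), theta.At(1, 0)) // approaches 3 and 2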
Example 10: MassCenter

// MassCenter centers in at the center of mass of oref. mass must be
// a column vector. It returns the centered matrix and the displacement matrix.
func MassCenter(in, oref *v3.Matrix, mass *mat64.Dense) (*v3.Matrix, *v3.Matrix, error) {
    or, _ := oref.Dims()
    ir, _ := in.Dims()
    if mass == nil { // just obtain the geometric center
        tmp := ones(or)
        mass = mat64.NewDense(or, 1, tmp) // gnOnes(or, 1)
    }
    ref := v3.Zeros(or)
    ref.Copy(oref)
    gnOnesvector := gnOnes(1, or)
    f := func() { ref.ScaleByCol(ref, mass) }
    if err := gnMaybe(gnPanicker(f)); err != nil {
        return nil, nil, CError{err.Error(), []string{"v3.Matrix.ScaleByCol", "MassCenter"}}
    }
    ref2 := v3.Zeros(1)
    g := func() { ref2.Mul(gnOnesvector, ref) }
    if err := gnMaybe(gnPanicker(g)); err != nil {
        return nil, nil, CError{err.Error(), []string{"v3.gOnesVector", "MassCenter"}}
    }
    ref2.Scale(1.0/mass.Sum(), ref2)
    returned := v3.Zeros(ir)
    returned.Copy(in)
    returned.SubVec(returned, ref2)
    return returned, ref2, nil
}
Example 11: Predict

func (lr *LogisticRegression) Predict(X *mat64.Dense) []Prediction {
    nSamples, _ := X.Dims()
    // PredictProba scores all samples at once, so compute it once
    // rather than on every loop iteration.
    scores := liblinear.PredictProba(lr.model, X)
    _, nClasses := scores.Dims()
    prediction := []Prediction{}
    for i := 0; i < nSamples; i++ {
        bestScore := scores.At(i, 0)
        bestLangIdx := 0
        for langIdx := 0; langIdx < nClasses; langIdx++ {
            score := scores.At(i, langIdx)
            if score > bestScore {
                bestScore = score
                bestLangIdx = langIdx
            }
        }
        prediction = append(prediction, Prediction{
            Label:    bestLangIdx,
            Language: "TODO: PENDING",
            Score:    bestScore,
        })
    }
    return prediction
}
Example 12: MetropolisHastings

// MetropolisHastings generates rows(batch) samples using the Metropolis-Hastings
// algorithm (http://en.wikipedia.org/wiki/Metropolis%E2%80%93Hastings_algorithm),
// with the given target and proposal distributions, starting at the initial location
// and storing the results in-place into batch. If src != nil, it will be used to
// generate random numbers, otherwise rand.Float64 will be used.
//
// Metropolis-Hastings is a Markov-chain Monte Carlo algorithm that generates
// samples according to the distribution specified by target by using the Markov
// chain implicitly defined by the proposal distribution. At each
// iteration, a proposal point is generated randomly from the current location.
// This proposal point is accepted with probability
//  p = min(1, (target(new) * proposal(current|new)) / (target(current) * proposal(new|current)))
// If the new location is accepted, it is stored into batch and becomes the
// new current location. If it is rejected, the current location remains and
// is stored into batch. Thus, a location is stored into batch at every iteration.
//
// The samples in Metropolis-Hastings are correlated with one another through the
// Markov chain. As a result, the initial value can have a significant influence
// on the early samples, and so, typically, the first samples generated by the chain
// are ignored. This is known as "burn-in", and can be accomplished with slicing.
// The best choice for burn-in length will depend on the sampling and target
// distributions.
//
// Many choose to have a sampling "rate" where a number of samples
// are ignored in between each kept sample. This helps decorrelate
// the samples from one another, but also reduces the number of available samples.
// A sampling rate can be implemented with successive calls to MetropolisHastings.
func MetropolisHastings(batch *mat64.Dense, initial []float64, target distmv.LogProber, proposal MHProposal, src *rand.Rand) {
    f64 := rand.Float64
    if src != nil {
        f64 = src.Float64
    }
    if len(initial) == 0 {
        panic("metropolishastings: zero length initial")
    }
    r, _ := batch.Dims()
    current := make([]float64, len(initial))
    copy(current, initial)
    proposed := make([]float64, len(initial))
    currentLogProb := target.LogProb(initial)
    for i := 0; i < r; i++ {
        proposal.ConditionalRand(proposed, current)
        proposedLogProb := target.LogProb(proposed)
        probTo := proposal.ConditionalLogProb(proposed, current)
        probBack := proposal.ConditionalLogProb(current, proposed)
        // Work in log space: exp of the log acceptance ratio.
        accept := math.Exp(proposedLogProb + probBack - probTo - currentLogProb)
        if accept > f64() {
            copy(current, proposed)
            currentLogProb = proposedLogProb
        }
        batch.SetRow(i, current)
    }
}
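A self-contained burn-in sketch, as described in the doc comment. The target and proposal here are hypothetical stand-ins (an unnormalized standard-normal log-density and a symmetric random walk), not part of the library; they only need to satisfy the distmv.LogProber and MHProposal interfaces used above.

    // stdNormal is an unnormalized standard-normal log-density; normalizing
    // constants cancel in the Metropolis-Hastings acceptance ratio.
    type stdNormal struct{}

    func (stdNormal) LogProb(x []float64) float64 {
        var s float64
        for _, v := range x {
            s += v * v
        }
        return -0.5 * s
    }

    // walk is a symmetric random-walk proposal, so its conditional
    // log-probability is the same in both directions and can be reported as 0.
    type walk struct{ step float64 }

    func (w walk) ConditionalRand(x, y []float64) {
        for i := range x {
            x[i] = y[i] + w.step*(rand.Float64()-0.5)
        }
    }

    func (walk) ConditionalLogProb(x, y []float64) float64 { return 0 }

    func main() {
        total, burnIn := 10000, 1000
        batch := mat64.NewDense(total, 2, nil)
        MetropolisHastings(batch, []float64{0, 0}, stdNormal{}, walk{step: 1}, nil)
        kept := batch.View(burnIn, 0, total-burnIn, 2) // discard burn-in by slicing
        _ = kept
    }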
Example 13: InnerProduct

// InnerProduct computes the inner product through a kernel trick:
// K(x, y) = (x^T y + 1)^d
func (p *PolyKernel) InnerProduct(vectorX *mat64.Dense, vectorY *mat64.Dense) float64 {
    subVectorX := vectorX.ColView(0)
    subVectorY := vectorY.ColView(0)
    result := mat64.Dot(subVectorX, subVectorY)
    result = math.Pow(result+1, float64(p.degree))
    return result
}
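A worked check (hypothetical construction; degree is unexported, so this only compiles inside the same package): with x = (1, 2), y = (3, 4) and d = 2, x^T y = 1*3 + 2*4 = 11, so K(x, y) = (11 + 1)^2 = 144.

    x := mat64.NewDense(2, 1, []float64{1, 2})
    y := mat64.NewDense(2, 1, []float64{3, 4})
    k := &PolyKernel{degree: 2}
    fmt.Println(k.InnerProduct(x, y)) // 144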
Example 14: DfFromMat

func DfFromMat(mat *mat64.Dense) *DataFrame {
    rows, cols := mat.Dims()
    return &DataFrame{
        data: mat,
        rows: rows,
        cols: cols,
    }
}
Example 15: rowSum

func rowSum(matrix *mat64.Dense, rowId int) float64 {
    _, col := matrix.Dims()
    sum := float64(0)
    for c := 0; c < col; c++ {
        sum += matrix.At(rowId, c)
    }
    return sum
}
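A quick usage sketch (hypothetical matrix):

    m := mat64.NewDense(2, 3, []float64{
        1, 2, 3,
        4, 5, 6,
    })
    fmt.Println(rowSum(m, 0)) // 6
    fmt.Println(rowSum(m, 1)) // 15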