This page collects typical usage examples of the Golang Dense.Dims method from github.com/gonum/matrix/mat64. If you have been wondering what Dense.Dims does, how to use it, and what it looks like in real code, the curated examples below may help. You can also read further about the containing type, github.com/gonum/matrix/mat64.Dense.
The following presents 15 code examples of Dense.Dims, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Golang examples.
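Before the examples, here is a minimal, self-contained sketch of what Dims itself does: it returns the matrix's row and column counts. The values below are illustrative only.
package main

import (
    "fmt"

    "github.com/gonum/matrix/mat64"
)

func main() {
    // A 3x2 matrix backed by a row-major []float64.
    m := mat64.NewDense(3, 2, []float64{1, 2, 3, 4, 5, 6})
    rows, cols := m.Dims()
    fmt.Println(rows, cols) // prints: 3 2
}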
Example 1: BatchGradientDescent
// BatchGradientDescent finds the local minimum of a function using batch
// gradient descent. See http://en.wikipedia.org/wiki/Gradient_descent for
// more details.
func BatchGradientDescent(x, y, theta *mat64.Dense, alpha float64, epoch int) *mat64.Dense {
    m, _ := y.Dims()
    for i := 0; i < epoch; i++ {
        // xFlat holds the transpose of x.
        xFlat := mat64.DenseCopyOf(x)
        xFlat.TCopy(xFlat)
        temp := mat64.DenseCopyOf(x)
        // Calculate our best prediction, given theta.
        temp.Mul(temp, theta)
        // Calculate our error from the real values.
        temp.Sub(temp, y)
        xFlat.Mul(xFlat, temp)
        // Temporary hack to get around the fact that there is no scalar
        // division in mat64.
        xFlatRow, _ := xFlat.Dims()
        gradient := make([]float64, 0, xFlatRow)
        for k := 0; k < xFlatRow; k++ {
            row := xFlat.RowView(k)
            for v := range row {
                divd := row[v] / float64(m) * alpha
                gradient = append(gradient, divd)
            }
        }
        grows := len(gradient)
        grad := mat64.NewDense(grows, 1, gradient)
        theta.Sub(theta, grad)
    }
    return theta
}
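A note on the "temporary hack" above: mat64 does expose scalar multiplication through Dense.Scale, so the element-by-element division loop could arguably be collapsed into a single call. A minimal sketch, reusing the names from the example (xFlat is the n-by-1 product of the transposed design matrix and the error vector):
    // Equivalent to the gradient loop above: dividing every element by m
    // and multiplying by alpha is one scalar multiplication.
    grad := &mat64.Dense{}
    grad.Scale(alpha/float64(m), xFlat)
    theta.Sub(theta, grad)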
Example 2: Predict
func (nb *NaiveBayes) Predict(X *mat64.Dense) []Prediction {
    nSamples, _ := X.Dims()
    prediction := []Prediction{}
    for i := 0; i < nSamples; i++ {
        // Score every known language for sample i.
        scores := map[int]float64{}
        for langIdx := range nb.params.LangsCount {
            scores[langIdx] = nb.tokensProba(X.Row(nil, i), langIdx) + nb.langProba(langIdx)
        }
        // Pick the language with the highest score.
        bestScore := scores[0]
        bestLangIdx := 0
        for langIdx, score := range scores {
            if score > bestScore {
                bestScore = score
                bestLangIdx = langIdx
            }
        }
        prediction = append(prediction, Prediction{
            Label:    bestLangIdx,
            Language: "TODO: PENDING",
            Score:    bestScore,
        })
    }
    return prediction
}
Example 3: GradientDescent
func GradientDescent(X *mat64.Dense, y *mat64.Vector, alpha, tolerance float64, maxIters int) *mat64.Vector {
    // m = number of training examples
    // n = number of features
    m, n := X.Dims()
    h := mat64.NewVector(m, nil)
    partials := mat64.NewVector(n, nil)
    newTheta := mat64.NewVector(n, nil)
    for i := 0; i < maxIters; i++ {
        // Calculate partial derivatives.
        h.MulVec(X, newTheta)
        for el := 0; el < m; el++ {
            val := (h.At(el, 0) - y.At(el, 0)) / float64(m)
            h.SetVec(el, val)
        }
        partials.MulVec(X.T(), h)
        // Update theta values.
        for el := 0; el < n; el++ {
            newVal := newTheta.At(el, 0) - (alpha * partials.At(el, 0))
            newTheta.SetVec(el, newVal)
        }
        // Check the "distance" to the local minimum.
        dist := math.Sqrt(mat64.Dot(partials, partials))
        if dist <= tolerance {
            break
        }
    }
    return newTheta
}
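A hypothetical call, with toy values chosen purely for illustration (the data encodes y = 2x with no intercept, so theta should converge near 2):
    X := mat64.NewDense(4, 1, []float64{1, 2, 3, 4})
    y := mat64.NewVector(4, []float64{2, 4, 6, 8})
    theta := GradientDescent(X, y, 0.01, 1e-6, 10000)
    fmt.Println(mat64.Formatted(theta)) // approximately [2]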
Example 4: SetScale
func (n *InnerNormal) SetScale(data *mat64.Dense) error {
    rows, dim := data.Dims()
    if rows < 2 {
        return errors.New("scale: less than two inputs")
    }
    means := make([]float64, dim)
    stds := make([]float64, dim)
    for i := 0; i < dim; i++ {
        // Filter out the extremes before computing the statistics.
        r := data.Col(nil, i)
        if len(r) != rows {
            panic("bad lengths")
        }
        sort.Float64s(r)
        lowerIdx := int(math.Floor(float64(rows) * n.LowerQuantile))
        upperIdx := int(math.Ceil(float64(rows) * n.UpperQuantile))
        trimmed := r[lowerIdx:upperIdx]
        mean, std := stat.MeanStdDev(trimmed, nil)
        means[i] = mean
        stds[i] = std
    }
    n.Mu = means
    n.Sigma = stds
    n.Dim = dim
    n.Scaled = true
    return nil
}
Example 5: StackConstr
func StackConstr(low, A, up *mat64.Dense) (stackA, b *mat64.Dense, ranges []float64) {
    neglow := &mat64.Dense{}
    neglow.Scale(-1, low)
    b = &mat64.Dense{}
    b.Stack(up, neglow)
    negA := &mat64.Dense{}
    negA.Scale(-1, A)
    stackA = &mat64.Dense{}
    stackA.Stack(A, negA)
    // Capture the range of each constraint from A because this information
    // is lost when converting from "low <= Ax <= up" via stacking to
    // "stackA x <= b".
    m, _ := A.Dims()
    ranges = make([]float64, m, 2*m)
    for i := 0; i < m; i++ {
        ranges[i] = up.At(i, 0) - low.At(i, 0)
        if ranges[i] == 0 {
            if up.At(i, 0) == 0 {
                ranges[i] = 1
            } else {
                ranges[i] = up.At(i, 0)
            }
        }
    }
    ranges = append(ranges, ranges...)
    return stackA, b, ranges
}
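A small worked example may make the stacking clearer. With a single constraint 1 <= 2x <= 4, the two-sided bound becomes two one-sided rows, and the constraint's range (up minus low) is duplicated for the negated copy. The numbers are made up for illustration:
    // One constraint: 1 <= 2x <= 4.
    low := mat64.NewDense(1, 1, []float64{1})
    A := mat64.NewDense(1, 1, []float64{2})
    up := mat64.NewDense(1, 1, []float64{4})
    stackA, b, ranges := StackConstr(low, A, up)
    // stackA is [2; -2], b is [4; -1], ranges is [3 3].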
Example 6: Predict
func (lr *LogisticRegression) Predict(X *mat64.Dense) []Prediction {
    nSamples, _ := X.Dims()
    prediction := []Prediction{}
    // PredictProba scores all samples at once, so call it once rather than
    // once per sample.
    scores := liblinear.PredictProba(lr.model, X)
    _, nClasses := scores.Dims()
    for i := 0; i < nSamples; i++ {
        bestScore := scores.At(i, 0)
        bestLangIdx := 0
        for langIdx := 0; langIdx < nClasses; langIdx++ {
            score := scores.At(i, langIdx)
            if score > bestScore {
                bestScore = score
                bestLangIdx = langIdx
            }
        }
        prediction = append(prediction, Prediction{
            Label:    bestLangIdx,
            Language: "TODO: PENDING",
            Score:    bestScore,
        })
    }
    return prediction
}
Example 7: MetropolisHastings
// MetropolisHastings generates rows(batch) samples using the Metropolis-Hastings
// algorithm (http://en.wikipedia.org/wiki/Metropolis%E2%80%93Hastings_algorithm),
// with the given target and proposal distributions, starting at the initial
// location and storing the results in-place into batch. If src != nil, it will
// be used to generate random numbers, otherwise rand.Float64 will be used.
//
// Metropolis-Hastings is a Markov-chain Monte Carlo algorithm that generates
// samples according to the distribution specified by target by using the Markov
// chain implicitly defined by the proposal distribution. At each
// iteration, a proposal point is generated randomly from the current location.
// This proposal point is accepted with probability
//  p = min(1, (target(new) * proposal(current|new)) / (target(current) * proposal(new|current)))
// If the new location is accepted, it is stored into batch and becomes the
// new current location. If it is rejected, the current location remains and
// is stored into batch. Thus, a location is stored into batch at every iteration.
//
// The samples in Metropolis-Hastings are correlated with one another through the
// Markov chain. As a result, the initial value can have a significant influence
// on the early samples, and so, typically, the first samples generated by the chain
// are ignored. This is known as "burn-in", and can be accomplished with slicing.
// The best choice for burn-in length will depend on the sampling and target
// distributions.
//
// Many choose to have a sampling "rate" where a number of samples
// are ignored in between each kept sample. This helps decorrelate
// the samples from one another, but also reduces the number of available samples.
// A sampling rate can be implemented with successive calls to MetropolisHastings.
func MetropolisHastings(batch *mat64.Dense, initial []float64, target distmv.LogProber, proposal MHProposal, src *rand.Rand) {
    f64 := rand.Float64
    if src != nil {
        f64 = src.Float64
    }
    if len(initial) == 0 {
        panic("metropolishastings: zero length initial")
    }
    r, _ := batch.Dims()
    current := make([]float64, len(initial))
    copy(current, initial)
    proposed := make([]float64, len(initial))
    currentLogProb := target.LogProb(initial)
    for i := 0; i < r; i++ {
        // Draw a proposal conditional on the current location.
        proposal.ConditionalRand(proposed, current)
        proposedLogProb := target.LogProb(proposed)
        probTo := proposal.ConditionalLogProb(proposed, current)
        probBack := proposal.ConditionalLogProb(current, proposed)
        // Accept when a uniform variate falls below the acceptance ratio;
        // ratios above 1 always accept.
        accept := math.Exp(proposedLogProb + probBack - probTo - currentLogProb)
        if accept > f64() {
            copy(current, proposed)
            currentLogProb = proposedLogProb
        }
        batch.SetRow(i, current)
    }
}
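As the doc comment says, burn-in can be handled by slicing the generated batch. A minimal sketch, assuming target, proposal, initial, and a dimension dim are already defined; Dense.View is used to take the rows after the first 1000:
    samples := mat64.NewDense(10000, dim, nil)
    MetropolisHastings(samples, initial, target, proposal, nil)
    // Discard the first 1000 rows as burn-in; keep the remaining 9000.
    kept := samples.View(1000, 0, 9000, dim).(*mat64.Dense)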
Example 8: Fit
func (nb *NaiveBayes) Fit(X, y *mat64.Dense) {
    nSamples, nFeatures := X.Dims()
    tokensTotal := 0
    langsTotal, _ := y.Dims()
    langsCount := histogram(y.Col(nil, 0))
    tokensTotalPerLang := map[int]int{}
    tokenCountPerLang := map[int]map[int]int{}
    for i := 0; i < nSamples; i++ {
        langIdx := int(y.At(i, 0))
        for j := 0; j < nFeatures; j++ {
            tokensTotal += int(X.At(i, j))
            tokensTotalPerLang[langIdx] += int(X.At(i, j))
            if _, ok := tokenCountPerLang[langIdx]; !ok {
                tokenCountPerLang[langIdx] = map[int]int{}
            }
            tokenCountPerLang[langIdx][j] += int(X.At(i, j))
        }
    }
    nb.params = nbParams{
        TokensTotal:        tokensTotal,
        LangsTotal:         langsTotal,
        LangsCount:         langsCount,
        TokensTotalPerLang: tokensTotalPerLang,
        TokenCountPerLang:  tokenCountPerLang,
    }
}
Example 9: GcvInitCameraMatrix2D
// GcvInitCameraMatrix2D takes one 3-by-N matrix and one 2-by-N matrix as input.
// Each column in the input matrices represents a point in the real world
// (objPts) or in the image (imgPts). It returns the camera matrix.
func GcvInitCameraMatrix2D(objPts, imgPts *mat64.Dense, dims [2]int,
    aspectRatio float64) (camMat *mat64.Dense) {
    objDim, nObjPts := objPts.Dims()
    imgDim, nImgPts := imgPts.Dims()
    if objDim != 3 || imgDim != 2 || nObjPts != nImgPts {
        panic("invalid dimensions for objPts and imgPts")
    }
    objPtsVec := NewGcvPoint3f32Vector(int64(nObjPts))
    imgPtsVec := NewGcvPoint2f32Vector(int64(nObjPts))
    for j := 0; j < nObjPts; j++ {
        objPtsVec.Set(j, NewGcvPoint3f32(mat64.Col(nil, j, objPts)...))
        imgPtsVec.Set(j, NewGcvPoint2f32(mat64.Col(nil, j, imgPts)...))
    }
    imgSize := NewGcvSize2i(dims[0], dims[1])
    camMat = GcvMatToMat64(GcvInitCameraMatrix2D_(
        objPtsVec, imgPtsVec, imgSize, aspectRatio))
    return camMat
}
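A hypothetical call with four point correspondences. Because points are laid out column by column, the row-major slices below list all x values, then all y values, and so on; the coordinates and image size are made up for illustration:
    objPts := mat64.NewDense(3, 4, []float64{
        0, 1, 1, 0, // x
        0, 0, 1, 1, // y
        0, 0, 0, 0, // z (planar target)
    })
    imgPts := mat64.NewDense(2, 4, []float64{
        100, 200, 200, 100, // u
        100, 100, 200, 200, // v
    })
    camMat := GcvInitCameraMatrix2D(objPts, imgPts, [2]int{640, 480}, 1.0)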
Example 10: LatinHypercube
// LatinHypercube generates rows(batch) samples using Latin hypercube sampling
// from the given distribution. If src is not nil, it will be used to generate
// random numbers, otherwise rand.Float64 will be used.
//
// Latin hypercube sampling divides the cumulative distribution function into equally
// spaced bins and guarantees that one sample is generated per bin. Within each bin,
// the location is randomly sampled. The distmv.UnitNormal variable can be used
// for easy generation from the unit interval.
func LatinHypercube(batch *mat64.Dense, q distmv.Quantiler, src *rand.Rand) {
    r, c := batch.Dims()
    var f64 func() float64
    var perm func(int) []int
    if src != nil {
        f64 = src.Float64
        perm = src.Perm
    } else {
        f64 = rand.Float64
        perm = rand.Perm
    }
    r64 := float64(r)
    for i := 0; i < c; i++ {
        // For each dimension, place one point uniformly inside each of the
        // r equally spaced bins, in a random bin order.
        p := perm(r)
        for j := 0; j < r; j++ {
            v := f64()/r64 + float64(j)/r64
            batch.Set(p[j], i, v)
        }
    }
    // Map the unit-interval samples through the distribution's quantile
    // function, row by row.
    p := make([]float64, c)
    for i := 0; i < r; i++ {
        copy(p, batch.RawRowView(i))
        q.Quantile(batch.RawRowView(i), p)
    }
}
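A minimal usage sketch, using the distmv.UnitNormal variable mentioned in the doc comment (assumed, per that comment, to satisfy distmv.Quantiler in the matching package version):
    // 100 Latin hypercube samples in 3 dimensions.
    batch := mat64.NewDense(100, 3, nil)
    LatinHypercube(batch, distmv.UnitNormal, nil)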
Example 11: toFeatureNodes
func toFeatureNodes(X *mat64.Dense) []*C.struct_feature_node {
    featureNodes := []*C.struct_feature_node{}
    nRows, nCols := X.Dims()
    for i := 0; i < nRows; i++ {
        row := []C.struct_feature_node{}
        for j := 0; j < nCols; j++ {
            // liblinear uses a sparse representation: only non-zero
            // features are stored, with 1-based indices.
            val := X.At(i, j)
            if val != 0 {
                row = append(row, C.struct_feature_node{
                    index: C.int(j + 1),
                    value: C.double(val),
                })
            }
        }
        // Each row is terminated by a sentinel node with index -1.
        row = append(row, C.struct_feature_node{
            index: C.int(-1),
            value: C.double(0),
        })
        featureNodes = append(featureNodes, &row[0])
    }
    return featureNodes
}
Example 12: DfFromMat
func DfFromMat(mat *mat64.Dense) *DataFrame {
    rows, cols := mat.Dims()
    return &DataFrame{
        data: mat,
        rows: rows,
        cols: cols,
    }
}
Example 13: rowSum
func rowSum(matrix *mat64.Dense, rowID int) float64 {
    _, cols := matrix.Dims()
    sum := 0.0
    for c := 0; c < cols; c++ {
        sum += matrix.At(rowID, c)
    }
    return sum
}
Example 14: colSum
func colSum(matrix *mat64.Dense, colID int) float64 {
    rows, _ := matrix.Dims()
    sum := 0.0
    for r := 0; r < rows; r++ {
        sum += matrix.At(r, colID)
    }
    return sum
}
Example 15: Predict
// Predict runs the wrapped liblinear model on every row of X and returns the
// predictions as an nRows-by-1 matrix. The underlying C prototype is:
// double predict(const struct model *model_, const struct feature_node *x);
func Predict(model *Model, X *mat64.Dense) *mat64.Dense {
    nRows, nCols := X.Dims()
    cX := mapCDouble(X.RawMatrix().Data)
    y := mat64.NewDense(nRows, 1, nil)
    result := doubleToFloats(C.call_predict(
        model.cModel, &cX[0], C.int(nRows), C.int(nCols)), nRows)
    y.SetCol(0, result)
    return y
}