This page collects typical usage examples of the Go method Matrix.Increment from github.com/huichen/mlf/util. If you have been wondering what Matrix.Increment does, how to use it, or where it applies, the curated examples below may help; you can also explore the containing class github.com/huichen/mlf/util.Matrix further.
Two code examples of the Matrix.Increment method are shown below, sorted by popularity by default.
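Judging from the call sites in the two examples below, Matrix.Increment(other, scale) adds scale * other to the receiver in place, and Matrix.Populate() returns a zero-valued matrix of the receiver's shape. The following is a minimal sketch of the call pattern under those assumptions; gradA, gradB, and rate are hypothetical placeholders, not part of the library:

// Sketch only: assumes m.Increment(other, s) performs "m += s * other" in
// place, as the examples below suggest. gradA, gradB, and rate are placeholders.
accum := weights.Populate()     // zero matrix shaped like weights
accum.Increment(gradA, 0.5)     // accum += 0.5 * gradA
accum.Increment(gradB, 0.5)     // accum += 0.5 * gradB
weights.Increment(accum, -rate) // gradient-style update: weights -= rate * accum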
Example 1: OptimizeWeights
func (opt *gdOptimizer) OptimizeWeights(
	weights *util.Matrix, derivative_func ComputeInstanceDerivativeFunc, set data.Dataset) {
	// Partial-derivative vector
	derivative := weights.Populate()

	// Learning-rate computer
	learningRate := NewLearningRate(opt.options)

	// Optimization loop
	iterator := set.CreateIterator()
	step := 0
	var learning_rate float64
	convergingSteps := 0
	oldWeights := weights.Populate()
	weightsDelta := weights.Populate()
	instanceDerivative := weights.Populate()
	log.Print("Starting gradient-descent optimization")
	for {
		if opt.options.MaxIterations > 0 && step >= opt.options.MaxIterations {
			break
		}
		step++

		// Zero the derivative vector before each pass over the samples
		derivative.Clear()

		// Iterate over all samples, computing and accumulating the partial derivatives
		iterator.Start()
		instancesProcessed := 0
		for !iterator.End() {
			instance := iterator.GetInstance()
			derivative_func(weights, instance, instanceDerivative)
			derivative.Increment(instanceDerivative, 1.0/float64(set.NumInstances()))
			iterator.Next()
			instancesProcessed++
			if opt.options.GDBatchSize > 0 && instancesProcessed >= opt.options.GDBatchSize {
				// Add the regularization term
				derivative.Increment(ComputeRegularization(weights, opt.options),
					float64(instancesProcessed)/(float64(set.NumInstances())*float64(set.NumInstances())))

				// Compute the feature-weight delta
				delta := opt.GetDeltaX(weights, derivative)

				// Update the weights using the learning rate
				learning_rate = learningRate.ComputeLearningRate(delta)
				weights.Increment(delta, learning_rate)

				// Reset for the next mini-batch
				derivative.Clear()
				instancesProcessed = 0
			}
		}
		if instancesProcessed > 0 {
			// Flush the remaining samples
			derivative.Increment(ComputeRegularization(weights, opt.options),
				float64(instancesProcessed)/(float64(set.NumInstances())*float64(set.NumInstances())))
			delta := opt.GetDeltaX(weights, derivative)
			learning_rate = learningRate.ComputeLearningRate(delta)
			weights.Increment(delta, learning_rate)
		}

		weightsDelta.WeightedSum(weights, oldWeights, 1, -1)
		oldWeights.DeepCopy(weights)
		weightsNorm := weights.Norm()
		weightsDeltaNorm := weightsDelta.Norm()
		log.Printf("#%d |w|=%1.3g |dw|/|w|=%1.3g lr=%1.3g", step, weightsNorm, weightsDeltaNorm/weightsNorm, learning_rate)

		// Check for numerical overflow
		if math.IsNaN(weightsNorm) {
			log.Fatal("Optimization failed: not converging")
		}

		// Check for convergence
		if weightsDeltaNorm/weightsNorm < opt.options.ConvergingDeltaWeight {
			convergingSteps++
			if convergingSteps > opt.options.ConvergingSteps {
				log.Printf("Converged")
				break
			}
		}
	}
}
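A note on the design: when opt.options.GDBatchSize is positive, the inner loop applies a weight update after every GDBatchSize instances (mini-batch gradient descent), and the trailing instancesProcessed > 0 block flushes the final partial batch; when it is zero, no update fires inside the loop, so all per-instance derivatives accumulate into a single full-batch update per outer iteration.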
Example 2: OptimizeWeights
func (opt *lbfgsOptimizer) OptimizeWeights(
	weights *util.Matrix, derivative_func ComputeInstanceDerivativeFunc, set data.Dataset) {
	// Learning-rate computer
	learningRate := NewLearningRate(opt.options)

	// Partial-derivative vector
	derivative := weights.Populate()

	// Optimization loop
	step := 0
	convergingSteps := 0
	oldWeights := weights.Populate()
	weightsDelta := weights.Populate()

	// Allocate scratch resources for each worker goroutine
	numLbfgsThreads := *lbfgs_threads
	if numLbfgsThreads == 0 {
		numLbfgsThreads = runtime.NumCPU()
	}
	workerSet := make([]data.Dataset, numLbfgsThreads)
	workerDerivative := make([]*util.Matrix, numLbfgsThreads)
	workerInstanceDerivative := make([]*util.Matrix, numLbfgsThreads)
	for iWorker := 0; iWorker < numLbfgsThreads; iWorker++ {
		workerBuckets := []data.SkipBucket{
			{true, iWorker},
			{false, 1},
			{true, numLbfgsThreads - 1 - iWorker},
		}
		workerSet[iWorker] = data.NewSkipDataset(set, workerBuckets)
		workerDerivative[iWorker] = weights.Populate()
		workerInstanceDerivative[iWorker] = weights.Populate()
	}

	log.Print("Starting L-BFGS optimization")
	for {
		if opt.options.MaxIterations > 0 && step >= opt.options.MaxIterations {
			break
		}
		step++

		// Launch the worker goroutines
		workerChannel := make(chan int, numLbfgsThreads)
		for iWorker := 0; iWorker < numLbfgsThreads; iWorker++ {
			go func(iw int) {
				workerDerivative[iw].Clear()
				iterator := workerSet[iw].CreateIterator()
				iterator.Start()
				for !iterator.End() {
					instance := iterator.GetInstance()
					derivative_func(
						weights, instance, workerInstanceDerivative[iw])
					workerDerivative[iw].Increment(
						workerInstanceDerivative[iw], float64(1)/float64(set.NumInstances()))
					iterator.Next()
				}
				workerChannel <- iw
			}(iWorker)
		}
		derivative.Clear()

		// Wait for the workers to finish, then sum their partial derivatives
		for iWorker := 0; iWorker < numLbfgsThreads; iWorker++ {
			<-workerChannel
		}
		for iWorker := 0; iWorker < numLbfgsThreads; iWorker++ {
			derivative.Increment(workerDerivative[iWorker], 1)
		}

		// Add the regularization term
		derivative.Increment(ComputeRegularization(weights, opt.options), 1.0/float64(set.NumInstances()))

		// Compute the feature-weight delta
		delta := opt.GetDeltaX(weights, derivative)

		// Update the weights using the learning rate
		learning_rate := learningRate.ComputeLearningRate(delta)
		weights.Increment(delta, learning_rate)
		weightsDelta.WeightedSum(weights, oldWeights, 1, -1)
		oldWeights.DeepCopy(weights)
		weightsNorm := weights.Norm()
		weightsDeltaNorm := weightsDelta.Norm()
		log.Printf("#%d |dw|/|w|=%f |w|=%f lr=%1.3g", step, weightsDeltaNorm/weightsNorm, weightsNorm, learning_rate)

		// Check for numerical overflow
		if math.IsNaN(weightsNorm) {
			log.Fatal("Optimization failed: not converging")
		}

		// Check for convergence
		if weightsDeltaNorm/weightsNorm < opt.options.ConvergingDeltaWeight {
			convergingSteps++
			if convergingSteps > opt.options.ConvergingSteps {
				log.Printf("Converged")
				break
			}
		} else {
			//......... the rest of the code is omitted here .........
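A note on the parallelization: the SkipBucket lists appear to stripe the dataset across workers; worker iw skips the first iw instances, takes one, skips the next numLbfgsThreads-1-iw, and repeats, so every instance is visited by exactly one goroutine. Each worker accumulates its partial derivative via Increment with weight 1/NumInstances, and the main goroutine then sums the per-worker matrices with derivative.Increment(workerDerivative[iWorker], 1) before adding regularization.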