This article collects typical usage examples of the Golang method github.com/sjwhitworth/golearn/base.Instances.GetClassDistributionAfterSplit. If you are unsure what Instances.GetClassDistributionAfterSplit does or how to call it, the selected code example below may help. You can also look further into the other methods of the enclosing type, github.com/sjwhitworth/golearn/base.Instances.
One code example of the Instances.GetClassDistributionAfterSplit method is shown below.
Example 1: GetSplitAttributeFromSelection
// GetSplitAttributeFromSelection returns the Attribute amongst consideredAttributes
// which maximises the information gain with respect to the class Attribute
//
// IMPORTANT: passing a zero-length consideredAttributes parameter will panic()
func (r *InformationGainRuleGenerator) GetSplitAttributeFromSelection(consideredAttributes []int, f *base.Instances) base.Attribute {
	// Next step is to compute the information gain at this node
	// for each randomly chosen attribute, and pick the one
	// which maximises it
	maxGain := math.Inf(-1)
	selectedAttribute := -1

	// Compute the base entropy
	classDist := f.GetClassDistribution()
	baseEntropy := getBaseEntropy(classDist)

	// Compute the information gain for each attribute
	for _, s := range consideredAttributes {
		proposedClassDist := f.GetClassDistributionAfterSplit(f.GetAttr(s))
		localEntropy := getSplitEntropy(proposedClassDist)
		informationGain := baseEntropy - localEntropy
		if informationGain > maxGain {
			maxGain = informationGain
			selectedAttribute = s
		}
	}

	// Pick the one which maximises IG
	return f.GetAttr(selectedAttribute)
}
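
For context, the example above depends on two entropy helpers, getBaseEntropy and getSplitEntropy, defined elsewhere in the same package, and on GetClassDistributionAfterSplit returning, for each value of the candidate attribute, the class counts of the instances that would fall into that branch. The standalone sketch below shows how an information-gain calculation of this kind can be computed over such a nested count map. The entropy and splitEntropy helpers, the assumed map shape (attribute value → class value → count), and the sample data are illustrative assumptions, not golearn's own implementation.

package main

import (
	"fmt"
	"math"
)

// entropy computes the Shannon entropy -Σ p*log2(p) of a
// class value → count map.
func entropy(counts map[string]int) float64 {
	total := 0
	for _, c := range counts {
		total += c
	}
	if total == 0 {
		return 0
	}
	e := 0.0
	for _, c := range counts {
		if c == 0 {
			continue
		}
		p := float64(c) / float64(total)
		e -= p * math.Log2(p)
	}
	return e
}

// splitEntropy computes the weighted average entropy of the class
// distribution after a split: Σ_v (n_v / N) * H(class | attribute = v).
// dist is assumed to be keyed by attribute value, then by class value.
func splitEntropy(dist map[string]map[string]int) float64 {
	grandTotal := 0
	for _, classCounts := range dist {
		for _, c := range classCounts {
			grandTotal += c
		}
	}
	if grandTotal == 0 {
		return 0
	}
	e := 0.0
	for _, classCounts := range dist {
		branchTotal := 0
		for _, c := range classCounts {
			branchTotal += c
		}
		e += float64(branchTotal) / float64(grandTotal) * entropy(classCounts)
	}
	return e
}

func main() {
	// Hypothetical distribution after splitting on a two-valued attribute:
	// attribute value → class value → count.
	afterSplit := map[string]map[string]int{
		"sunny": {"yes": 2, "no": 3},
		"rainy": {"yes": 3, "no": 2},
	}
	// Base (pre-split) class distribution over the same ten instances.
	baseDist := map[string]int{"yes": 5, "no": 5}

	gain := entropy(baseDist) - splitEntropy(afterSplit)
	fmt.Printf("information gain: %.4f\n", gain)
}

In GetSplitAttributeFromSelection this difference (base entropy minus split entropy) is exactly what is compared against maxGain for each considered attribute, so the attribute whose split yields the lowest weighted entropy is the one returned.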