This article collects typical usage examples of the Python method Tools.Tools.tanhDerivative. If you have been wondering how to use the Python Tools.tanhDerivative method, what it is for, or what real code that calls Tools.tanhDerivative looks like, then the hand-picked code examples below may help. You can also explore further usage examples of the containing class, Tools.Tools.
The following shows 1 code example of the Tools.tanhDerivative method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: __init__
# Required import: from Tools import Tools [as alias]
# Or: from Tools.Tools import tanhDerivative [as alias]
import random

from Connection import Connection  # Connection is a project-local class; adjust this import to your layout
from Tools import Tools
class Neuron:
    def __init__(self, numberOutputs, index):
        self.outputVal = 0.0
        self.index = index
        self.gradient = 0.0
        self.learningRate = 0.15
        self.alpha = 0.1
        self.tools = Tools()
        # Connections to the neurons that this neuron feeds
        self.outputWeights = []
        for i in range(0, numberOutputs):
            self.outputWeights.append(Connection())
            self.outputWeights[i].weight = self.randomWeight()
            #print self.outputWeights[i].weight

    def __repr__(self):
        return "Neuron : index = " + str(self.index) + ", outputVal = " + str(self.outputVal) + "\n"  # + ", Connections : " + str(self.outputWeights)

    def __str__(self):
        return self.__repr__()

    def feedForward(self, prevLayer):
        # Sum the previous layer's outputs (which are our inputs),
        # including the bias node from the previous layer
        total = 0.0
        for i in range(0, len(prevLayer)):
            total += prevLayer[i].outputVal * prevLayer[i].outputWeights[self.index].weight
        # Update the output value with the activation function
        self.outputVal = self.activationFunction(total)

    def activationFunction(self, x):
        #return self.tools.sigmoid(x)
        return self.tools.tanh(x)

    def activationFunctionDerivative(self, x):
        #return self.tools.sigmoidDerivative(x)
        return self.tools.tanhDerivative(x)

    def randomWeight(self):
        return random.uniform(0.0, 1.0)

    def calcOutputGradients(self, targetVal):
        delta = targetVal - self.outputVal
        self.gradient = delta * self.activationFunctionDerivative(self.outputVal)

    def calcHiddenGradients(self, nextLayer):
        dow = self.sumDow(nextLayer)
        self.gradient = dow * self.activationFunctionDerivative(self.outputVal)

    def sumDow(self, nextLayer):
        # Sum of our contributions to the errors of the next layer (the bias neuron is skipped)
        total = 0.0
        for i in range(0, len(nextLayer) - 1):
            total += self.outputWeights[i].weight * nextLayer[i].gradient
        return total

    def updateInputWeights(self, prevLayer):
        # The weights to update live in the Connection objects of the previous layer's neurons
        for i in range(0, len(prevLayer)):
            neuron = prevLayer[i]
            oldDeltaWeight = neuron.outputWeights[self.index].deltaWeight
            # New delta: input scaled by the gradient and learning rate,
            # plus momentum (alpha) times the previous delta weight
            newDeltaWeight = self.learningRate * neuron.outputVal * self.gradient + self.alpha * oldDeltaWeight
            neuron.outputWeights[self.index].deltaWeight = newDeltaWeight
            neuron.outputWeights[self.index].weight += newDeltaWeight
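The example above depends on two project-local helpers, Tools and Connection, whose source is not shown on this page. The following is a minimal sketch of what they might look like, inferred only from how the example calls them; the method bodies and the Connection fields weight / deltaWeight are assumptions, not the original implementation.

import math


class Tools:
    def tanh(self, x):
        return math.tanh(x)

    def tanhDerivative(self, x):
        # The example passes in the neuron's output value, i.e. tanh(x) already
        # applied, so the derivative simplifies to 1 - x^2 (assumed implementation).
        return 1.0 - x * x

    def sigmoid(self, x):
        return 1.0 / (1.0 + math.exp(-x))

    def sigmoidDerivative(self, x):
        # Likewise assumed to receive the already-activated output value.
        return x * (1.0 - x)


class Connection:
    def __init__(self):
        self.weight = 0.0       # current weight of this connection
        self.deltaWeight = 0.0  # previous weight change, used for momentum

Note that activationFunctionDerivative is always called with self.outputVal rather than the raw weighted sum, which is why tanhDerivative can be written in terms of the output value alone.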