This article collects typical usage examples of the Python method Tools.Tools.tanh. If you have been wondering how exactly to use Python's Tools.tanh, how it works, or where to find examples of it, the hand-picked code samples here may help. You can also look into further usage examples of the containing class, Tools.Tools.
Shown below is 1 code example of the Tools.tanh method; examples are sorted by popularity by default. You can upvote any example you like or find useful, and your votes help the system recommend better Python code examples.
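For context, tanh is the hyperbolic tangent, tanh(x) = (e^x - e^-x)/(e^x + e^-x), whose derivative is 1 - tanh²(x). Note that the example below calls its derivative helper with the already-activated output value, so Tools.tanhDerivative presumably takes tanh's output rather than its input. Here is a minimal sketch of what the Tools class might look like under that assumption; the method bodies are illustrative, not the library's actual source:

import math

class Tools:
    def sigmoid(self, x):
        return 1.0 / (1.0 + math.exp(-x))

    def sigmoidDerivative(self, y):
        # Assumes y is the sigmoid output: d/dx sigmoid(x) = y * (1 - y)
        return y * (1.0 - y)

    def tanh(self, x):
        return math.tanh(x)

    def tanhDerivative(self, y):
        # Assumes y is the tanh output: d/dx tanh(x) = 1 - y^2
        return 1.0 - y * y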
Example 1: __init__
# Required import: from Tools import Tools [as alias]
# Or: from Tools.Tools import tanh [as alias]
import random
class Neuron:
    def __init__(self, numberOutputs, index):
        self.outputVal = 0.0
        self.index = index
        self.gradient = 0.0
        self.learningRate = 0.15
        self.alpha = 0.1  # momentum factor
        self.tools = Tools()
        # One connection per neuron that this neuron feeds
        self.outputWeights = []
        for i in range(0, numberOutputs):
            self.outputWeights.append(Connection())
            self.outputWeights[i].weight = self.randomWeight()
            # print(self.outputWeights[i].weight)

    def __repr__(self):
        return "Neuron : index = " + str(self.index) + ", outputVal = " + str(self.outputVal) + "\n"  # + ", Connections : " + str(self.outputWeights)

    def __str__(self):
        return self.__repr__()

    def feedForward(self, prevLayer):
        # Sum the previous layer's outputs (which are our inputs),
        # including the bias node from the previous layer
        total = 0.0
        for i in range(0, len(prevLayer)):
            total += prevLayer[i].outputVal * prevLayer[i].outputWeights[self.index].weight
        # Update the output value with the activated sum
        self.outputVal = self.activationFunction(total)

    def activationFunction(self, x):
        # return self.tools.sigmoid(x)
        return self.tools.tanh(x)

    def activationFunctionDerivative(self, x):
        # return self.tools.sigmoidDerivative(x)
        return self.tools.tanhDerivative(x)

    def randomWeight(self):
        return random.uniform(0.0, 1.0)

    def calcOutputGradients(self, targetVal):
        delta = targetVal - self.outputVal
        # The derivative is evaluated at the activated output value
        self.gradient = delta * self.activationFunctionDerivative(self.outputVal)

    def calcHiddenGradients(self, nextLayer):
        dow = self.sumDow(nextLayer)
        self.gradient = dow * self.activationFunctionDerivative(self.outputVal)

    def sumDow(self, nextLayer):
        # Sum the error contributions of the neurons we feed,
        # skipping the bias neuron at the end of the next layer
        total = 0.0
        for i in range(0, len(nextLayer) - 1):
            total += self.outputWeights[i].weight * nextLayer[i].gradient
        return total

    def updateInputWeights(self, prevLayer):
        # The weights to update live in the previous layer's connections
        for i in range(0, len(prevLayer)):
            neuron = prevLayer[i]
            oldDeltaWeight = neuron.outputWeights[self.index].deltaWeight
            # Individual input scaled by the gradient and learning rate,
            # plus momentum: a fraction of the previous delta weight
            newDeltaWeight = self.learningRate * neuron.outputVal * self.gradient + self.alpha * oldDeltaWeight
            neuron.outputWeights[self.index].deltaWeight = newDeltaWeight
            neuron.outputWeights[self.index].weight += newDeltaWeight
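The example also depends on a Connection class that stores a weight and its last delta (used by the momentum term). Below is a minimal sketch of that class plus hypothetical wiring: two input neurons, each with one outgoing connection, feeding a single output neuron. The bias neuron that the original scheme expects at the end of each layer is omitted here for brevity, and the input values and target are made up for illustration:

class Connection:
    def __init__(self):
        self.weight = 0.0
        self.deltaWeight = 0.0

# Hypothetical wiring: index 0 and 1 within the input layer,
# each with 1 outgoing connection to the output neuron at index 0
inputLayer = [Neuron(1, 0), Neuron(1, 1)]
outputNeuron = Neuron(0, 0)

inputLayer[0].outputVal = 0.5
inputLayer[1].outputVal = -0.3

outputNeuron.feedForward(inputLayer)
outputNeuron.calcOutputGradients(1.0)       # train toward a target of 1.0
outputNeuron.updateInputWeights(inputLayer)
print(outputNeuron)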