This page collects typical usage examples of the Python method neuralnet.NeuralNet.calculate. If you are wondering what NeuralNet.calculate is for, how to call it, or where to find it used in practice, the curated method examples below may help. You can also read further about the containing class, neuralnet.NeuralNet.
Below, 1 code example of NeuralNet.calculate is shown; examples are sorted by popularity by default. You can upvote the examples you like or find useful, and your ratings help the system recommend better Python code examples.
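For orientation before the full example, here is a minimal sketch of the call pattern. This page does not show the NeuralNet constructor, so the construction below is hypothetical; only the calculate call itself is documented here. calculate is fed a flat list of float inputs and returns the network's output for that input.

from neuralnet import NeuralNet

net = NeuralNet()  # hypothetical construction; the real constructor arguments are not shown on this page
# calculate takes a flat list of floats; in Example 1 below, slot 0 holds
# an action index and the remaining slots encode the board state
qValue = net.calculate([0.0, 4.0, 4.0, 4.0, 4.0])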
Example 1: NNPlayer
# Required import: from neuralnet import NeuralNet [as alias]
# Or: from neuralnet.NeuralNet import calculate [as alias]
#......... part of the code is omitted here .........
        if found: return min
        # array has no positive values
        else: return 0
    def _addToZeros(self, num, arr):
        """ adds num to all zero values in the array """
        for index, val in enumerate(arr):
            if val == 0:
                arr[index] += num
        return arr
    def _addToArray(self, num, arr):
        """ adds the num to all values in the array """
        return [i + num for i in arr]
    def _getBestIndex(self, validQvals):
        """ chooses the current expected best move """
        maxVal = max(validQvals)  # FIXME
        bestMoves = [index for index, move in enumerate(validQvals) if move == maxVal]
        # heuristic: break ties by choosing the last bucket
        return int(bestMoves[-1])
    def _getQvals(self, board):
        """ retrieves the Q values for all actions from the current state """
        state = self._getState(board)
        # create the input to the neural network: slot 0 holds the
        # candidate action, the remaining slots hold the state
        toNN = [state[i - 1] for i in range(1, self.inputSize)]
        toNN.insert(0, 0.0)
        # find the expected reward for each possible move
        qVals = []
        for i in range(self.rowSize):
            toNN[0] = float(i)
            qVals.append(self.Q.calculate(toNN))
        return qVals
    def _getState(self, board):
        """ constructs the state as a list: my mancala, my side,
        opponent's mancala, opponent's side """
        mySide = board.mySide(self.id)
        oppSide = board.oppSide(self.id)
        myMancala = board.stonesInMyMancala(self.id)
        oppMancala = board.stonesInOppMancala(self.id)
        state = []  # size should be inputSize - 1
        state.append(float(myMancala))
        for my in mySide:
            state.append(float(my))
        state.append(float(oppMancala))
        for op in oppSide:
            state.append(float(op))
        return state
    def gameOver(self, myScore, oppScore):
        """ notifies the learner that the game is over and updates the
        Q function based on the win or loss and the move list """
        if not self.learn:
            return
        # the final reward is this player's score differential
        reward = float(myScore) - float(oppScore)
        self.movelist[self.id].append(reward)
        self._updateGameRecord(self.movelist[self.id])
        self.movelist[self.id] = []
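To make the greedy selection in _getBestIndex concrete, here is a standalone rerun of its tie-breaking logic on a hand-made Q-value list (the numbers are illustrative only):

# two buckets share the maximal Q value of 0.7
validQvals = [0.2, 0.7, 0.7, 0.1]
maxVal = max(validQvals)
bestMoves = [index for index, move in enumerate(validQvals) if move == maxVal]
print(bestMoves[-1])  # -> 2: the heuristic picks the last best bucket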