本文整理匯總了Python中rnn.RNN屬性的典型用法代碼示例。如果您正苦於以下問題:Python rnn.RNN屬性的具體用法?Python rnn.RNN怎麽用?Python rnn.RNN使用的例子?那麽,這裏精選的屬性代碼示例或許可以為您提供幫助。您也可以進一步了解該屬性所在類rnn的用法示例。
在下文中一共展示了rnn.RNN屬性的3個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: softmax
# 需要導入模塊: import rnn [as 別名]
# 或者: from rnn import RNN [as 別名]
def softmax(xs):
    """Apply the softmax function to the input array.

    Subtracts the maximum element before exponentiating for numerical
    stability; this is mathematically identical to exp(xs) / sum(exp(xs))
    but avoids overflow for large inputs.
    """
    exps = np.exp(xs - np.max(xs))
    return exps / np.sum(exps)
# Initialize our RNN!
示例2: processData
# 需要導入模塊: import rnn [as 別名]
# 或者: from rnn import RNN [as 別名]
def processData(data, backprop=True):
    '''
    Returns the RNN's loss and accuracy for the given data.

    - data is a dictionary mapping text to True or False.
    - backprop determines if the backward phase should be run.

    Relies on module-level `rnn` (the trained network) and
    `createInputs` (text -> one-hot vectors).
    '''
    items = list(data.items())
    random.shuffle(items)  # visit examples in a fresh random order each call

    loss = 0
    num_correct = 0

    for x, y in items:
        inputs = createInputs(x)
        target = int(y)  # True/False -> class index 1/0

        # Forward pass
        out, _ = rnn.forward(inputs)
        probs = softmax(out)

        # Cross-entropy loss / accuracy
        loss -= np.log(probs[target])
        num_correct += int(np.argmax(probs) == target)

        if backprop:
            # Gradient of cross-entropy w.r.t. the pre-softmax output:
            # dL/dy = probs, with 1 subtracted at the target index.
            d_L_d_y = probs
            d_L_d_y[target] -= 1

            # Backward pass
            rnn.backprop(d_L_d_y)

    return loss / len(data), num_correct / len(data)
# Training loop
示例3: test
# 需要導入模塊: import rnn [as 別名]
# 或者: from rnn import RNN [as 別名]
def test(netFile,dataSet, model='RNN', trees=None):
if trees==None:
trees = tr.loadTrees(dataSet)
assert netFile is not None, "Must give model to test"
print "Testing netFile %s"%netFile
with open(netFile,'r') as fid:
opts = pickle.load(fid)
_ = pickle.load(fid)
if (model=='RNTN'):
nn = RNTN(opts.wvecDim,opts.outputDim,opts.numWords,opts.minibatch)
elif(model=='RNN'):
nn = RNN(opts.wvecDim,opts.outputDim,opts.numWords,opts.minibatch)
elif(model=='RNN2'):
nn = RNN2(opts.wvecDim,opts.middleDim,opts.outputDim,opts.numWords,opts.minibatch)
elif(model=='RNN2Drop'):
nn = RNN2Drop(opts.wvecDim,opts.middleDim,opts.outputDim,opts.numWords,opts.minibatch)
elif(model=='RNN2DropMaxout'):
nn = RNN2DropMaxout(opts.wvecDim,opts.middleDim,opts.outputDim,opts.numWords,opts.minibatch)
elif(opts.model=='RNN3'):
nn = RNN3(opts.wvecDim,opts.middleDim,opts.outputDim,opts.numWords,opts.minibatch)
elif(model=='DCNN'):
nn = DCNN(opts.wvecDim,opts.ktop,opts.m1,opts.m2, opts.n1, opts.n2,0, opts.outputDim,opts.numWords, 2, opts.minibatch,rho=1e-4)
trees = cnn.tree2matrix(trees)
else:
raise '%s is not a valid neural network so far only RNTN, RNN, RNN2, RNN3, and DCNN'%opts.model
nn.initParams()
nn.fromFile(fid)
print "Testing %s..."%model
cost,correct, guess, total = nn.costAndGrad(trees,test=True)
correct_sum = 0
for i in xrange(0,len(correct)):
correct_sum+=(guess[i]==correct[i])
# TODO
# Plot the confusion matrix?
confuse_matrix = np.zeros((5,5))
for i in range(len(correct)):
confuse_matrix[correct[i]][guess[i]] += 1
print "Cost %f, Acc %f"%(cost,correct_sum/float(total))
makeconf(confuse_matrix,model)
return cost, correct_sum/float(total)