This page collects typical usage examples of the make_onehot function from Python's nn.math module. If you are unsure what make_onehot does, how to call it, or what real-world usage looks like, the hand-picked code samples below may help.
A total of 15 code examples of make_onehot are shown, sorted by popularity by default.
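The nn.math module itself is not reproduced on this page, so as a point of reference, here is a minimal sketch of what make_onehot presumably does, inferred only from how it is called in the examples below (an integer index and a length, returning a 1-D NumPy vector); the actual library source may differ.

import numpy as np

def make_onehot(i, n):
    # Sketch only: a length-n vector of zeros with a 1.0 at position i.
    v = np.zeros(n)
    v[i] = 1.0
    return v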
Example 1: backprop
def backprop(self,xs,ys,hs,y_hat):
ns = len(xs)
h_final = hs[ns-1]
delta = (y_hat -ys)
self.grads.b2 += delta
ht = h_final.reshape(len(h_final),1)
delta = delta.reshape(len(ys),1)
self.grads.U += delta.dot(ht.T)
# H and L
t = ns-1 # last t
current = self.params.U.T.dot(delta) * ht * (1-ht) # the common part
prev_ht = hs[t-1].reshape(len(hs[t-1]),1)
self.grads.H += current.dot(prev_ht.T)
self.grads.b1 += current.reshape((len(current),))
xt = make_onehot(xs[t],self.vdim).reshape(self.vdim,1)
self.sgrads.L[xs[t]] = xt.dot(current.T)[xs[t]]
for i in range(1,self.bptt):
if t<i: # so that h[-2] doesn't return anything
continue
ht_i = hs[t-i].reshape(len(hs[t-i]),1)
prev_ht_i = hs[t-i-1].reshape(len(hs[t-i-1]),1)
current = self.params.H.T.dot(current)*ht_i*(1-ht_i)
self.grads.H += current.dot(prev_ht_i.T)
self.grads.b1 += current.reshape((len(current),))
prev_xt = make_onehot(xs[t-i],self.vdim).reshape(self.vdim,1)
self.sgrads.L[xs[t-i]] = prev_xt.dot(current.T)[xs[t-i]]
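In Example 1, xt.dot(current.T) builds a full vdim-by-hdim matrix whose only nonzero row is row xs[t], and indexing with [xs[t]] then pulls that row back out. A standalone check of that identity with toy sizes:

import numpy as np

vdim, hdim, idx = 6, 3, 2
current = np.random.randn(hdim, 1)         # column vector, as in backprop above
xt = np.zeros((vdim, 1)); xt[idx] = 1.0    # make_onehot(idx, vdim).reshape(vdim, 1)
full = xt.dot(current.T)                   # (vdim, hdim); only row idx is nonzero
assert np.allclose(full[idx], current.ravel())

So writing self.sgrads.L[xs[t]] = current.ravel() would record the same sparse row update without materialising the vdim-by-hdim outer product.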
Example 2: forwardProp
def forwardProp(self,node, correct=[], guess=[]):
cost = total = 0.0
# this is exactly the same setup as forwardProp in rnn.py
if node.isLeaf == True:
node.fprop = True
node.hActs1 = self.L[:,node.word]
node.hActs2 = self.ReLU(self.W2.dot(node.hActs1)+self.b2)
node.probs = softmax(self.Ws.dot(node.hActs2)+self.bs)
p = node.probs*make_onehot(node.label,len(self.bs))
cost = -np.log(np.sum(p))
correct.append(node.label)
guess.append(np.argmax(node.probs))
return cost, 1
c1,t1 = self.forwardProp(node.left,correct,guess)
c2,t2 = self.forwardProp(node.right,correct,guess)
if node.left.fprop and node.right.fprop:
node.fprop = True
h = np.hstack([node.left.hActs1, node.right.hActs1])
node.hActs1 = self.ReLU(self.W1.dot(h) + self.b1)
node.hActs2 = self.ReLU(self.W2.dot(node.hActs1) + self.b2)
node.probs = softmax(self.Ws.dot(node.hActs2)+self.bs)
p = node.probs*make_onehot(node.label,len(self.bs))
cost = -np.log(np.sum(p))
correct.append(node.label)
guess.append(np.argmax(node.probs))
cost += c1
cost += c2
total += t1
total += t2
return cost, total + 1
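Example 2 computes the gold-label cross-entropy as -np.log(np.sum(node.probs * make_onehot(node.label, C))); multiplying by a one-hot vector and summing simply picks out the probability of the gold label, so the expression equals -np.log(node.probs[node.label]). The same pattern appears in several of the later examples. A quick check with dummy numbers:

import numpy as np

probs = np.array([0.1, 0.7, 0.2])          # a softmax output (dummy values)
label = 1
onehot = np.zeros_like(probs); onehot[label] = 1.0
assert np.isclose(-np.log(np.sum(probs * onehot)), -np.log(probs[label]))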
Example 3: forwardProp
def forwardProp(self,node,correct, guess):
cost = total = 0.0
if node.isLeaf == True:
node.fprop = True
node.hActs1 = self.L[:, node.word]
node.probs = softmax(self.Ws.dot(node.hActs1)+self.bs)
p = node.probs*make_onehot(node.label, len(self.bs))
cost = -np.log(np.sum(p))
correct.append(node.label)
guess.append(np.argmax(node.probs))
return cost, 1
c1,t1 = self.forwardProp(node.left,correct,guess)
c2,t2 = self.forwardProp(node.right,correct,guess)
if node.left.fprop and node.right.fprop:
node.fprop = True
h = np.hstack([node.left.hActs1, node.right.hActs1])
tmp = np.zeros(len(node.left.hActs1))
for i in range(len(tmp)):
tmp[i] = h.dot(self.V[i]).dot(h)
node.hActs1 = self.ReLU(self.W.dot(h) + self.b + tmp)
node.probs = softmax(self.Ws.dot(node.hActs1)+self.bs)
p = node.probs*make_onehot(node.label,len(self.bs))
cost = -np.log(np.sum(p))
correct.append(node.label)
guess.append(np.argmax(node.probs))
cost += c1
cost += c2
total += t1
total += t2
return cost, total + 1
Example 4: backprop
def backprop(self,xs,ys,hs_f,hs_b,y_hat):
inverted_xs = list(reversed(xs))
ns = len(xs)
ht_f = hs_f[ns-1].reshape(len(hs_f[ns-1]),1)
ht_b = hs_b[ns-1].reshape(len(hs_b[ns-1]),1)
delta = self.params.weights*(y_hat -ys)
self.grads.b2 += delta
delta = delta.reshape(len(ys),1)
self.grads.U += delta.dot(hstack([ht_f,ht_b]).reshape((1,2*len(ht_f))))
# H and L
t = ns-1 # last t
current_f = self.params.U.T.dot(delta)[:self.hdim] * ht_f * (1-ht_f)
current_b = self.params.U.T.dot(delta)[self.hdim:] * ht_b * (1-ht_b) # the common part
# update initial Hs
prev_ht_f = hs_f[t-1].reshape(len(hs_f[t-1]),1)
self.grads.H_f += current_f.dot(prev_ht_f.T)
self.grads.b1_f += current_f.reshape((len(current_f),))
prev_ht_b = hs_b[t-1].reshape(len(hs_b[t-1]),1)
self.grads.H_b += current_b.dot(prev_ht_b.T)
self.grads.b1_b += current_b.reshape((len(current_b),))
# update initial L
xt = make_onehot(xs[t],self.vdim).reshape(self.vdim,1)
self.sgrads.L[xs[t]] = xt.dot(current_f.T)[xs[t]]
inv_xt = make_onehot(inverted_xs[t],self.vdim).reshape(self.vdim,1)
self.sgrads.L[inverted_xs[t]] = inv_xt.dot(current_b.T)[inverted_xs[t]]
# update the rest
for i in range(1,self.bptt):
if t<i: # so that h[-2] doesn't return anything
continue
ht_f_i = hs_f[t-i].reshape(len(hs_f[t-i]),1)
prev_ht_f_i = hs_f[t-i-1].reshape(len(hs_f[t-i-1]),1)
current_f = self.params.H_f.T.dot(current_f)*ht_f_i*(1-ht_f_i)
self.grads.H_f += current_f.dot(prev_ht_f_i.T)
self.grads.b1_f += current_f.reshape((len(current_f),))
ht_b_i = hs_b[t-i].reshape(len(hs_b[t-i]),1)
prev_ht_b_i = hs_b[t-i-1].reshape(len(hs_b[t-i-1]),1)
current_b = self.params.H_b.T.dot(current_b)*ht_b_i*(1-ht_b_i)
self.grads.H_b += current_b.dot(prev_ht_b_i.T)
self.grads.b1_b += current_b.reshape((len(current_b),))
prev_xt = make_onehot(xs[t-i],self.vdim).reshape(self.vdim,1)
self.sgrads.L[xs[t-i]] = prev_xt.dot(current_f.T)[xs[t-i]]
prev_inv_xt = make_onehot(inverted_xs[t-i],self.vdim).reshape(self.vdim,1)
self.sgrads.L[inverted_xs[t-i]] = prev_inv_xt.dot(current_b.T)[inverted_xs[t-i]]
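In Example 4 the output matrix U spans both directions: the vector fed to it is hstack([ht_f, ht_b]), and the backpropagated signal U.T.dot(delta) is sliced at self.hdim into a forward half (current_f) and a backward half (current_b). A small standalone check of why that slice is valid:

import numpy as np

hdim, ydim = 4, 3
U = np.random.randn(ydim, 2 * hdim)        # output weights over [h_forward, h_backward]
ht_f, ht_b = np.random.randn(hdim), np.random.randn(hdim)
h = np.hstack([ht_f, ht_b])

# The score decomposes into a forward part plus a backward part,
# which is why U.T.dot(delta) can be split at hdim during backprop.
assert np.allclose(U.dot(h), U[:, :hdim].dot(ht_f) + U[:, hdim:].dot(ht_b))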
Example 5: backProp
def backProp(self,node,error=None):
# Clear nodes
node.fprop = False
################
# TODO: Implement the recursive backProp function
# - you should update self.dWs, self.dbs, self.dW, self.db, and self.dL[node.word] accordingly
# - node: your current node in the parse tree
# - error: error that has been passed down from a previous iteration
################
errorCur = node.probs - make_onehot(node.label,len(self.bs))
self.dWs += np.outer(errorCur, node.hActs1)
self.dbs += errorCur
errorCur = errorCur.dot(self.Ws)
if error is not None:
errorCur += error
if node.isLeaf == True:
self.dL[node.word] += errorCur
return
errorCur = errorCur*self.df(node.hActs1)
self.dW += np.outer(errorCur,np.hstack([node.left.hActs1, node.right.hActs1]))
self.db += errorCur
errorDown = errorCur.dot(self.W)
self.backProp(node.left,errorDown[:self.wvecDim])
self.backProp(node.right,errorDown[self.wvecDim:])
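The line errorCur = node.probs - make_onehot(node.label, len(self.bs)) in Example 5 is the usual gradient of softmax plus cross-entropy with respect to the pre-softmax scores. A standalone finite-difference check of that identity (with a local softmax, since the one imported by these repositories is not shown here):

import numpy as np

def softmax(z):
    e = np.exp(z - np.max(z))
    return e / e.sum()

z = np.random.randn(5)
label = 3
probs = softmax(z)
onehot = np.zeros_like(z); onehot[label] = 1.0
analytic = probs - onehot                  # d(-log softmax(z)[label]) / dz

eps = 1e-6
numeric = np.zeros_like(z)
for i in range(len(z)):
    zp, zm = z.copy(), z.copy()
    zp[i] += eps; zm[i] -= eps
    numeric[i] = (-np.log(softmax(zp)[label]) + np.log(softmax(zm)[label])) / (2 * eps)

assert np.allclose(analytic, numeric, atol=1e-5)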
Example 6: backProp
def backProp(self,node,error=None):
# Clear nodes
node.fprop = False
# this is exactly the same setup as backProp in rnn.py
errorCur = node.probs - make_onehot(node.label,len(self.bs))
self.dWs += np.outer(errorCur,node.hActs2)
self.dbs += errorCur
errorCur = errorCur.dot(self.Ws)*self.df(node.hActs2)
self.dW2 += np.outer(errorCur,node.hActs1)
self.db2 += errorCur
errorCur = errorCur.dot(self.W2)
if error is not None:
errorCur += error
if node.isLeaf == True:
self.dL[node.word] += errorCur
return
errorCur = errorCur*self.df(node.hActs1)
tmp1 = np.ones(self.W1.shape).dot(np.diag(np.hstack([node.left.hActs1, node.right.hActs1])))
self.dW1 += np.diag(errorCur).dot(tmp1)
self.db1 += errorCur
errorCur = errorCur.dot(self.W1)
self.backProp(node.left,errorCur[:self.wvecDim])
self.backProp(node.right,errorCur[self.wvecDim:])
Example 7: compute_loss
def compute_loss(self, windows, labels):
"""
Compute the loss for a given dataset.
windows = same as for predict_proba
labels = list of class labels, for each row of windows
"""
#### YOUR CODE HERE ####
print "windows shape ", windows.shape
x = self.sparams.L[windows[:,0]]
for i in range(len(windows[0])-1):
x = np.concatenate((x,self.sparams.L[windows[:,i+1]]),axis=1)
z = self.params.W.dot(x.T)+self.params.b1.reshape((self.params.b1.shape[0],1))
h = tanh(z)
p = softmax(self.params.U.dot(h)+self.params.b2.reshape((self.params.b2.shape[0],1)))
labelArray = np.zeros((len(labels),self.params.b2.shape[0]))
for i in range(len(labels)):
labelArray[i] = make_onehot(labels[i],self.params.b2.shape[0])
batch = len(labels)
p = p*labelArray.T
p = np.sum(p,axis=0)
J = np.sum(-np.log(p))
Jreg = batch*(self.lreg/2.0)*(np.sum(self.params.W**2)+np.sum(self.params.U**2))
J += Jreg
#### END YOUR CODE ####
return J
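Example 7 fills labelArray one row at a time with make_onehot; with NumPy fancy indexing the same one-hot matrix, and the picked-out gold probabilities, can be obtained without the Python loop. A sketch with the same shapes (N examples, C classes, p holding one distribution per column as in compute_loss):

import numpy as np

N, C = 4, 5
labels = np.array([0, 3, 1, 4])
p = np.random.rand(C, N); p /= p.sum(axis=0)    # columns are per-example distributions

labelArray = np.zeros((N, C))
labelArray[np.arange(N), labels] = 1.0          # same matrix the make_onehot loop builds

# masking and summing is equivalent to indexing the gold-label entries directly
assert np.allclose(np.sum(p * labelArray.T, axis=0), p[labels, np.arange(N)])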
Example 8: _acc_grads
def _acc_grads(self, window, label):
"""
Accumulate gradients, given a training point
(window, label) of the format
window = [x_{i-1} x_{i} x_{i+1}] # three ints
label = {0,1,2,3,4} # single int, gives class
Your code should update self.grads and self.sgrads,
in order for gradient_check and training to work.
So, for example:
self.grads.U += (your gradient dJ/dU)
self.sgrads.L[i] = (gradient dJ/dL[i]) # this adds an update for that index
"""
xf = []
for idx in window:
xf.extend( self.sparams.L[idx]) # extract representation
tanhX = tanh(self.params.W.dot(xf) + self.params.b1)
softmaxP = softmax(self.params.U.dot(tanhX) + self.params.b2)
y = make_onehot(label, len(softmaxP))
delta2 = softmaxP -y
self.grads.U += outer(delta2, tanhX) + self.lreg * self.params.U
self.grads.b2 += delta2
delta1 = self.params.U.T.dot(delta2)*(1. - tanhX*tanhX)
self.grads.W += outer(delta1, xf) + self.lreg * self.params.W
self.grads.b1 += delta1
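The term self.lreg * self.params.U added to the U gradient in Example 8 is the derivative of an L2 penalty of the form (lreg/2) * np.sum(U**2) on the loss, the same penalty Example 7 adds to J (there scaled by the batch size). A quick finite-difference check of just that term:

import numpy as np

lreg = 0.001
U = np.random.randn(3, 4)

def jreg(U):
    return (lreg / 2.0) * np.sum(U ** 2)

eps = 1e-6
i, j = 1, 2
Up, Um = U.copy(), U.copy()
Up[i, j] += eps; Um[i, j] -= eps
numeric = (jreg(Up) - jreg(Um)) / (2 * eps)
assert np.isclose(lreg * U[i, j], numeric)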
Example 9: compute_loss
def compute_loss(self, windows, labels):
"""
Compute the loss for a given dataset.
windows = same as for predict_proba
labels = list of class labels, for each row of windows
"""
#### YOUR CODE HERE ####
if not hasattr(windows[0], "__iter__"):
windows = [windows]
labels = [labels]
N = len(windows)
# x = self.sparams.L[windows]
# x = x.reshape((N,x.shape[-2]*x.shape[-1]))
# z = x.dot(self.params.W.T) + self.params.b1
# h = tanh(z)
# z2 = h.dot(self.params.U.T) + self.params.b2
# p = softmax(z2)
# J -= sum(log(p[0][labels])
# J += (self.lreg / 2.0) * (sum(self.params.W**2.0) + sum(self.params.U**2.0))
J = 0
for n in xrange(N):
x = self.sparams.L[windows[n]]
x = reshape(x, x.shape[0]*x.shape[1])
h = tanh(self.params.W.dot(x) + self.params.b1)
y_hat = softmax(self.params.U.dot(h) + self.params.b2)
y = make_onehot(labels[n], len(y_hat))
J -= sum(y*log(y_hat))
J += (self.lreg / 2.0) * (sum(self.params.W**2.0) + sum(self.params.U**2.0))
#### END YOUR CODE ####
return J
Example 10: b_prop
def b_prop(self, ys):
#L = self.params['L']
Wh = self.params['Wh']
#Wx = self.params['Wx']
U = self.params['U']
b1 = self.params['b1']
b2 = self.params['b2']
N = len(ys)
delta_above = np.zeros(self.hdim)
for t in xrange(N-1,-1, -1):
delta_3 = self.yhats[:,t] - make_onehot(ys[t], self.outdim)
self.grads['U'] += np.outer(delta_3, self.hs[:,t])
self.grads['b2'] += delta_3
dh = np.dot(np.transpose(U), delta_3) + delta_above
delta_2 = dh * (self.hs[:,t] > 0)
self.grads['b1'] += delta_2
self.grads['Wh'] += np.outer(delta_2, self.hs[:,t-1])
#self.grads['Wx'] += np.outer(delta_2, L[:,xs[t]])
#self.grads['L'][:,xs[t]] += np.dot(np.transpose(Wx), delta_2)
delta_below = np.dot(np.transpose(Wh), delta_2)
delta_above = delta_below
return delta_below
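Example 10 backpropagates through a ReLU hidden layer with the mask (self.hs[:,t] > 0): the incoming gradient passes only where the activation is positive. A minimal standalone illustration of that mask:

import numpy as np

h_pre = np.array([-1.5, 0.2, 0.0, 3.0])    # pre-activation values (dummy)
h = np.maximum(h_pre, 0.0)                 # ReLU forward pass
dh = np.array([0.4, -0.3, 0.8, 0.1])       # gradient arriving from the layer above
delta = dh * (h > 0)                       # gradient after the ReLU, as in b_prop
assert np.allclose(delta, [0.0, -0.3, 0.0, 0.1])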
Example 11: backProp
def backProp(self,node,error=None):
# Clear nodes
node.fprop = False
errorCur = node.probs - make_onehot(node.label,len(self.bs))
self.dWs += np.outer(errorCur, node.hActs1)
self.dbs += errorCur
errorCur = errorCur.dot(self.Ws)
if error is not None:
errorCur += error
if node.isLeaf == True:
self.dL[node.word] += errorCur
return
errorCur = errorCur*self.df(node.hActs1)
LR = np.hstack([node.left.hActs1, node.right.hActs1])
self.dW += np.outer(errorCur,LR)
self.db += errorCur
S = np.zeros(len(LR))
for i in range(len(self.V)):
self.dV[i] += errorCur[i]*np.outer(LR,LR)
S += (self.V[i]+self.V[i].T).dot(LR)*errorCur[i]
errorDown = errorCur.dot(self.W) + S
self.backProp(node.left,errorDown[:self.wvecDim])
self.backProp(node.right,errorDown[self.wvecDim:])
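Example 11 pushes error through the tensor term of Example 3 using (self.V[i] + self.V[i].T).dot(LR); that expression is the gradient of the quadratic form h.dot(V[i]).dot(h) with respect to h. A small numeric check of the identity for one slice of the tensor:

import numpy as np

n = 4
V = np.random.randn(n, n)                  # one tensor slice V[i] (not symmetric in general)
h = np.random.randn(n)

def quad(h):
    return h.dot(V).dot(h)

analytic = (V + V.T).dot(h)                # d(h^T V h) / dh, the term summed into errorDown

eps = 1e-6
numeric = np.zeros(n)
for k in range(n):
    hp, hm = h.copy(), h.copy()
    hp[k] += eps; hm[k] -= eps
    numeric[k] = (quad(hp) - quad(hm)) / (2 * eps)

assert np.allclose(analytic, numeric, atol=1e-5)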
Example 12: _acc_grads
def _acc_grads(self, window, label):
"""
Accumulate gradients, given a training point
(window, label) of the format
window = [x_{i-1} x_{i} x_{i+1}] # three ints
label = {0,1,2,3,4} # single int, gives class
Your code should update self.grads and self.sgrads,
in order for gradient_check and training to work.
So, for example:
self.grads.U += (your gradient dJ/dU)
self.sgrads.L[i] = (gradient dJ/dL[i]) # this adds an update for that index
"""
#### YOUR CODE HERE ####
##
# Forward propagation
x = hstack(self.sparams.L[window, :])
h = tanh(2*(self.params.W.dot(x)+self.params.b1))
p = softmax(self.params.U.dot(h)+self.params.b2)
##
y = make_onehot(label, 5)
delta = p - y
# Backpropagation
self.grads.U += outer(delta, h) + self.lreg * self.params.U
self.grads.b2 += delta
gradh = dot(self.params.U.T,delta) * (1-h**2)
self.grads.W += outer(gradh, x) + self.lreg * self.params.W
self.grads.b1 += gradh
dL = self.params.W.T.dot(gradh).reshape(self.window_size, self.word_vec_size)
for i in xrange(self.window_size):
self.sgrads.L[window[i], :] = dL[i]
Example 13: _acc_grads
def _acc_grads(self, xs, ys):
"""
Accumulate gradients, given a pair of training sequences:
xs = [<indices>] # input words
ys = [<indices>] # output words (to predict)
Your code should update self.grads and self.sgrads,
in order for gradient_check and training to work.
So, for example:
self.grads.H += (your gradient dJ/dH)
self.sgrads.L[i] = (gradient dJ/dL[i]) # update row
Per the handout, you should:
- make predictions by running forward in time
through the entire input sequence
- for *each* output word in ys, compute the
gradients with respect to the cross-entropy
loss for that output word
- run backpropagation-through-time for self.bptt
timesteps, storing grads in self.grads (for H)
and self.sgrads (for L,U)
You'll want to store your predictions \hat{y}(t)
and the hidden layer values h(t) as you run forward,
so that you can access them during backpropagation.
At time 0, you should initialize the hidden layer to
be a vector of zeros.
"""
# Expect xs as list of indices
ns = len(xs) #3
# make matrix here of corresponding h(t)
# hs[-1] = initial hidden state (zeros)
hs = zeros((ns+1, self.hdim))
# predicted probas
ps = zeros((ns, self.vdim))
#### YOUR CODE HERE ####
##
# Forward propagation
# for each time step
for t in xrange(ns):
hs[t] = sigmoid(dot(self.params.H, hs[t - 1]) + self.sparams.L[xs[t]])
ps[t] = softmax(dot(self.params.U, hs[t]))
##
# Backward propagation through time
for j in xrange(ns):
y = make_onehot(ys[j], self.vdim)
y_hat_minus_y = ps[j] - y
self.grads.U += outer(y_hat_minus_y, hs[j])
delta = dot(self.params.U.T, y_hat_minus_y) * hs[j] * (1.0 - hs[j])
# start at j and go back self.bptt times (total self.bptt + 1 elements, including current one)
for t in xrange(j, j - self.bptt - 1, -1):
if t - 1 >= -1:
self.grads.H += outer(delta, hs[t - 1]) #See from above.. hs[-1] is list of zeros.
self.sgrads.L[xs[t]] = delta
delta = dot(self.params.H.T, delta) * hs[t - 1] * (1.0 - hs[t - 1])
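Example 13 feeds self.sparams.L[xs[t]] directly into the hidden layer instead of multiplying the embedding matrix by a one-hot input vector; the two are equivalent, which is also why the sparse gradient for that row is simply delta. A quick check of the row-selection identity:

import numpy as np

vdim, hdim, idx = 8, 5, 3
L = np.random.randn(vdim, hdim)            # word-embedding matrix, one row per word
x = np.zeros(vdim); x[idx] = 1.0           # make_onehot(idx, vdim)
assert np.allclose(L.T.dot(x), L[idx])     # a one-hot input selects row idx of L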
Example 14: forwardProp
def forwardProp(self,node, correct=[], guess=[]):
cost = total = 0.0
# this is exactly the same setup as forwardProp in rnn.py
if node.isLeaf == True:
node.fprop = True
node.hActs1 = self.L[:,node.word]
#node.hActs2 = self.ReLU(self.W2.dot(node.hActs1)+self.b2)
tmp = node.hActs1*self.mask1
tmpMaxout = np.zeros((self.maxoutK, self.middleDim))
for i in range(self.maxoutK):
tmpMaxout[i] = self.W2[i].dot(tmp) + self.b2[i]
(node.hActs2, node.idx) = self.maxout(tmpMaxout)
node.probs = softmax(self.Ws.dot(node.hActs2*self.mask)+self.bs)
p = node.probs*make_onehot(node.label,len(self.bs))
cost = -np.log(np.sum(p))
correct.append(node.label)
guess.append(np.argmax(node.probs))
return cost, 1
c1,t1 = self.forwardProp(node.left,correct,guess)
c2,t2 = self.forwardProp(node.right,correct,guess)
if node.left.fprop and node.right.fprop:
node.fprop = True
h = np.hstack([node.left.hActs1, node.right.hActs1])
node.hActs1 = self.ReLU(self.W1.dot(h) + self.b1)
#node.hActs2 = self.ReLU(self.W2.dot(node.hActs1)+self.b2)
tmp = node.hActs1*self.mask1
tmpMaxout = np.zeros((self.maxoutK, self.middleDim))
for i in range(self.maxoutK):
tmpMaxout[i] = self.W2[i].dot(tmp) + self.b2[i]
(node.hActs2, node.idx) = self.maxout(tmpMaxout)
node.probs = softmax(self.Ws.dot(node.hActs2*self.mask)+self.bs)
p = node.probs*make_onehot(node.label,len(self.bs))
cost = -np.log(np.sum(p))
correct.append(node.label)
guess.append(np.argmax(node.probs))
cost += c1
cost += c2
total += t1
total += t2
return cost, total + 1
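Example 14 calls self.maxout(tmpMaxout), which is not shown on this page. Judging only from how its result is used (a hidden vector plus a winning-piece index per unit), it presumably takes the (maxoutK, middleDim) array of piece activations and returns the element-wise maximum over the K pieces together with the argmax. A hedged sketch of such a helper, not the model's actual implementation:

import numpy as np

def maxout(z):
    # z has shape (K, middleDim): K linear pieces per hidden unit.
    # Returns (activations, index of the winning piece for each unit).
    idx = np.argmax(z, axis=0)
    acts = z[idx, np.arange(z.shape[1])]
    return acts, idx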
Example 15: _acc_grads
def _acc_grads(self, window, label):
"""
Accumulate gradients, given a training point
(window, label) of the format
window = [x_{i-1} x_{i} x_{i+1}] # three ints
label = {0,1,2,3,4} # single int, gives class
Your code should update self.grads and self.sgrads,
in order for gradient_check and training to work.
So, for example:
self.grads.U += (your gradient dJ/dU)
self.sgrads.L[i] = (gradient dJ/dL[i]) # this adds an update for that index
"""
#### YOUR CODE HERE ####
L = self.sparams.L
U = self.params.U
W = self.params.W
b1 = self.params.b1
b2 = self.params.b2
windowSize = self.windowSize
wordVecLen = self.wordVecLen
lambda_ = self.lreg
alpha = self.alpha
##
# Forward propagation
x = hstack(L[window, :])
z1 = W.dot(x) + b1
h = tanh(z1)
z2 = U.dot(h) + b2
y_hat = softmax(z2)
##
# Backpropagation
target = make_onehot(label, len(y_hat))
delta = y_hat - target
#self.grads.U += delta.dot(h.T) + lambda_ * U
# np.outer is handy here
self.grads.U += outer(delta, h) + lambda_ * U
self.grads.b2 += delta
grad_h = U.T.dot(delta) * (1 - h ** 2)
self.grads.W += outer(grad_h, x) + lambda_ * W
self.grads.b1 += grad_h
sgrad_L = W.T.dot(grad_h)
sgrad_L = sgrad_L.reshape(windowSize, wordVecLen)
for i in xrange(windowSize):
self.sgrads.L[window[i], :] = sgrad_L[i, :]