This page collects typical usage examples of Python's theano.dot function. If you have been wondering what the dot function does, how to use it, or what real-world calls look like, the curated code examples below should help.
Fifteen code examples of the dot function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: oneStep
def oneStep(u_tm4, u_t, x_tm3, x_tm1, y_tm1, W, W_in_1, W_in_2, W_feedback, W_out):
    x_t = T.tanh(theano.dot(x_tm1, W) +
                 theano.dot(u_t, W_in_1) +
                 theano.dot(u_tm4, W_in_2) +
                 theano.dot(y_tm1, W_feedback))
    y_t = theano.dot(x_tm3, W_out)
    return [x_t, y_t]
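A step function with multiple taps like oneStep is meant to be driven by theano.scan. The following sketch (variable names and shapes assumed, mirroring the Theano scan tutorial) shows the wiring: the sequence taps [-4, 0] supply u_tm4 and u_t, and the output taps [-3, -1] supply x_tm3 and x_tm1.

import theano
import theano.tensor as T

W = T.matrix('W'); W_in_1 = T.matrix('W_in_1'); W_in_2 = T.matrix('W_in_2')
W_feedback = T.matrix('W_feedback'); W_out = T.matrix('W_out')
u = T.matrix('u')    # sequence of input vectors
x0 = T.matrix('x0')  # initial x must be a matrix: it has to cover taps -3 and -1
y0 = T.vector('y0')  # scan only needs y[-1], so a vector suffices

([x_vals, y_vals], updates) = theano.scan(
    fn=oneStep,
    sequences=dict(input=u, taps=[-4, 0]),
    outputs_info=[dict(initial=x0, taps=[-3, -1]), y0],
    non_sequences=[W, W_in_1, W_in_2, W_feedback, W_out],
    strict=True)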
Example 2: encoder
def encoder(infomatf, infomatb, htm1matf, ctm1matf, htm1matb, ctm1matb, Eenf, Eenb, Wenf, Wenb, benf, benb):
    # infomatf / infomatb are matrices of shape (batch, D)
    dim = Eenf.shape[1]
    #
    xtmatf = theano.dot(infomatf, Eenf)
    xtmatb = theano.dot(infomatb, Eenb)
    #
    pretranf = T.concatenate([xtmatf, htm1matf], axis=1)
    pretranb = T.concatenate([xtmatb, htm1matb], axis=1)
    #
    posttranf = theano.dot(pretranf, Wenf) + benf
    posttranb = theano.dot(pretranb, Wenb) + benb
    # forward LSTM gates: input, forget, cell candidate, output
    itmatf = T.nnet.sigmoid(posttranf[:, 0:dim])
    ftmatf = T.nnet.sigmoid(posttranf[:, dim:(2 * dim)])
    gtmatf = T.tanh(posttranf[:, (2 * dim):(3 * dim)])
    otmatf = T.nnet.sigmoid(posttranf[:, (3 * dim):])
    ctmatf = ftmatf * ctm1matf + itmatf * gtmatf
    #
    htmatf = otmatf * T.tanh(ctmatf)
    # backward LSTM gates, same layout
    itmatb = T.nnet.sigmoid(posttranb[:, 0:dim])
    ftmatb = T.nnet.sigmoid(posttranb[:, dim:(2 * dim)])
    gtmatb = T.tanh(posttranb[:, (2 * dim):(3 * dim)])
    otmatb = T.nnet.sigmoid(posttranb[:, (3 * dim):])
    ctmatb = ftmatb * ctm1matb + itmatb * gtmatb
    #
    htmatb = otmatb * T.tanh(ctmatb)
    #
    return htmatf, ctmatf, htmatb, ctmatb
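A hedged sketch of how such a step function is typically unrolled over time with theano.scan, using hypothetical symbolic inputs: the backward stream is simply fed the time-reversed sequence, which yields a bidirectional LSTM once the two hidden streams are concatenated.

import theano
import theano.tensor as T

seq = T.tensor3('seq')                     # (time, batch, D); hypothetical input
h0f, c0f = T.matrix('h0f'), T.matrix('c0f')
h0b, c0b = T.matrix('h0b'), T.matrix('c0b')
Eenf, Eenb = T.matrix('Eenf'), T.matrix('Eenb')
Wenf, Wenb = T.matrix('Wenf'), T.matrix('Wenb')
benf, benb = T.vector('benf'), T.vector('benb')

(hf, cf, hb, cb), _ = theano.scan(
    fn=encoder,
    sequences=[seq, seq[::-1]],            # backward direction sees reversed time
    outputs_info=[h0f, c0f, h0b, c0b],
    non_sequences=[Eenf, Eenb, Wenf, Wenb, benf, benb])
bidir = T.concatenate([hf, hb[::-1]], axis=2)   # re-align backward states with forward time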
Example 3: test_specify_shape_inplace
def test_specify_shape_inplace(self):
    # test that specify_shape doesn't break the insertion of inplace ops
    dtype = self.dtype
    if dtype is None:
        dtype = theano.config.floatX
    rng = numpy.random.RandomState(utt.fetch_seed())
    a = numpy.asarray(rng.uniform(1, 2, [40, 40]), dtype=dtype)
    a = self.cast_value(a)
    a_shared = self.shared_constructor(a)
    b = numpy.asarray(rng.uniform(1, 2, [40, 40]), dtype=dtype)
    b = self.cast_value(b)
    b_shared = self.shared_constructor(b)
    s = numpy.zeros((40, 40), dtype=dtype)
    s = self.cast_value(s)
    s_shared = self.shared_constructor(s)
    f = theano.function([], updates={s_shared: theano.dot(a_shared, b_shared) + s_shared})
    topo = f.maker.env.toposort()
    f()
    # [Gemm{inplace}(<TensorType(float64, matrix)>, 0.01, <TensorType(float64, matrix)>, <TensorType(float64, matrix)>, 2e-06)]
    if theano.config.mode != "FAST_COMPILE":
        assert sum([node.op.__class__.__name__ in ["Gemm", "GpuGemm", "StructuredDot"] for node in topo]) == 1
        assert all(
            node.op == tensor.blas.gemm_inplace for node in topo if isinstance(node.op, tensor.blas.Gemm)
        )
        assert all(node.op.inplace for node in topo if node.op.__class__.__name__ == "GpuGemm")
        # There is no inplace gemm for sparse
        # assert all(node.op.inplace for node in topo if node.op.__class__.__name__ == "StructuredDot")
    s_shared_specify = tensor.specify_shape(s_shared, s_shared.get_value(borrow=True).shape)
    # now test with the specify_shape op in the output
    f = theano.function(
        [], s_shared.shape, updates={s_shared: theano.dot(a_shared, b_shared) + s_shared_specify}
    )
    topo = f.maker.env.toposort()
    shp = f()
    assert numpy.all(shp == (40, 40))
    if theano.config.mode != "FAST_COMPILE":
        assert sum([node.op.__class__.__name__ in ["Gemm", "GpuGemm", "StructuredDot"] for node in topo]) == 1
        assert all(
            node.op == tensor.blas.gemm_inplace for node in topo if isinstance(node.op, tensor.blas.Gemm)
        )
        assert all(node.op.inplace for node in topo if node.op.__class__.__name__ == "GpuGemm")
    # now test with the specify_shape op in the inputs and outputs
    a_shared = tensor.specify_shape(a_shared, a_shared.get_value(borrow=True).shape)
    b_shared = tensor.specify_shape(b_shared, b_shared.get_value(borrow=True).shape)
    f = theano.function(
        [], s_shared.shape, updates={s_shared: theano.dot(a_shared, b_shared) + s_shared_specify}
    )
    topo = f.maker.env.toposort()
    shp = f()
    assert numpy.all(shp == (40, 40))
    if theano.config.mode != "FAST_COMPILE":
        assert sum([node.op.__class__.__name__ in ["Gemm", "GpuGemm", "StructuredDot"] for node in topo]) == 1
        assert all(
            node.op == tensor.blas.gemm_inplace for node in topo if isinstance(node.op, tensor.blas.Gemm)
        )
        assert all(node.op.inplace for node in topo if node.op.__class__.__name__ == "GpuGemm")
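The optimization this test exercises can be seen in isolation. A minimal sketch, assuming the default FAST_RUN mode and a recent-enough Theano (where the compiled graph is reached via f.maker.fgraph rather than the older f.maker.env used above):

import numpy
import theano

floatX = theano.config.floatX
a = theano.shared(numpy.random.uniform(1, 2, (40, 40)).astype(floatX))
b = theano.shared(numpy.random.uniform(1, 2, (40, 40)).astype(floatX))
s = theano.shared(numpy.zeros((40, 40), dtype=floatX))

# An update of the form s <- dot(a, b) + s is recognized by the BLAS
# optimizer and fused into a single destructive Gemm{inplace} node.
f = theano.function([], [], updates=[(s, theano.dot(a, b) + s)])
print(f.maker.fgraph.toposort())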
Example 4: apply
def apply(self, state_below, mask_below, init_state=None, context=None):
    if state_below.ndim == 3:
        batch_size = state_below.shape[1]
        n_steps = state_below.shape[0]
    else:
        raise NotImplementedError
    if self.with_contex:
        if init_state is None:
            init_state = T.tanh(theano.dot(context, self.W_c_init))
        c_z = theano.dot(context, self.W_cz)
        c_r = theano.dot(context, self.W_cr)
        c_h = theano.dot(context, self.W_ch)
        non_sequences = [c_z, c_r, c_h]
        rval, updates = theano.scan(self._step_forward_with_context,
                                    sequences=[state_below, mask_below],
                                    outputs_info=[init_state],
                                    non_sequences=non_sequences,
                                    n_steps=n_steps)
    else:
        if init_state is None:
            init_state = T.alloc(numpy.float32(0.), batch_size, self.n_hids)
        rval, updates = theano.scan(self._step_forward,
                                    sequences=[state_below, mask_below],
                                    outputs_info=[init_state],
                                    n_steps=n_steps)
    self.output = rval
    return self.output
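The helper self._step_forward_with_context is not shown in this example. A plausible GRU-style step consistent with the precomputed c_z/c_r/c_h non-sequences could look like the following sketch (all W_x*/W_h*/b_* weight names are hypothetical):

def _step_forward_with_context(self, x_t, x_m, h_tm1, c_z, c_r, c_h):
    # Hypothetical GRU step; c_z/c_r/c_h are the context projections
    # precomputed once in apply() and passed as scan non-sequences.
    z = T.nnet.sigmoid(theano.dot(x_t, self.W_xz) + theano.dot(h_tm1, self.W_hz) + c_z + self.b_z)
    r = T.nnet.sigmoid(theano.dot(x_t, self.W_xr) + theano.dot(h_tm1, self.W_hr) + c_r + self.b_r)
    h_tilde = T.tanh(theano.dot(x_t, self.W_xh) + r * theano.dot(h_tm1, self.W_hh) + c_h + self.b_h)
    h_t = (1. - z) * h_tm1 + z * h_tilde
    # keep the previous state where the mask is zero (padded positions)
    h_t = x_m[:, None] * h_t + (1. - x_m)[:, None] * h_tm1
    return h_t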
Example 5: _step_forward_with_attention
def _step_forward_with_attention(self, x_t, x_m, h_tm1, c, c_mask, c_x):
    '''
    x_t: input at time t
    x_m: mask of x_t
    h_tm1: previous state
    c_x: context of the rnn
    '''
    # attended = self.attention_layer.apply(c, c_mask, h_tm1)
    # c_z = theano.dot(attended, self.W_cz)
    # c_r = theano.dot(attended, self.W_cr)
    # c_h = theano.dot(attended, self.W_ch)
    # return [self._step_forward_with_context(x_t, x_m, h_tm1, c_z, c_r, c_h), attended]
    #### new arc
    h1 = self._step_forward(x_t, x_m, h_tm1)
    attended = self.attention_layer.apply(c, c_mask, c_x, h1)
    z = T.nnet.sigmoid(theano.dot(attended, self.W_cz)
                       + theano.dot(h1, self.W_hz2) + self.b_z2)
    r = T.nnet.sigmoid(theano.dot(attended, self.W_cr)
                       + theano.dot(h1, self.W_hr2) + self.b_r2)
    c_h = theano.dot(attended, self.W_ch)
    h2 = T.tanh((T.dot(h1, self.W_hh2) + self.b_h2) * r + c_h)
    h2 = h1 * z + (1. - z) * h2
    if x_m is not None:  # truth-testing a symbolic tensor raises in Theano
        h2 = x_m[:, None] * h2 + (1. - x_m)[:, None] * h1
    return h2, attended
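The attention_layer.apply call above is likewise external to this example. A common additive-attention form consistent with its (c, c_mask, c_x, h1) signature is sketched below (all weight names hypothetical; c_x is assumed to be a precomputed projection of the annotations c, passed in to save work inside scan):

def attention_apply(c, c_mask, c_x, h1, W_comb_att, U_att, c_att):
    # c: (src_len, batch, ctx_dim) source annotations; c_mask masks padding
    pstate = theano.dot(h1, W_comb_att)           # project the decoder state
    pctx = T.tanh(c_x + pstate[None, :, :])       # additive combination
    e = theano.dot(pctx, U_att) + c_att           # (src_len, batch, 1) scores
    e = e.reshape([e.shape[0], e.shape[1]])
    alpha = T.exp(e - e.max(axis=0, keepdims=True)) * c_mask
    alpha = alpha / alpha.sum(axis=0, keepdims=True)
    return (c * alpha[:, :, None]).sum(axis=0)    # expected (attended) context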
Example 6: one_step_no_output
def one_step_no_output(self, x_t, h_tm1, W_xc, W_hc, b_c, W_ih, W_hh, W_ho, b_o, b_h):
    # carry gate C blends the candidate state with the raw input (highway style)
    C = sigmoid(theano.dot(x_t, W_xc) + theano.dot(h_tm1, W_hc) + b_c)
    h_t_hat = T.tanh(theano.dot(x_t, W_ih) + theano.dot(h_tm1, W_hh) + b_h)
    h_t = (1 - C) * h_t_hat + C * x_t
    if self.ignore_zero:
        # stop scanning once an all-zero (padding) input is reached
        return [h_t, h_t], theano.scan_module.until(T.eq(T.sum(abs(x_t)), 0))
    return [h_t, h_t]
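A hedged sketch of unrolling this step with theano.scan (hypothetical x, h0, and weights): because the step returns a scan_module.until condition, the loop stops early when it hits an all-zero padding input.

[h_seq, h_copy], _ = theano.scan(
    fn=self.one_step_no_output,
    sequences=x,                      # (time, features) padded input sequence
    outputs_info=[h0, None],          # only the first output is fed back
    non_sequences=[W_xc, W_hc, b_c, W_ih, W_hh, W_ho, b_o, b_h])
final_state = h_seq[-1]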
Example 7: one_step
def one_step(self, x_t, h_tm1, W_ih, W_hh, b_h, W_ho, b_o):
    h_t = T.tanh(theano.dot(x_t, W_ih) + theano.dot(h_tm1, W_hh) + b_h)
    y_t = theano.dot(h_t, W_ho) + b_o
    y_t = sigmoid(y_t)
    if self.ignore_zero:
        return [h_t, y_t], theano.scan_module.until(T.eq(T.sum(abs(x_t)), 0))
    return [h_t, y_t]
Example 8: setL
def setL(x, name1="w", name2="b", name3="b_", act="sigmoid"):
    # tied-weight autoencoder layer: encode with w, decode with w.T
    w = self.seg.params[name1]
    b = self.seg.params[name2]
    b_ = self.seg.params[name3]
    activate = self.getfunc(act)
    y = activate(theano.dot(x, w) + b)
    z = activate(theano.dot(y, w.T) + b_)
    return zip([w, b, b_], theano.grad(self.lossfunc(x, z), [w, b, b_]))
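The (parameter, gradient) pairs returned by setL plug directly into SGD updates. A minimal sketch with a hypothetical learning rate (under Python 3 the zip must be materialized before reuse):

lr = 0.01                                   # hypothetical learning rate
updates = [(p, p - lr * g) for p, g in list(setL(x))]
train = theano.function([x], [], updates=updates)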
Example 9: step
def step(x_t, h_t_1, W_h, W_x, W_y):
    # Add breakpoint
    h = t.tanh(theano.dot(W_h, h_t_1) + theano.dot(W_x, x_t) + b_h)
    y = theano.dot(W_y, h) + b_y
    e_y = t.exp(y - y.max())        # numerically stable softmax
    smax_y = e_y / e_y.sum()
    return h, smax_y
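A hedged sketch of unrolling step and compiling it (shapes hypothetical; b_h and b_y are assumed to be shared variables captured from the enclosing scope, and `t` to be the theano.tensor alias used above):

xs = t.matrix('xs')                   # (time, input_dim) input sequence
h0 = t.vector('h0')                   # initial hidden state
[hs, ys], _ = theano.scan(step,
                          sequences=xs,
                          outputs_info=[h0, None],
                          non_sequences=[W_h, W_x, W_y])
predict = theano.function([xs, h0], ys[-1])   # distribution over the last output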
Example 10: __init__
def __init__(self, name, inp):
    eqvars = self.arrdict[name]
    w_hidden, b_hidden, w_output, b_output = eqvars
    hidden = T.dot(w_hidden.T, inp) + b_hidden
    hidden_act = M.tanh(hidden)
    output = T.dot(w_output.T, hidden_act) + b_output
    self.proj = output.sum()
Example 11: __init__
def __init__(self, input, w, b, params=[]):
    self.output = nnet.softmax(theano.dot(input, w) + b)
    self.l1 = abs(w).sum()                     # L1 regularization term
    self.l2_sqr = (w ** 2).sum()               # squared L2 regularization term
    self.argmax = theano.tensor.argmax(theano.dot(input, w) + b, axis=input.ndim - 1)
    self.input = input
    self.w = w
    self.b = b
    self.params = params
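Such a softmax layer is usually paired with a negative log-likelihood cost. A minimal sketch, assuming an instance named layer and an integer vector y of gold class labels:

import theano.tensor as T

y = T.ivector('y')   # one class index per row of layer.input
nll = -T.mean(T.log(layer.output)[T.arange(y.shape[0]), y])
cost = nll + 0.001 * layer.l2_sqr        # hypothetical weight-decay coefficient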
Example 12: hidden_cov_units_preactivation_given_v
def hidden_cov_units_preactivation_given_v(self, v, small=0.5):
    """Return the argument to the sigmoid that would give the mean of the
    covariance hidden units.

    See the math at the top of this file for what 'adjusted' means.

    return b - 0.5 * dot(adjusted(v), U)**2
    """
    unit_v = v / (TT.sqrt(TT.mean(v ** 2, axis=1) + small)).dimshuffle(0, 'x')  # adjust row norm
    return self.b + 0.5 * dot(dot(unit_v, self.U) ** 2, self.P)
Example 13: build_mdn_predict
def build_mdn_predict(proj, x, tparams):
    # average squared first difference of x, used to rescale the variances
    x_diff_squared_avg = tensor.mean((x[:, 1:] - x[:, :-1]) ** 2, axis=1)
    invsigma_given_x = tensor.maximum(
        tensor.nnet.sigmoid(theano.dot(proj, tparams['U_sigma']) + tparams['b_sigma']),
        1e-8) / x_diff_squared_avg[:, None]
    mu = theano.dot(proj, tparams['U_mu']) + tparams['b_mu']
    p_mix_given_x = tensor.nnet.softmax(tensor.dot(proj, tparams['U_mix']) + tparams['b_mix'])
    # clamp into [1e-6, 1 - 1e-6]; the original nested the two bounds the wrong way round
    p_mix_given_x = tensor.minimum(tensor.maximum(p_mix_given_x, 1e-6), 1 - 1e-6)
    p_mix_given_x = tensor.log(p_mix_given_x / (tensor.sum(p_mix_given_x, axis=1)[:, None] + 10 * EPS) + EPS)
    return invsigma_given_x, mu, p_mix_given_x
Example 14: one_step
def one_step(x_t, h_tminus1, c_tminus1):
    i_t = sigmoid(theano.dot(x_t, self.W_xi) + theano.dot(h_tminus1, self.W_hi) + self.b_i)
    f_t = sigmoid(theano.dot(x_t, self.W_xf) + theano.dot(h_tminus1, self.W_hf) + self.b_f)
    o_t = sigmoid(theano.dot(x_t, self.W_xo) + theano.dot(h_tminus1, self.W_ho) + self.b_o)
    g_t = self.activation_fun(theano.dot(x_t, self.W_xg) + theano.dot(h_tminus1, self.W_hg) + self.b_g)
    c_t = f_t * c_tminus1 + i_t * g_t
    h_t = o_t * self.activation_fun(c_t)
    y_t = sigmoid(theano.dot(h_t, self.W_hy) + self.b_y)
    return [h_t, c_t, y_t]
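A hedged sketch of unrolling this LSTM step over a batched sequence (hypothetical shapes; assumes import theano.tensor as T): the output y_t is recomputed each step rather than fed back, hence the None in outputs_info.

x_seq = T.tensor3('x_seq')                # (time, batch, input_dim)
h0 = T.matrix('h0'); c0 = T.matrix('c0')  # initial hidden and cell states
[h, c, y], _ = theano.scan(one_step,
                           sequences=x_seq,
                           outputs_info=[h0, c0, None])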
Example 15: step
def step(self, x_t, h_tm1, W_ih, W_hh, b_h, W_ho, b_o):
    # h_t = g(W_ih x_t + W_hh h_tm1 + b_h)
    ### Does not work on a recurrent layer, see http://arxiv.org/pdf/1311.0701v7.pdf
    h_t = self.g(theano.dot(x_t, W_ih) + theano.dot(h_tm1, W_hh) + b_h)
    # y_t = act(W_ho h_t + b_o)
    ### y_t = self.act(theano.dot(h_t, W_ho) + b_o)
    y_t = self.act(theano.dot(h_t, W_ho) + b_o)
    return [h_t, y_t]