This page collects typical usage examples of the theano.tensor.batched_tensordot method in Python. If you are wondering how tensor.batched_tensordot is used in practice, what it is for, or what real calls look like, the curated code samples below may help. You can also browse further usage examples of its containing module, theano.tensor.
The following shows 15 code examples of the tensor.batched_tensordot method, sorted by popularity by default.
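Before the examples, here is a minimal, self-contained sketch of what T.batched_tensordot computes (the shapes and variable names below are illustrative, not taken from any of the examples): the first axis of both arguments is treated as a shared batch axis, and the axes argument selects which of the remaining axes are contracted.

import numpy as np
import theano
import theano.tensor as T

x = T.tensor3('x')  # illustrative shape (batch, n, m)
y = T.tensor3('y')  # illustrative shape (batch, m, k)
# contract axis 2 of x with axis 1 of y, iterating over the common batch axis 0
out = T.batched_tensordot(x, y, axes=[[2], [1]])  # result shape (batch, n, k)
f = theano.function([x, y], out)

xv = np.random.rand(4, 3, 5).astype(theano.config.floatX)
yv = np.random.rand(4, 5, 2).astype(theano.config.floatX)
print(f(xv, yv).shape)  # (4, 3, 2)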
Example 1: _get_jac_z_vars

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import batched_tensordot [as alias]
def _get_jac_z_vars(self):
    if not self.predictor.feature_jacobian_name:
        raise NotImplementedError
    X_var, U_var, X_target_var, U_lin_var, alpha_var = self.input_vars
    names = [self.predictor.feature_name, self.predictor.feature_jacobian_name, self.predictor.next_feature_name]
    vars_ = L.get_output([self.predictor.pred_layers[name] for name in iter_util.flatten_tree(names)], deterministic=True)
    feature_vars, jac_vars, next_feature_vars = iter_util.unflatten_tree(names, vars_)
    y_vars = [T.flatten(feature_var, outdim=2) for feature_var in feature_vars]
    y_target_vars = [theano.clone(y_var, replace={X_var: X_target_var}) for y_var in y_vars]
    # blend the target features with the current features when alpha != 1
    y_target_vars = [theano.ifelse.ifelse(T.eq(alpha_var, 1.0),
                                          y_target_var,
                                          alpha_var * y_target_var + (1 - alpha_var) * y_var)
                     for (y_var, y_target_var) in zip(y_vars, y_target_vars)]
    jac_vars = [theano.clone(jac_var, replace={U_var: U_lin_var}) for jac_var in jac_vars]
    y_next_pred_vars = [T.flatten(next_feature_var, outdim=2) for next_feature_var in next_feature_vars]
    y_next_pred_vars = [theano.clone(y_next_pred_var, replace={U_var: U_lin_var}) for y_next_pred_var in y_next_pred_vars]
    # batched Jacobian-times-action product: contracts axis 2 of each Jacobian with axis 1 of U_lin_var
    z_vars = [y_target_var - y_next_pred_var + T.batched_tensordot(jac_var, U_lin_var, axes=(2, 1))
              for (y_target_var, y_next_pred_var, jac_var) in zip(y_target_vars, y_next_pred_vars, jac_vars)]
    return jac_vars, z_vars
Example 2: batch_dot

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import batched_tensordot [as alias]
def batch_dot(x, y, axes=None):
    if axes is None:
        # behaves like tf.batch_matmul by default
        axes = [(x.ndim - 1,), (y.ndim - 2,)]
    return T.batched_tensordot(x, y, axes=axes)
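For reference, with the default axes this wrapper acts as a batched matrix multiplication; a hypothetical call with 3-D inputs (the shapes are illustrative):

# assuming x has shape (batch, n, m) and y has shape (batch, m, k), the default
# axes [(x.ndim - 1,), (y.ndim - 2,)] contract the last axis of x with the
# second-to-last axis of y, giving a result of shape (batch, n, k)
z = batch_dot(x, y)  # same as T.batched_tensordot(x, y, axes=[(2,), (1,)]) for 3-D x, y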
Example 3: __init__

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import batched_tensordot [as alias]
def __init__(self, input, n_in, n_out):
    ## input has shape (batchSize, seqLen, n_in)
    ## input shall be a binary tensor, where each row has exactly one 1
    self.n_in = n_in
    self.n_out = n_out
    self.input = input
    value_bound = np.sqrt(6. / (n_in * n_in + n_out))
    W_values = np.asarray(np.random.uniform(low=-value_bound, high=value_bound, size=(n_in, n_in, n_out)),
                          dtype=theano.config.floatX)
    self.W = theano.shared(value=W_values, name='EmbeddingLayer_W', borrow=True)
    ## out1 shall have shape (batchSize, seqLen, n_in, n_out)
    out1 = T.tensordot(input, self.W, axes=1)
    ## out2 has shape (batchSize, n_out, seqLen, n_in)
    out2 = out1.dimshuffle(0, 3, 1, 2)
    ## input2 has shape (batchSize, n_in, seqLen)
    input2 = input.dimshuffle(0, 2, 1)
    ## out3 shall have shape (batchSize, n_out, seqLen, seqLen)
    out3 = T.batched_tensordot(out2, input2, axes=1)
    ## output has shape (batchSize, seqLen, seqLen, n_out)
    self.output = out3.dimshuffle(0, 2, 3, 1)
    self.params = [self.W]
    self.paramL1 = abs(self.W).sum()
    self.paramL2 = (self.W ** 2).sum()
    ## self.pcenters = (self.W.sum(axis=[0, 1]) ** 2).sum()
    self.pcenters = (self.W.mean(axis=[0, 1]) ** 2).sum()
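A hedged usage sketch of this layer. The class name EmbeddingLayer is an assumption inferred from the shared variable's name ('EmbeddingLayer_W'); only the constructor is shown above, so the call below is illustrative rather than taken from the original project:

import numpy as np
import theano
import theano.tensor as T

seq = T.tensor3('seq')  # one-hot input of shape (batchSize, seqLen, n_in)
layer = EmbeddingLayer(seq, n_in=20, n_out=3)  # EmbeddingLayer is an assumed class name
f = theano.function([seq], layer.output)

x = np.zeros((2, 5, 20), dtype=theano.config.floatX)
for b in range(2):
    for i in range(5):
        x[b, i, np.random.randint(20)] = 1.0  # exactly one 1 per row
print(f(x).shape)  # (2, 5, 5, 3): one n_out-dimensional feature per pair of sequence positions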
Example 4: _get_phi_var

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import batched_tensordot [as alias]
def _get_phi_var(self):
    X_var, U_var, X_target_var, U_lin_var, alpha_var = self.input_vars
    A_split_var, b_split_var, c_split_var = self._get_A_b_c_split_vars()
    # quadratic error features per batch element: U^T A U - 2 b^T U + c
    phi_errors_var = (T.batched_tensordot(T.batched_tensordot(A_split_var.dimshuffle((1, 0, 2, 3)), U_var, axes=(3, 1)), U_var, axes=(2, 1))
                      - 2 * T.batched_tensordot(b_split_var.dimshuffle((1, 0, 2)), U_var, axes=(2, 1))
                      + c_split_var.T)
    phi_actions_var = U_var ** 2
    phi_var = T.concatenate([phi_errors_var / self.repeats, phi_actions_var], axis=1)
    return phi_var
Example 5: _get_phi2_var

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import batched_tensordot [as alias]
def _get_phi2_var(self):
    X_var, U_var, X_target_var, U_lin_var, alpha_var = self.input_vars
    A_split_var, b_split_var, c_split_var = self._get_A_b_c_split2_vars()
    phi_errors_var = (T.batched_tensordot(T.batched_tensordot(A_split_var, U_var, axes=(3, 1)), U_var, axes=(2, 1))
                      - 2 * T.batched_tensordot(b_split_var, U_var, axes=(2, 1))
                      + c_split_var)
    phi_actions_var = U_var ** 2
    phi_var = T.concatenate([phi_errors_var / self.repeats, phi_actions_var], axis=1)
    return phi_var
Example 6: factorization

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import batched_tensordot [as alias]
def factorization(self, batchSize, argsEmbA, argsEmbB, wC, wC1, wC2):
    # l = batchSize
    # k = self.k  # embedding size
    # r = self.r  # number of relations
    Afirst = T.batched_tensordot(wC, argsEmbA, axes=[[1], [1]])  # + self.Cb  # [l, k, k] * [l, k] = [l, k]
    Asecond = T.batched_dot(Afirst, argsEmbB)  # [l, k] * [l, k] = [l]
    spFirst = T.batched_dot(wC1, argsEmbA)
    spSecond = T.batched_dot(wC2, argsEmbB)
    return Asecond + spFirst + spSecond
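Reading off the shape comments, for each batch element l this appears to compute a bilinear score plus two linear terms (an interpretation of the code, not a statement from the original source):

    score[l] = argsEmbA[l]^T * wC[l] * argsEmbB[l] + wC1[l] . argsEmbA[l] + wC2[l] . argsEmbB[l]

The negLeftFactorization / negRightFactorization variants below evaluate the same kind of score against n negative-sample embeddings at once.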
Example 7: negLeftFactorization

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import batched_tensordot [as alias]
def negLeftFactorization(self, batchSize, negEmbA, argsEmbB, wC, wC1, wC2):
    # l = batchSize
    # k = self.k  # embedding size
    # r = self.r  # number of relations
    Afirst = T.batched_tensordot(wC, negEmbA.dimshuffle(1, 2, 0), axes=[[1], [1]])  # [l, k, k] * [l, k, n] = [l, k, n]
    Asecond = T.batched_tensordot(Afirst, argsEmbB, axes=[[1], [1]])  # [l, k, n] * [l, k] = [l, n]
    spAfirst = T.batched_tensordot(wC1, negEmbA.dimshuffle(1, 2, 0), axes=[[1], [1]])  # [l, k] * [l, k, n] = [l, n]
    spSecond = T.batched_dot(wC2, argsEmbB)
    return Asecond + spAfirst + spSecond.reshape((batchSize, 1))
Example 8: negRightFactorization

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import batched_tensordot [as alias]
def negRightFactorization(self, batchSize, argsEmbA, negEmbB, wC, wC1, wC2):
    Afirst = T.batched_tensordot(wC, argsEmbA, axes=[[1], [1]])  # [l, k, k] * [l, k] = [l, k]
    Asecond = T.batched_tensordot(Afirst, negEmbB.dimshuffle(1, 2, 0), axes=[[1], [1]])  # [l, k] * [l, k, n] = [l, n]
    spFirst = T.batched_dot(wC1, argsEmbA)
    spAsecond = T.batched_tensordot(wC2, negEmbB.dimshuffle(1, 2, 0), axes=[[1], [1]])  # [l, k] * [l, k, n] = [l, n]
    return Asecond + spAsecond + spFirst.reshape((batchSize, 1))
Example 9: factorization

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import batched_tensordot [as alias]
def factorization(self, batchSize, argsEmbA, argsEmbB, wC):
    # first = T.tensordot(relationProbs, self.C, axes=[[1], [2]])  # [l, r] * [k, k, r] = [l, k, k]
    Afirst = T.batched_tensordot(wC, argsEmbA, axes=[[1], [1]])  # [l, k, k] * [l, k] = [l, k]
    Asecond = T.batched_dot(Afirst, argsEmbB)  # [l, k] * [l, k] = [l]
    # entropy = T.sum(T.log(relationProbs) * relationProbs, axis=1)  # [l, r] * [l, r] = [l]
    return Asecond
Example 10: negFactorization1

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import batched_tensordot [as alias]
def negFactorization1(self, batchSize, negEmbA, argsEmbB, wC):
    # first = T.tensordot(relationProbs, self.C, axes=[[1], [2]])  # [l, r] * [k, k, r] = [l, k, k]
    Afirst = T.batched_tensordot(wC, negEmbA.dimshuffle(1, 2, 0), axes=[[1], [1]])  # [l, k, k] * [l, k, n] = [l, k, n]
    Asecond = T.batched_tensordot(Afirst, argsEmbB, axes=[[1], [1]])  # [l, k, n] * [l, k] = [l, n]
    return Asecond
Example 11: negLeftMostFactorization

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import batched_tensordot [as alias]
def negLeftMostFactorization(self, batchSize, negEmbed, wC1):
    # l = batchSize
    # k = self.k  # embedding size
    # r = self.r  # number of relations
    Afirst = T.batched_tensordot(wC1, negEmbed.dimshuffle(1, 2, 0), axes=[[1], [1]])  # [l, k] * [l, k, n] = [l, n]
    return Afirst
Example 12: negRightMostFactorization

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import batched_tensordot [as alias]
def negRightMostFactorization(self, batchSize, negEmbed, wC2):
    # l = batchSize
    # k = self.k  # embedding size
    # r = self.r  # number of relations
    Asecond = T.batched_tensordot(wC2, negEmbed.dimshuffle(1, 2, 0), axes=[[1], [1]])  # [l, k] * [l, k, n] = [l, n]
    return Asecond
Example 13: get_output_for

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import batched_tensordot [as alias]
def get_output_for(self, inputs, **kwargs):
    # contract axis 1 of both input tensors, batching over axis 0
    output = T.batched_tensordot(inputs[0], inputs[1], axes=[[1], [1]])
    return output
Example 14: batch_dot

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import batched_tensordot [as alias]
def batch_dot(x, y, axes=None):
    '''Batchwise dot product.

    batch_dot results in a tensor with fewer dimensions than the input.
    If the number of dimensions is reduced to 1, we use `expand_dims` to
    make sure that ndim is at least 2.

    # Arguments
        x, y: tensors with ndim >= 2
        axes: list (or single) int with target dimensions

    # Returns
        A tensor with shape equal to the concatenation of x's shape
        (less the dimension that was summed over) and y's shape
        (less the batch dimension and the dimension that was summed over).
        If the final rank is 1, we reshape it to (batch_size, 1).

    # Examples
        Assume x = [[1, 2], [3, 4]] and y = [[5, 6], [7, 8]].
        batch_dot(x, y, axes=1) = [[17], [53]], which is the main diagonal
        of x.dot(y.T), although we never have to calculate the off-diagonal
        elements.

        Shape inference:
        Let x's shape be (100, 20) and y's shape be (100, 30, 20).
        If dot_axes is (1, 2), to find the output shape of the resultant tensor,
        loop through each dimension in x's shape and y's shape:
        x.shape[0] : 100 : append to output shape
        x.shape[1] : 20 : do not append to output shape,
            dimension 1 of x has been summed over. (dot_axes[0] = 1)
        y.shape[0] : 100 : do not append to output shape,
            always ignore first dimension of y
        y.shape[1] : 30 : append to output shape
        y.shape[2] : 20 : do not append to output shape,
            dimension 2 of y has been summed over. (dot_axes[1] = 2)
        output_shape = (100, 30)
    '''
    # TODO: `keras_shape` inference.
    if isinstance(axes, int):
        axes = (axes, axes)
    if axes is None:
        # behaves like tf.batch_matmul by default
        axes = [x.ndim - 1, y.ndim - 2]
    out = T.batched_tensordot(x, y, axes=axes)
    if ndim(out) == 1:
        out = expand_dims(out, 1)
    return out
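A quick check of the shape-inference walkthrough in the docstring; this assumes the helper functions ndim and expand_dims used by batch_dot (part of the same backend module) are in scope:

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')   # shape (100, 20)
y = T.tensor3('y')  # shape (100, 30, 20)
out = batch_dot(x, y, axes=(1, 2))
f = theano.function([x, y], out)

xv = np.random.rand(100, 20).astype(theano.config.floatX)
yv = np.random.rand(100, 30, 20).astype(theano.config.floatX)
print(f(xv, yv).shape)  # (100, 30), matching the documented output_shape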
Example 15: _get_pi2_var

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import batched_tensordot [as alias]
def _get_pi2_var(self):
    if not self.predictor.feature_jacobian_name:
        raise NotImplementedError
    X_var, U_var, X_target_var, U_lin_var, alpha_var = self.input_vars
    names = [self.predictor.feature_name, self.predictor.feature_jacobian_name, self.predictor.next_feature_name]
    vars_ = L.get_output([self.predictor.pred_layers[name] for name in iter_util.flatten_tree(names)], deterministic=True)
    feature_vars, jac_vars, next_feature_vars = iter_util.unflatten_tree(names, vars_)
    y_vars = [T.flatten(feature_var, outdim=2) for feature_var in feature_vars]
    y_target_vars = [theano.clone(y_var, replace={X_var: X_target_var}) for y_var in y_vars]
    # blend the target features with the current features when alpha != 1
    y_target_vars = [theano.ifelse.ifelse(T.eq(alpha_var, 1.0),
                                          y_target_var,
                                          alpha_var * y_target_var + (1 - alpha_var) * y_var)
                     for (y_var, y_target_var) in zip(y_vars, y_target_vars)]
    jac_vars = [theano.clone(jac_var, replace={U_var: U_lin_var}) for jac_var in jac_vars]
    y_next_pred_vars = [T.flatten(next_feature_var, outdim=2) for next_feature_var in next_feature_vars]
    y_next_pred_vars = [theano.clone(y_next_pred_var, replace={U_var: U_lin_var}) for y_next_pred_var in y_next_pred_vars]
    # batched Jacobian-times-action product, as in Example 1
    z_vars = [y_target_var - y_next_pred_var + T.batched_tensordot(jac_var, U_lin_var, axes=(2, 1))
              for (y_target_var, y_next_pred_var, jac_var) in zip(y_target_vars, y_next_pred_vars, jac_vars)]
    feature_shapes = L.get_output_shape([self.predictor.pred_layers[name] for name in iter_util.flatten_tree(self.predictor.feature_name)])
    w_var, lambda_var = self.param_vars
    A_var = None
    b_var = None
    normalized_w_vars = T.split(w_var / self.repeats, [feature_shape[1] for feature_shape in feature_shapes], len(feature_shapes))
    # accumulate A += J^T (w * J) and b += z^T (w * J) over the feature groups
    for jac_var, z_var, normalized_w_var, feature_shape in zip(jac_vars, z_vars, normalized_w_vars, feature_shapes):
        z_var = T.flatten(z_var)
        jac_var = T.reshape(jac_var, (feature_shape[1], -1, self.action_space.shape[0]))
        jac_w_var = T.reshape(jac_var * normalized_w_var[:, None, None], (-1, self.action_space.shape[0]))
        jac_var = jac_var.reshape((-1, self.action_space.shape[0]))
        if A_var is None:
            A_var = jac_var.T.dot(jac_w_var)
        else:
            A_var += jac_var.T.dot(jac_w_var)
        if b_var is None:
            b_var = z_var.dot(jac_w_var)
        else:
            b_var += z_var.dot(jac_w_var)
    A_var += T.diag(lambda_var)
    pi_var = T.dot(T.nlinalg.matrix_inverse(A_var), b_var)  # preprocessed units
    return pi_var
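Reading off the code above (this is an interpretation, not part of the original source): the loop accumulates a weighted, regularized least-squares system from the per-feature-group Jacobians J_i, weights w_i, and targets z_i, and the final lines solve it:

    A = sum_i J_i^T diag(w_i) J_i + diag(lambda)
    b = sum_i J_i^T diag(w_i) z_i
    pi = A^{-1} b

so pi_var is the action (in preprocessed units) minimizing sum_i (J_i u - z_i)^T diag(w_i) (J_i u - z_i) + u^T diag(lambda) u over u.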