This page collects typical usage examples of the theano.tensor.flatten method in Python. If you are unsure what tensor.flatten does or how to call it, the curated code samples below may help. You can also explore further usage examples from the containing module, theano.tensor.
The following shows 15 code examples of tensor.flatten, sorted by popularity by default.
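As a quick orientation before the examples: T.flatten(x, outdim=k) keeps the first k-1 dimensions of x and collapses everything after them into a single trailing dimension. A minimal self-contained sketch (assuming only Theano and NumPy are installed):

import numpy as np
import theano
import theano.tensor as T

x = T.tensor4('x')             # e.g. a batch of images: (batch, channels, rows, cols)
y = T.flatten(x, outdim=2)     # keep the batch axis, collapse the rest
f = theano.function([x], y)

batch = np.zeros((2, 3, 4, 5), dtype=theano.config.floatX)
print(f(batch).shape)          # (2, 60): 3 * 4 * 5 collapsed into one axis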
Example 1: predict_batchnorm

# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import flatten [as alias]
def predict_batchnorm(_x, _params, n_layers=3):
    w = _params[0]
    h0 = lrelu(dnn_conv(_x, w, subsample=(2, 2), border_mode=(2, 2)))
    hs = [h0]
    output = []
    for n in range(n_layers):
        hin = hs[-1]
        w, g, b = _params[1 + 3 * n:1 + 3 * (n + 1)]
        h_o = dnn_conv(hin, w, subsample=(2, 2), border_mode=(2, 2))
        hout = lrelu(batchnorm(h_o, g=g, b=b))
        hs.append(hout)
        output.append(h_o)
    h = T.flatten(hs[-1], 2)  # collapse conv features to (batch, features)
    y = tanh(T.dot(h, _params[-1]))
    return y, output
Example 2: model

# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import flatten [as alias]
def model(X, w, w2, w3, w4, p_drop_conv, p_drop_hidden):
    l1a = rectify(conv2d(X, w, border_mode='full'))
    l1 = max_pool_2d(l1a, (2, 2))
    l1 = dropout(l1, p_drop_conv)
    l2a = rectify(conv2d(l1, w2))
    l2 = max_pool_2d(l2a, (2, 2))
    l2 = dropout(l2, p_drop_conv)
    l3a = rectify(conv2d(l2, w3))
    l3b = max_pool_2d(l3a, (2, 2))
    l3 = T.flatten(l3b, outdim=2)  # flatten conv features for the dense layer
    l3 = dropout(l3, p_drop_conv)
    l4 = rectify(T.dot(l3, w4))
    l4 = dropout(l4, p_drop_hidden)
    pyx = softmax(T.dot(l4, w_o))  # w_o (output-layer weights) comes from the enclosing scope
    return l1, l2, l3, l4, pyx
Example 3: _get_jac_vars

# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import flatten [as alias]
def _get_jac_vars(self):
    if not self.predictor.feature_jacobian_name:
        raise NotImplementedError
    X_var, U_var, X_target_var, U_lin_var, alpha_var = self.input_vars
    names = [self.predictor.feature_name, self.predictor.feature_jacobian_name, self.predictor.next_feature_name]
    vars_ = L.get_output([self.predictor.pred_layers[name] for name in iter_util.flatten_tree(names)], deterministic=True)
    feature_vars, jac_vars, next_feature_vars = iter_util.unflatten_tree(names, vars_)
    y_vars = [T.flatten(feature_var, outdim=2) for feature_var in feature_vars]
    y_target_vars = [theano.clone(y_var, replace={X_var: X_target_var}) for y_var in y_vars]
    y_target_vars = [theano.ifelse.ifelse(T.eq(alpha_var, 1.0),
                                          y_target_var,
                                          alpha_var * y_target_var + (1 - alpha_var) * y_var)
                     for (y_var, y_target_var) in zip(y_vars, y_target_vars)]
    jac_vars = [theano.clone(jac_var, replace={U_var: U_lin_var}) for jac_var in jac_vars]
    return jac_vars
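Examples 3 and 4 rewire an existing symbolic graph onto new inputs with theano.clone. A minimal sketch of that mechanism, independent of the predictor machinery above:

import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
z = T.vector('z')
y = 2 * x + 1
y_from_z = theano.clone(y, replace={x: z})   # same graph, now reading from z
f = theano.function([z], y_from_z)

zv = np.array([1.0, 2.0], dtype=theano.config.floatX)
print(f(zv))                                 # [3., 5.]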
Example 4: _get_jac_z_vars

# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import flatten [as alias]
def _get_jac_z_vars(self):
    if not self.predictor.feature_jacobian_name:
        raise NotImplementedError
    X_var, U_var, X_target_var, U_lin_var, alpha_var = self.input_vars
    names = [self.predictor.feature_name, self.predictor.feature_jacobian_name, self.predictor.next_feature_name]
    vars_ = L.get_output([self.predictor.pred_layers[name] for name in iter_util.flatten_tree(names)], deterministic=True)
    feature_vars, jac_vars, next_feature_vars = iter_util.unflatten_tree(names, vars_)
    y_vars = [T.flatten(feature_var, outdim=2) for feature_var in feature_vars]
    y_target_vars = [theano.clone(y_var, replace={X_var: X_target_var}) for y_var in y_vars]
    y_target_vars = [theano.ifelse.ifelse(T.eq(alpha_var, 1.0),
                                          y_target_var,
                                          alpha_var * y_target_var + (1 - alpha_var) * y_var)
                     for (y_var, y_target_var) in zip(y_vars, y_target_vars)]
    jac_vars = [theano.clone(jac_var, replace={U_var: U_lin_var}) for jac_var in jac_vars]
    y_next_pred_vars = [T.flatten(next_feature_var, outdim=2) for next_feature_var in next_feature_vars]
    y_next_pred_vars = [theano.clone(y_next_pred_var, replace={U_var: U_lin_var}) for y_next_pred_var in y_next_pred_vars]
    z_vars = [y_target_var - y_next_pred_var + T.batched_tensordot(jac_var, U_lin_var, axes=(2, 1))
              for (y_target_var, y_next_pred_var, jac_var) in zip(y_target_vars, y_next_pred_vars, jac_vars)]
    return jac_vars, z_vars
Example 5: build_bilinear_net

# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import flatten [as alias]
def build_bilinear_net(input_shapes, X_var=None, U_var=None, X_diff_var=None, axis=1):
    x_shape, u_shape = input_shapes
    X_var = X_var or T.tensor4('X')
    U_var = U_var or T.matrix('U')
    X_diff_var = X_diff_var or T.tensor4('X_diff')
    X_next_var = X_var + X_diff_var
    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var)
    l_u = L.InputLayer(shape=(None,) + u_shape, input_var=U_var)
    l_x_diff_pred = LT.BilinearLayer([l_x, l_u], axis=axis)
    l_x_next_pred = L.ElemwiseMergeLayer([l_x, l_x_diff_pred], T.add)
    l_y = L.flatten(l_x)
    l_y_diff_pred = L.flatten(l_x_diff_pred)
    X_next_pred_var = lasagne.layers.get_output(l_x_next_pred)
    loss = ((X_next_var - X_next_pred_var) ** 2).mean(axis=0).sum() / 2.
    net_name = 'BilinearNet'
    input_vars = OrderedDict([(var.name, var) for var in [X_var, U_var, X_diff_var]])
    pred_layers = OrderedDict([('y_diff_pred', l_y_diff_pred), ('y', l_y), ('x0_next_pred', l_x_next_pred)])
    return net_name, input_vars, pred_layers, loss
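Note that the L.flatten calls above are Lasagne's FlattenLayer (flattening to 2-D by default), not theano.tensor.flatten. A minimal standalone sketch of that layer, assuming Lasagne is installed:

import numpy as np
import theano
import lasagne.layers as L

l_in = L.InputLayer(shape=(None, 3, 8, 8))
l_flat = L.flatten(l_in)                   # FlattenLayer, outdim=2 by default
out = L.get_output(l_flat)
f = theano.function([l_in.input_var], out)

batch = np.zeros((4, 3, 8, 8), dtype=theano.config.floatX)
print(f(batch).shape)                      # (4, 192)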
Example 6: __init__

# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import flatten [as alias]
def __init__(self, inputs=None, ndim=1):
    """
    Parameters
    ----------
    inputs : tuple(shape, `Theano.TensorType`)
        tuple(shape, `Theano.TensorType`) or None describing the input to use for this layer.
        `shape` will be a monad tuple representing known sizes for each dimension in the `Theano.TensorType`.
        If the input is 4D images, the expected format is (batch_size, #channels, rows, cols).
    ndim : int
        The number of dimensions for the result to have. (Default 1.)
    """
    super(Flatten, self).__init__(inputs=inputs, ndim=ndim)
    input_shape, self.input = self.inputs[0]
    in_ndim = len(input_shape)
    assert 0 < ndim <= in_ndim, \
        "Number of resulting dimensions ndim has to be greater than zero and at most the current number of dims."
    kept_size = tuple(input_shape[:ndim - 1])
    flat_size = (None,) if None in input_shape[ndim - 1:] else (prod(input_shape[ndim - 1:]),)
    self.output_size = kept_size + flat_size
    self.output = flatten(self.input, ndim)
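A quick check of the output_size arithmetic above, on a hypothetical (batch, channels, rows, cols) input with an unknown batch size (plain Python/NumPy):

from numpy import prod

input_shape = (None, 3, 28, 28)   # batch size unknown at graph-construction time
ndim = 2
kept_size = tuple(input_shape[:ndim - 1])                   # (None,)
flat_size = (None,) if None in input_shape[ndim - 1:] \
    else (prod(input_shape[ndim - 1:]),)                    # (2352,) = 3 * 28 * 28
print(kept_size + flat_size)                                # (None, 2352)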
Example 7: sparse_categorical_crossentropy

# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import flatten [as alias]
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
    output_dimensions = list(range(len(int_shape(output))))
    if axis != -1 and axis not in output_dimensions:
        raise ValueError(
            '{}{}{}'.format(
                'Unexpected channels axis {}. '.format(axis),
                'Expected to be -1 or one of the axes of `output`, ',
                'which has {} dimensions.'.format(len(int_shape(output)))))
    # If the channels are not in the last axis, move them there:
    if axis != -1 and axis != output_dimensions[-1]:
        permutation = output_dimensions[:axis]
        permutation += output_dimensions[axis + 1:] + [axis]
        output = permute_dimensions(output, permutation)
        target = permute_dimensions(target, permutation)
    target = T.cast(T.flatten(target), 'int32')
    target = T.extra_ops.to_one_hot(target, nb_class=output.shape[-1])
    target = reshape(target, shape(output))
    return categorical_crossentropy(target, output, from_logits, axis=-1)
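The flatten / cast / to_one_hot / reshape sequence above converts a tensor of integer class ids into a dense one-hot tensor with the same leading shape as output. A minimal sketch of just that sequence (the class count nb_class=4 is an arbitrary choice for illustration):

import numpy as np
import theano
import theano.tensor as T

target = T.matrix('target')                        # class ids stored as floats, shape (batch, steps)
flat = T.cast(T.flatten(target), 'int32')          # 1-D vector of class ids
onehot = T.extra_ops.to_one_hot(flat, nb_class=4)  # shape (batch * steps, 4)
f = theano.function([target], onehot)

ids = np.array([[0, 2], [3, 1]], dtype=theano.config.floatX)
print(f(ids).shape)                                # (4, 4); a final reshape restores (2, 2, 4)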
Example 8: discrim

# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import flatten [as alias]
def discrim(X, w1, g1, b1, w2, g2, b2, w3, g3, b3, w4, g4, b4, wy):
    filter_shape = (Channel[1], Channel[0], kernal[0], kernal[0], kernal[0])
    Dl1 = lrelu(batchnorm(conv(X, w1, filter_shape=filter_shape), g=g1, b=b1))
    filter_shape = (Channel[2], Channel[1], kernal[1], kernal[1], kernal[1])
    Dl2 = lrelu(batchnorm(conv(Dl1, w2, filter_shape=filter_shape), g=g2, b=b2))
    filter_shape = (Channel[3], Channel[2], kernal[2], kernal[2], kernal[2])
    Dl3 = lrelu(batchnorm(conv(Dl2, w3, filter_shape=filter_shape), g=g3, b=b3))
    filter_shape = (Channel[4], Channel[3], kernal[3], kernal[3], kernal[3])
    Dl4 = lrelu(batchnorm(conv(Dl3, w4, filter_shape=filter_shape), g=g4, b=b4))
    Dl4 = T.flatten(Dl4, 2)  # flatten 3D conv features for the final dense layer
    DlY = sigmoid(T.dot(Dl4, wy))
    return DlY
Example 9: encoder

# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import flatten [as alias]
def encoder(X, w1, g1, b1, w2, g2, b2, w3, g3, b3, w4, g4, b4, wz):
    filter_shape = (Channel[1], Channel[0], kernal[0], kernal[0], kernal[0])
    Dl1 = lrelu(batchnorm(conv(X, w1, filter_shape=filter_shape), g=g1, b=b1))
    filter_shape = (Channel[2], Channel[1], kernal[1], kernal[1], kernal[1])
    Dl2 = lrelu(batchnorm(conv(Dl1, w2, filter_shape=filter_shape), g=g2, b=b2))
    filter_shape = (Channel[3], Channel[2], kernal[2], kernal[2], kernal[2])
    Dl3 = lrelu(batchnorm(conv(Dl2, w3, filter_shape=filter_shape), g=g3, b=b3))
    filter_shape = (Channel[4], Channel[3], kernal[3], kernal[3], kernal[3])
    Dl4 = lrelu(batchnorm(conv(Dl3, w4, filter_shape=filter_shape), g=g4, b=b4))
    Dl4 = T.flatten(Dl4, 2)  # flatten 3D conv features for the latent projection
    DlZ = sigmoid(T.dot(Dl4, wz))
    return DlZ

# def gen_Z(dist):
#     mu = dist[:Nz]
#     sigma = dist[Nz:]
Example 10: compute_output

# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import flatten [as alias]
def compute_output(self, network, in_vw):
    outdim = network.find_hyperparameter(["outdim"])
    out_var = T.flatten(in_vw.variable, outdim=outdim)
    trailing_axes = in_vw.shape[outdim - 1:]
    if any(a is None for a in trailing_axes):
        final_size = None
    else:
        final_size = np.prod(trailing_axes)
    out_shape = in_vw.shape[:outdim - 1] + (final_size,)
    network.create_vw(
        "default",
        variable=out_var,
        shape=out_shape,
        tags={"output"},
    )
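Note that outdim is not limited to 2: with outdim=3, the first two axes are kept and only the trailing axes are merged. For instance:

import theano.tensor as T

x = T.tensor4('x')           # symbolic (batch, channels, rows, cols)
y = T.flatten(x, outdim=3)   # -> (batch, channels, rows * cols)
print(y.ndim)                # 3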
Example 11: sample_variance_penalty_aggregator

# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import flatten [as alias]
def sample_variance_penalty_aggregator(costs,
                                       kappa=0.25,
                                       penalty_type="per_sample"):
    assert costs.ndim >= 1
    if penalty_type == "per_sample":
        # convert to 1 cost per sample
        if costs.ndim > 1:
            if costs.ndim > 2:
                costs = T.flatten(costs, 2)
            costs = costs.mean(axis=1)
    elif penalty_type == "per_element":
        # leave it as it is
        pass
    else:
        raise ValueError("incorrect penalty_type: {}".format(penalty_type))
    return costs.mean() + kappa * costs.std()
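A quick numeric check of the "per_sample" branch above, assuming 3-D costs; with constant costs the std term vanishes and only the mean remains:

import numpy as np
import theano
import theano.tensor as T

costs = T.tensor3('costs')                      # e.g. (batch, rows, cols)
per_sample = T.flatten(costs, 2).mean(axis=1)   # one cost per sample
penalized = per_sample.mean() + 0.25 * per_sample.std()
f = theano.function([costs], penalized)

c = np.ones((4, 2, 3), dtype=theano.config.floatX)
print(float(f(c)))                              # 1.0: zero variance, so only the mean contributes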
Example 12: discrim

# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import flatten [as alias]
def discrim(X):
    current_input = dropout(X, 0.3)
    ### encoder ###
    cv1 = relu(dnn_conv(current_input, aew1, subsample=(1, 1), border_mode=(1, 1)))
    cv2 = relu(batchnorm(dnn_conv(cv1, aew2, subsample=(4, 4), border_mode=(2, 2)), g=aeg2, b=aeb2))
    cv3 = relu(batchnorm(dnn_conv(cv2, aew3, subsample=(1, 1), border_mode=(1, 1)), g=aeg3, b=aeb3))
    cv4 = relu(batchnorm(dnn_conv(cv3, aew4, subsample=(4, 4), border_mode=(2, 2)), g=aeg4, b=aeb4))
    cv5 = relu(batchnorm(dnn_conv(cv4, aew5, subsample=(1, 1), border_mode=(1, 1)), g=aeg5, b=aeb5))
    cv6 = relu(batchnorm(dnn_conv(cv5, aew6, subsample=(4, 4), border_mode=(0, 0)), g=aeg6, b=aeb6))
    ### decoder ###
    dv6 = relu(batchnorm(deconv(cv6, aew6, subsample=(4, 4), border_mode=(0, 0)), g=aeg6t, b=aeb6t))
    dv5 = relu(batchnorm(deconv(dv6, aew5, subsample=(1, 1), border_mode=(1, 1)), g=aeg5t, b=aeb5t))
    dv4 = relu(batchnorm(deconv(dv5, aew4, subsample=(4, 4), border_mode=(2, 2)), g=aeg4t, b=aeb4t))
    dv3 = relu(batchnorm(deconv(dv4, aew3, subsample=(1, 1), border_mode=(1, 1)), g=aeg3t, b=aeb3t))
    dv2 = relu(batchnorm(deconv(dv3, aew2, subsample=(4, 4), border_mode=(2, 2)), g=aeg2t, b=aeb2t))
    dv1 = tanh(deconv(dv2, aew1, subsample=(1, 1), border_mode=(1, 1)))
    rX = dv1
    # per-sample reconstruction error: sqrt of the L1 sum plus the L2 norm
    mse = T.sqrt(T.sum(T.abs_(T.flatten(X - rX, 2)), axis=1)) + T.sqrt(T.sum(T.flatten((X - rX) ** 2, 2), axis=1))
    return T.flatten(cv6, 2), rX, mse
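The mse line reduces each sample separately: flattening the residual to (batch, -1) first makes the axis=1 sums per-sample. A minimal symbolic sketch of the L2 term alone:

import theano.tensor as T

X = T.tensor4('X')
rX = T.tensor4('rX')
diff = T.flatten(X - rX, 2)             # (batch, channels * rows * cols)
l2 = T.sqrt(T.sum(diff ** 2, axis=1))   # one L2 reconstruction error per sample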
Example 13: rbf_kernel

# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import flatten [as alias]
def rbf_kernel(X0):
    XY = T.dot(X0, X0.transpose())
    x2 = T.reshape(T.sum(T.square(X0), axis=1), (X0.shape[0], 1))
    X2e = T.repeat(x2, X0.shape[0], axis=1)
    H = T.sub(T.add(X2e, X2e.transpose()), 2 * XY)  # pairwise squared distances
    V = H.flatten()
    # median distance
    h = T.switch(T.eq((V.shape[0] % 2), 0),
                 # if the vector has an even number of elements
                 T.mean(T.sort(V)[((V.shape[0] // 2) - 1):((V.shape[0] // 2) + 1)]),
                 # if it has an odd number of elements
                 T.sort(V)[V.shape[0] // 2])
    h = T.sqrt(0.5 * h / T.log(X0.shape[0].astype('float32') + 1.0)) / 2.
    Kxy = T.exp(-H / h ** 2 / 2.0)
    neighbors = T.argsort(H, axis=1)[:, 1]  # nearest neighbor of each point (column 0 is the point itself)
    return Kxy, neighbors, h
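The T.switch above computes the textbook median of the flattened distance matrix. The same even/odd logic in plain NumPy, as a sanity check:

import numpy as np

V = np.array([4.0, 1.0, 3.0, 2.0])
V_sorted = np.sort(V)
n = V.shape[0]
median = (V_sorted[(n // 2 - 1):(n // 2 + 1)].mean() if n % 2 == 0
          else V_sorted[n // 2])
print(median)   # 2.5, matching np.median(V)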
Example 14: svgd_gradient

# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import flatten [as alias]
def svgd_gradient(X0):
    hidden, _, mse = discrim(X0)
    grad = -1.0 * T.grad(mse.sum(), X0)
    kxy, neighbors, h = rbf_kernel(hidden)  # TODO
    coff = T.exp(-T.sum((hidden[neighbors] - hidden) ** 2, axis=1) / h ** 2 / 2.0)
    v = coff.dimshuffle(0, 'x') * (-hidden[neighbors] + hidden) / h ** 2
    X1 = X0[neighbors]
    hidden1, _, _ = discrim(X1)
    dxkxy = T.Lop(hidden1, X1, v)
    # svgd_grad = (T.dot(kxy, T.flatten(grad, 2)).reshape(dxkxy.shape) + dxkxy) / T.sum(kxy, axis=1).dimshuffle(0, 'x', 'x', 'x')
    svgd_grad = grad + dxkxy / 2.
    return grad, svgd_grad, dxkxy
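T.Lop(f, wrt, eval_points) above computes the vector-Jacobian product v^T (df/dwrt) without materializing the Jacobian. A tiny standalone sketch where the Jacobian is diagonal:

import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
v = T.vector('v')
y = x ** 2                      # dy/dx = diag(2 * x)
lop = T.Lop(y, x, v)            # equals v * 2 * x elementwise here
f = theano.function([x, v], lop)

xv = np.array([1.0, 2.0], dtype=theano.config.floatX)
vv = np.array([1.0, 1.0], dtype=theano.config.floatX)
print(f(xv, vv))                # [2., 4.]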
Example 15: discrim

# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import flatten [as alias]
def discrim(X):
    current_input = dropout(X, 0.3)
    ### encoder ###
    cv1 = relu(dnn_conv(current_input, aew1, subsample=(1, 1), border_mode=(1, 1)))
    cv2 = relu(batchnorm(dnn_conv(cv1, aew2, subsample=(4, 4), border_mode=(2, 2)), g=aeg2, b=aeb2))
    cv3 = relu(batchnorm(dnn_conv(cv2, aew3, subsample=(1, 1), border_mode=(1, 1)), g=aeg3, b=aeb3))
    cv4 = relu(batchnorm(dnn_conv(cv3, aew4, subsample=(4, 4), border_mode=(2, 2)), g=aeg4, b=aeb4))
    cv5 = relu(batchnorm(dnn_conv(cv4, aew5, subsample=(1, 1), border_mode=(1, 1)), g=aeg5, b=aeb5))
    cv6 = relu(batchnorm(dnn_conv(cv5, aew6, subsample=(4, 4), border_mode=(0, 0)), g=aeg6, b=aeb6))
    ### decoder ###
    dv6 = relu(batchnorm(deconv(cv6, aew6, subsample=(4, 4), border_mode=(0, 0)), g=aeg6t, b=aeb6t))
    dv5 = relu(batchnorm(deconv(dv6, aew5, subsample=(1, 1), border_mode=(1, 1)), g=aeg5t, b=aeb5t))
    dv4 = relu(batchnorm(deconv(dv5, aew4, subsample=(4, 4), border_mode=(2, 2)), g=aeg4t, b=aeb4t))
    dv3 = relu(batchnorm(deconv(dv4, aew3, subsample=(1, 1), border_mode=(1, 1)), g=aeg3t, b=aeb3t))
    dv2 = relu(batchnorm(deconv(dv3, aew2, subsample=(4, 4), border_mode=(2, 2)), g=aeg2t, b=aeb2t))
    dv1 = tanh(deconv(dv2, aew1, subsample=(1, 1), border_mode=(1, 1)))
    rX = dv1
    mse = T.sqrt(T.sum(T.abs_(T.flatten(X - rX, 2)), axis=1)) + T.sqrt(T.sum(T.flatten((X - rX) ** 2, 2), axis=1))
    return T.flatten(cv6, 2), rX, mse