This article collects typical usage examples of the theano.tensor.var method in Python. If you are wondering what tensor.var does and how to use it, the curated code examples below may help; you can also explore further usage examples from its parent module, theano.tensor.
The following presents 15 code examples of the tensor.var method, sorted by popularity by default.
Example 1: batch_normalization
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import var [as alias]
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
    '''Apply batch normalization on x given mean, var, beta and gamma.
    '''
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_test is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_test'):
        return _old_batch_normalization(x, mean, var, beta, gamma, epsilon)
    if mean.ndim == 1:
        # based on TensorFlow's default: normalize along rightmost dimension
        reduction_axes = list(range(x.ndim - 1))
    else:
        reduction_axes = [i for i in range(x.ndim) if mean.broadcastable[i]]
    return T.nnet.bn.batch_normalization_test(
        x, gamma, beta, mean, var, reduction_axes, epsilon)
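A minimal sketch of driving the helper above, assuming a Theano build that ships T.nnet.bn.batch_normalization_test; the statistics are given the same ndim as x and made broadcastable over the batch axis, so the else branch picks reduction_axes = [0] (the tensor names are illustrative, not from the original source):

import theano
import theano.tensor as T

x = T.matrix('x')        # (batch, features)
mean = T.row('mean')     # mean.broadcastable == (True, False)
var = T.row('var')
beta = T.row('beta')
gamma = T.row('gamma')

y = batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3)
f = theano.function([x, mean, var, beta, gamma], y)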
Example 2: batch_normalization
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import var [as alias]
def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3):
    """Apply batch normalization on x given mean, var, beta and gamma.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_test is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_test'):
        return _old_batch_normalization(x, mean, var, beta, gamma, epsilon)
    if gamma is None:
        gamma = ones_like(var)
    if beta is None:
        beta = zeros_like(mean)
    if mean.ndim == 1:
        # based on TensorFlow's default: normalize along rightmost dimension
        reduction_axes = list(range(x.ndim - 1))
    else:
        reduction_axes = [i for i in range(x.ndim) if mean.broadcastable[i]]
    return T.nnet.bn.batch_normalization_test(
        x, gamma, beta, mean, var, reduction_axes, epsilon)
Example 3: batch_normalization
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import var [as alias]
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
    """Apply batch normalization on x given mean, var, beta and gamma.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_test is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_test'):
        return _old_batch_normalization(x, mean, var, beta, gamma, epsilon)
    if gamma is None:
        gamma = ones_like(var)
    if beta is None:
        beta = zeros_like(mean)
    if mean.ndim == 1:
        # based on TensorFlow's default: normalize along rightmost dimension
        reduction_axes = list(range(x.ndim - 1))
    else:
        reduction_axes = [i for i in range(x.ndim) if mean.broadcastable[i]]
    return T.nnet.bn.batch_normalization_test(
        x, gamma, beta, mean, var, reduction_axes, epsilon)
Example 4: compute_output
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import var [as alias]
def compute_output(self, network, in_vw):
    in_var = in_vw.variable
    epsilon = network.find_hyperparameter(["epsilon"], 1e-8)
    # normalize per feature map: reduce over every axis except the channel axis (1)
    axis = tuple([i for i in range(in_vw.ndim) if i != 1])
    mean = in_var.mean(axis=axis, keepdims=True)
    std = T.sqrt(in_var.var(axis=axis, keepdims=True) + epsilon)
    gamma = self._make_param(network, in_vw, "gamma", {"weight"})
    beta = self._make_param(network, in_vw, "beta", {"bias"})
    network.create_vw(
        name="default",
        # NOTE: 20150907 it is faster to combine gamma + std
        # before broadcasting
        variable=(in_var - mean) * ((gamma + 1) / std) + beta,
        shape=in_vw.shape,
        tags={"output"},
    )
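The NOTE above is purely about grouping: (x - mean) * ((gamma + 1) / std) computes the per-channel scale once before broadcasting, instead of dividing the whole feature map by std. A plain-numpy check that the two groupings agree (the values are illustrative):

import numpy as np

x, mean, std, gamma, beta = 2.0, 1.0, 0.5, 0.2, 0.1
fused = (x - mean) * ((gamma + 1) / std) + beta
unfused = (gamma + 1) * (x - mean) / std + beta
assert np.isclose(fused, unfused)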
Example 5: create_updates
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import var [as alias]
# (also requires: import theano, for theano.config.floatX)
def create_updates(self, input):
    if self.mode == 0:
        # fully-connected input: per-feature statistics over the batch axis
        now_mean = T.mean(input, axis=0)
        now_var = T.var(input, axis=0)
        batch = T.cast(input.shape[0], theano.config.floatX)
    else:
        # convolutional input: per-channel statistics over batch and spatial axes
        now_mean = T.mean(input, axis=(0, 2, 3))
        now_var = T.var(input, axis=(0, 2, 3))
        batch = T.cast(input.shape[0] * input.shape[2] * input.shape[3],
                       theano.config.floatX)
    # exponential moving average of the running statistics
    if self.updates is None:
        new_mean = self.momentum * self.mean + (1.0 - self.momentum) * now_mean
        new_var = self.momentum * self.var + (1.0 - self.momentum) * ((batch + 1.0) / batch * now_var)
    else:
        new_mean = self.momentum * self.updates[0][1] + (1.0 - self.momentum) * now_mean
        new_var = self.momentum * self.updates[1][1] + (1.0 - self.momentum) * ((batch + 1.0) / batch * now_var)
    self.updates = [(self.mean, new_mean), (self.var, new_var)]
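The running statistics are a standard exponential moving average: with momentum 0.9, a single update only nudges the stored value toward the batch statistic. A plain-numpy illustration of the formula (the numbers are illustrative):

import numpy as np

momentum = 0.9
running_mean, batch_mean = np.float32(0.0), np.float32(2.0)
new_mean = momentum * running_mean + (1.0 - momentum) * batch_mean
print(new_mean)  # ~0.2: the estimate drifts slowly toward the batch mean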
Example 6: get_result
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import var [as alias]
# (also requires: import theano; import numpy as np)
def get_result(self, input, create_updates):
    if create_updates:
        self.create_updates(input)
    # returns BN result for given input.
    epsilon = np.float64(1e-06).astype(theano.config.floatX)
    if self.mode == 0:
        now_mean = T.mean(input, axis=0)
        now_var = T.var(input, axis=0)
    else:
        now_mean = T.mean(input, axis=(0, 2, 3))
        now_var = T.var(input, axis=(0, 2, 3))
    # blend the batch statistics with the stored running statistics
    now_mean = self.run_mode * self.mean + (1.0 - self.run_mode) * now_mean
    now_var = self.run_mode * self.var + (1.0 - self.run_mode) * now_var
    if self.mode == 0:
        output = self.gamma * (input - now_mean) / T.sqrt(now_var + epsilon) + self.beta
    else:
        output = self.gamma.dimshuffle(('x', 0, 'x', 'x')) * (input - now_mean.dimshuffle(('x', 0, 'x', 'x'))) \
            / T.sqrt(now_var + epsilon).dimshuffle(('x', 0, 'x', 'x')) + self.beta.dimshuffle(('x', 0, 'x', 'x'))
    return output
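run_mode selects between training and inference behaviour: 0 uses pure batch statistics, 1 uses only the stored running averages, and fractional values interpolate. A plain-numpy illustration of the blend (the numbers are illustrative):

import numpy as np

run_mode = 1.0                      # inference: trust only the running average
running_mean, batch_mean = 0.5, 0.8
now_mean = run_mode * running_mean + (1.0 - run_mode) * batch_mean
print(now_mean)  # 0.5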
Example 7: get_step_inputs
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import var [as alias]
def get_step_inputs(self, input_var, states=None, mask=None, additional_inputs=None):
    """
    :type input_var: T.var
    :rtype: dict
    """
    step_inputs = {}
    if self._input_type == "sequence":
        if not additional_inputs:
            additional_inputs = []
        # use an explicit None check: symbolic Theano variables cannot be coerced to bool
        if mask is not None:
            step_inputs['mask'] = mask.dimshuffle(1, 0)
        step_inputs.update(self.merge_inputs(input_var, additional_inputs=additional_inputs))
    else:
        # step_inputs["mask"] = mask.dimshuffle((1, 0)) if mask else None
        if additional_inputs:
            step_inputs.update(self.merge_inputs(None, additional_inputs=additional_inputs))
    if states:
        for name in self.state_names:
            step_inputs[name] = states[name]
    return step_inputs
Example 8: normalize_batch_in_training
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import var [as alias]
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=0.0001):
    '''Compute mean and std for batch, then apply batch_normalization on batch.
    '''
    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)
    # build a shape with size 1 on every reduced axis, for broadcasting
    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)
    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var
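A minimal sketch of calling this helper, assuming it is the Keras Theano-backend function of the same name (so ndim and batch_normalization come from that backend module); the tensors are illustrative:

import theano
import theano.tensor as T

x = T.matrix('x')          # (batch, features)
gamma = T.vector('gamma')  # one scale/shift per feature
beta = T.vector('beta')
normed, mean, var = normalize_batch_in_training(x, gamma, beta, reduction_axes=[0])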
Example 9: var
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import var [as alias]
def var(x, axis=None, keepdims=False):
    return T.var(x, axis=axis, keepdims=keepdims)
Example 10: _old_normalize_batch_in_training
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import var [as alias]
# (also requires: import theano, for theano.config and theano.sandbox.cuda)
def _old_normalize_batch_in_training(x, gamma, beta,
                                     reduction_axes, epsilon=1e-3):
    '''Compute mean and std for batch, then apply batch_normalization on batch.
    '''
    dev = theano.config.device
    use_cudnn = (ndim(x) < 5 and reduction_axes == [0, 2, 3] and
                 (dev.startswith('cuda') or dev.startswith('gpu')))
    if use_cudnn:
        broadcast_beta = beta.dimshuffle('x', 0, 'x', 'x')
        broadcast_gamma = gamma.dimshuffle('x', 0, 'x', 'x')
        try:
            normed, mean, stdinv = theano.sandbox.cuda.dnn.dnn_batch_normalization_train(
                x, broadcast_gamma, broadcast_beta, 'spatial', epsilon)
            # cuDNN returns the inverse standard deviation; convert it to a variance
            var = T.inv(stdinv ** 2)
            return normed, T.flatten(mean), T.flatten(var)
        except AttributeError:
            pass
    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)
    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)
    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var
Example 11: DPrint
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import var [as alias]
# (also requires: import theano, for theano.printing)
def DPrint(name, var):
    # debugging helper: wrap var so its runtime value is printed under the given name
    if PRINT_VARS is False:
        return var
    return theano.printing.Print(name)(var)
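A quick demonstration; PRINT_VARS is a module-level flag in the original source, and the demo assumes DPrint is defined in the current scope with the flag set:

import numpy as np
import theano
import theano.tensor as T

PRINT_VARS = True
x = T.vector('x')
y = DPrint('x_value', x) * 2
f = theano.function([x], y)
f(np.array([1.0, 2.0], dtype=theano.config.floatX))  # prints x_value's runtime value before doubling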
Example 12: NormalizationOperator
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import var [as alias]
def NormalizationOperator(normop_type, x, gamma, mask, estimated_mean=0.0, estimated_var=1.0):
    if normop_type.upper() == 'BN':
        if x.ndim == 3:
            return FeedforwardBatchNormalization(x, gamma, mask, estimated_mean=estimated_mean, estimated_var=estimated_var)
        elif x.ndim == 2:
            return RecurrentBatchNormalization(x, gamma, mask, estimated_mean=estimated_mean, estimated_var=estimated_var)
    elif normop_type.upper() == 'LN':
        return LayerNormalization(x, gamma, mask, estimated_mean=estimated_mean, estimated_var=estimated_var)
    elif normop_type.upper() in ('NONE', ''):
        assert x.ndim == 3 or x.ndim == 2
        # pass-through; the 0.0 * gamma term keeps gamma in the computation graph
        output = x + 0.0 * gamma
        if x.ndim == 3:
            x_mean = T.mean(x, axis=1).dimshuffle(0, 1, 'x')
            x_var = T.var(x, axis=1).dimshuffle(0, 1, 'x')
        else:
            x_mean = T.mean(x, axis=1).dimshuffle(0, 'x')
            x_var = T.var(x, axis=1).dimshuffle(0, 'x')
        return output, x_mean[0], x_var[0]
    else:
        raise ValueError("normop_type must be one of {'BN', 'LN', 'NONE'}!")
# Batch normalization of an input variable over the first and second tensor indices (time x batch example x hidden units).
# Elements where the mask is zero will not be used to compute the mean and variance estimates;
# however, these elements will still be batch normalized.
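The function those comments describe (FeedforwardBatchNormalization) is not included in this snippet; the sketch below is a reconstruction of the masked-statistics idea they state, not the original implementation, and the names and epsilon are assumptions:

import theano.tensor as T

# hypothetical masked statistics: padding positions (mask == 0) are excluded
# from the estimates, but every position is still normalized with them
x = T.tensor3('x')       # (time, batch, hidden)
mask = T.matrix('mask')  # (time, batch): 1.0 for real timesteps, 0.0 for padding
m = mask.dimshuffle(0, 1, 'x')                  # broadcast the mask over hidden units
count = T.sum(mask) + 1e-7                      # number of unmasked positions
masked_mean = T.sum(x * m, axis=(0, 1)) / count # per hidden unit
centered = x - masked_mean
masked_var = T.sum((centered ** 2) * m, axis=(0, 1)) / count
normed = centered / T.sqrt(masked_var + 1e-7)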
Example 13: LayerNormalization
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import var [as alias]
def LayerNormalization(x, gamma, mask, estimated_mean=0.0, estimated_var=1.0):
    assert x.ndim == 3 or x.ndim == 2
    if x.ndim == 3:
        x_mean = T.mean(x, axis=2).dimshuffle(0, 1, 'x')
        x_var = T.var(x, axis=2).dimshuffle(0, 1, 'x')
        return gamma * ((x - x_mean) / T.sqrt(x_var + 1e-7)), x_mean[0, 0], x_var[0, 0]
    elif x.ndim == 2:
        x_mean = T.mean(x, axis=1).dimshuffle(0, 'x')
        x_var = T.var(x, axis=1).dimshuffle(0, 'x')
        return gamma * ((x - x_mean) / T.sqrt(x_var + 1e-7)), x_mean[0], x_var[0]
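To see the normalization numerically, the 2-D branch can be compiled and checked: every row of the output should come out with near-zero mean and unit variance. A small self-contained check (gamma = 1.0 and the test data are illustrative; mask is unused by the code above):

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
out, m, v = LayerNormalization(x, gamma=1.0, mask=None)
f = theano.function([x], out)
sample = np.random.randn(3, 4).astype(theano.config.floatX)
print(f(sample).mean(axis=1))  # ~0 for every row
print(f(sample).var(axis=1))   # ~1 for every row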
Example 14: get_stats
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import var [as alias]
def get_stats(input, stat=None):
    """
    Returns a dictionary mapping the name of the statistic to the result on the input.
    Currently gets mean, var, std, min, max, l1, l2.

    Parameters
    ----------
    input : tensor
        Theano tensor to grab stats for.
    stat : str, list(str), or None
        Name(s) of the statistics to return; None returns all of them.

    Returns
    -------
    dict
        Dictionary of all the statistics expressions {string_name: theano expression}
    """
    stats = {
        'mean': T.mean(input),
        'var': T.var(input),
        'std': T.std(input),
        'min': T.min(input),
        'max': T.max(input),
        'l1': input.norm(L=1),
        'l2': input.norm(L=2),
        # 'num_nonzero': T.sum(T.nonzero(input)),
    }
    stat_list = raise_to_list(stat)
    if stat_list is None:
        return stats
    compiled_stats = {}
    for stat in stat_list:
        if isinstance(stat, string_types) and stat in stats:
            compiled_stats.update({stat: stats[stat]})
    return compiled_stats
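The returned values are symbolic expressions, so they still need to be compiled; a short sketch, assuming get_stats and its helpers (raise_to_list, string_types) are importable from the original module:

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
stats = get_stats(x, stat=['mean', 'std'])
f = theano.function([x], [stats['mean'], stats['std']])
print(f(np.random.randn(5, 3).astype(theano.config.floatX)))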
Example 15: _moments
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import var [as alias]
def _moments(x, axes, shift=None, keep_dims=False):
    ''' Wrapper over the backend moments call. '''
    if K.backend() == 'tensorflow':
        import tensorflow as tf
        return tf.nn.moments(x, axes, shift=shift, keep_dims=keep_dims)
    elif K.backend() == 'theano':
        import theano.tensor as T
        mean_batch = T.mean(x, axis=axes, keepdims=keep_dims)
        var_batch = T.var(x, axis=axes, keepdims=keep_dims)
        return mean_batch, var_batch
    else:
        raise RuntimeError("Currently does not support CNTK backend")