This article collects typical usage examples of the theano.tensor.square method in Python. If you are unsure what tensor.square does, how to call it, or where it is used in practice, the curated examples below should help. You can also explore further usage examples from its parent module, theano.tensor.
Below are 13 code examples of tensor.square, drawn from open-source projects and sorted by popularity by default.
Example 1: adam_updates
# Required imports (this example aliases theano as `th` and theano.tensor as `T`):
import numpy as np
import theano as th
import theano.tensor as T
def adam_updates(params, cost, lr=0.001, mom1=0.9, mom2=0.999):
    ''' Adam optimizer: returns a list of (shared variable, update) pairs. '''
    updates = []
    grads = T.grad(cost, params)
    t = th.shared(np.cast[th.config.floatX](1.))
    for p, g in zip(params, grads):
        # first and second moment accumulators, initialized to zero
        v = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
        mg = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
        v_t = mom1*v + (1. - mom1)*g
        mg_t = mom2*mg + (1. - mom2)*T.square(g)
        # bias-corrected moment estimates
        v_hat = v_t / (1. - mom1 ** t)
        mg_hat = mg_t / (1. - mom2 ** t)
        g_t = v_hat / T.sqrt(mg_hat + 1e-8)
        p_t = p - lr * g_t
        updates.append((v, v_t))
        updates.append((mg, mg_t))
        updates.append((p, p_t))
    # advance the shared timestep once per call to the compiled update function
    updates.append((t, t+1))
    return updates
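A minimal usage sketch (not from the original repository; the model, shapes, and learning rate are illustrative assumptions): compile a training function that applies these Adam updates to a least-squares cost.

# (uses np, th, and T as imported above)
x = T.matrix('x')
y = T.vector('y')
w = th.shared(np.zeros(3, dtype=th.config.floatX), name='w')
cost = T.mean(T.square(T.dot(x, w) - y))
train = th.function([x, y], cost, updates=adam_updates([w], cost, lr=1e-3))
train(np.random.randn(8, 3).astype(th.config.floatX),
      np.random.randn(8).astype(th.config.floatX))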
Example 2: get_output_for
# Required imports (this method belongs to a lasagne Layer subclass; theano is
# aliased as `th` and theano.tensor as `T`):
import theano as th
import theano.tensor as T
def get_output_for(self, input, deterministic=False, **kwargs):
    if deterministic:
        norm_features = (input-self.avg_batch_mean.dimshuffle(*self.dimshuffle_args)) / T.sqrt(1e-6 + self.avg_batch_var).dimshuffle(*self.dimshuffle_args)
    else:
        batch_mean = T.mean(input, axis=self.axes_to_sum).flatten()
        centered_input = input - batch_mean.dimshuffle(*self.dimshuffle_args)
        batch_var = T.mean(T.square(centered_input), axis=self.axes_to_sum).flatten()
        batch_stdv = T.sqrt(1e-6 + batch_var)
        norm_features = centered_input / batch_stdv.dimshuffle(*self.dimshuffle_args)
        # BN updates: exponential moving averages of the batch statistics
        new_m = 0.9*self.avg_batch_mean + 0.1*batch_mean
        new_v = 0.9*self.avg_batch_var + T.cast((0.1*input.shape[0])/(input.shape[0]-1), th.config.floatX)*batch_var
        self.bn_updates = [(self.avg_batch_mean, new_m), (self.avg_batch_var, new_v)]
    if hasattr(self, 'g'):
        activation = norm_features*self.g.dimshuffle(*self.dimshuffle_args)
    else:
        activation = norm_features
    if hasattr(self, 'b'):
        activation += self.b.dimshuffle(*self.dimshuffle_args)
    return self.nonlinearity(activation)
Example 3: __init__
# Required imports (theano.tensor aliased as `T`):
import theano.tensor as T
def __init__(self):
    def f(x, u, i, terminal):
        if terminal:
            ctrl_cost = T.zeros_like(x[..., 0])
        else:
            ctrl_cost = T.square(u).sum(axis=-1)
        # x: (batch_size, 8)
        # x[..., 0:4]: qpos
        # x[..., 4:8]: qvel, time derivatives of qpos, not used in the cost.
        theta = x[..., 0]          # qpos[0]: angle of joint 0
        phi = x[..., 1]            # qpos[1]: angle of joint 1
        target_xpos = x[..., 2:4]  # qpos[2:4]: target x & y coordinates
        body1_xpos = 0.1 * T.stack([T.cos(theta), T.sin(theta)], axis=1)
        tip_xpos_incr = 0.11 * T.stack([T.cos(phi), T.sin(phi)], axis=1)
        tip_xpos = body1_xpos + tip_xpos_incr
        delta = tip_xpos - target_xpos
        state_cost = T.sqrt(T.sum(delta * delta, axis=-1))
        cost = state_cost + ctrl_cost
        return cost

    super().__init__(f, state_size=8, action_size=2)
Example 4: l2normalize
# Required imports (uses lasagne; theano.tensor aliased as `T`):
import lasagne
import theano.tensor as T
def l2normalize(layer, train_scale=True):
    W_param = layer.W
    s = W_param.get_value().shape
    if len(s) == 4:  # convolutional layer: normalize each output filter
        axes_to_sum = (1, 2, 3)
        dimshuffle_args = [0, 'x', 'x', 'x']
        k = s[0]
    else:            # dense layer: normalize each output column
        axes_to_sum = 0
        dimshuffle_args = ['x', 0]
        k = s[1]
    layer.W_scale = layer.add_param(lasagne.init.Constant(1.),
                                    (k,), name="W_scale", trainable=train_scale, regularizable=False)
    layer.W = W_param * (layer.W_scale/T.sqrt(1e-6 + T.sum(T.square(W_param), axis=axes_to_sum))).dimshuffle(*dimshuffle_args)
    return layer

# fully connected layer with weight normalization
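A hedged usage sketch (assumes lasagne is installed; the layer sizes are illustrative): wrap a dense layer so each weight column is L2-normalized and rescaled by the learned W_scale parameter.

import lasagne.layers as ll
l_in = ll.InputLayer((None, 100))
l_dense = l2normalize(ll.DenseLayer(l_in, num_units=10))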
Example 5: get_output_for
# Required imports (as in Example 2, this method belongs to a lasagne Layer
# subclass; theano is aliased as `th` and theano.tensor as `T`):
import theano as th
import theano.tensor as T
def get_output_for(self, input, deterministic=False, **kwargs):
    if deterministic:
        norm_features = (input-self.avg_batch_mean.dimshuffle(*self.dimshuffle_args)) / T.sqrt(1e-6 + self.avg_batch_var).dimshuffle(*self.dimshuffle_args)
    else:
        batch_mean = T.mean(input, axis=self.axes_to_sum).flatten()
        centered_input = input - batch_mean.dimshuffle(*self.dimshuffle_args)
        batch_var = T.mean(T.square(centered_input), axis=self.axes_to_sum).flatten()
        batch_stdv = T.sqrt(1e-6 + batch_var)
        norm_features = centered_input / batch_stdv.dimshuffle(*self.dimshuffle_args)
        # BN updates
        new_m = 0.9*self.avg_batch_mean + 0.1*batch_mean
        new_v = 0.9*self.avg_batch_var + T.cast((0.1*input.shape[0])/(input.shape[0]-1.), th.config.floatX)*batch_var
        self.bn_updates = [(self.avg_batch_mean, new_m), (self.avg_batch_var, new_v)]
    if hasattr(self, 'g'):
        activation = norm_features*self.g.dimshuffle(*self.dimshuffle_args)
    else:
        activation = norm_features
    if hasattr(self, 'b'):
        activation += self.b.dimshuffle(*self.dimshuffle_args)
    return self.nonlinearity(activation)
Example 6: __init__
# Required imports (uses theano's lazy ifelse; `nonlinearity` is a helper from
# the source repository):
import numpy as np
import theano
import theano.tensor as T
from theano.ifelse import ifelse
def __init__(self, input, centerbias=None, alpha=1.0):
    self.input = input
    if centerbias is None:
        centerbias = np.ones(12)
    self.alpha = theano.shared(value=np.array(alpha).astype(theano.config.floatX), name='alpha')
    self.centerbias_ys = theano.shared(value=np.array(centerbias, dtype=theano.config.floatX), name='centerbias_ys')
    self.centerbias_xs = theano.shared(value=np.linspace(0, 1, len(centerbias), dtype=theano.config.floatX), name='centerbias_xs')
    height = T.cast(input.shape[0], theano.config.floatX)
    width = T.cast(input.shape[1], theano.config.floatX)
    x_coords = (T.arange(width) - 0.5*width) / (0.5*width)
    y_coords = (T.arange(height) - 0.5*height) / (0.5*height) + 0.0001  # We cannot have zeros in there because of grad
    x_coords = x_coords.dimshuffle('x', 0)
    y_coords = y_coords.dimshuffle(0, 'x')
    dists = T.sqrt(T.square(x_coords) + self.alpha*T.square(y_coords))
    self.max_dist = T.sqrt(1 + self.alpha)
    self.dists = dists/self.max_dist
    self.factors = nonlinearity(self.dists, self.centerbias_xs, self.centerbias_ys, len(centerbias))
    apply_centerbias = T.gt(self.centerbias_ys.shape[0], 2)
    self.output = ifelse(apply_centerbias, self.input*self.factors, self.input)
    self.params = [self.centerbias_ys, self.alpha]
Example 7: gaussian_log_likelihood
# Required imports (theano.tensor aliased as `tt`):
import theano.tensor as tt
def gaussian_log_likelihood(targets, pred_mean, pred_std=None):
    ''' Computes the log likelihood for Gaussian distributed predictions.
        This assumes diagonal covariances (constant terms are dropped).
    '''
    delta = pred_mean - targets
    # note that if the noise is a 1xD vector, broadcasting rules apply
    if pred_std is not None:  # explicit None check: truth-testing a tensor raises
        # sum over output dimensions
        lml = -tt.square(delta/pred_std).sum(-1)*0.5 - tt.log(pred_std).sum(-1)
    else:
        # sum over output dimensions
        lml = -tt.square(delta).sum(-1)*0.5
    # sum over all examples
    return lml.sum()
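A hedged usage sketch (shapes and values are illustrative, not from the original repository): with unit errors in every dimension and no pred_std, each of the 5 rows contributes -0.5 * 2, so the total is -5.0.

import numpy as np
import theano
targets = tt.matrix('targets')      # (N, D)
pred_mean = tt.matrix('pred_mean')  # (N, D)
f = theano.function([targets, pred_mean], gaussian_log_likelihood(targets, pred_mean))
print(f(np.zeros((5, 2), dtype=theano.config.floatX),
        np.ones((5, 2), dtype=theano.config.floatX)))  # -> -5.0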
Example 8: rbf_kernel
# Required imports (theano.tensor aliased as `T`):
import theano.tensor as T
def rbf_kernel(X0):
    # H holds the squared pairwise distances ||x_i - x_j||^2
    XY = T.dot(X0, X0.transpose())
    x2 = T.reshape(T.sum(T.square(X0), axis=1), (X0.shape[0], 1))
    X2e = T.repeat(x2, X0.shape[0], axis=1)
    H = T.sub(T.add(X2e, X2e.transpose()), 2 * XY)
    V = H.flatten()
    # median distance heuristic for the kernel bandwidth
    h = T.switch(T.eq((V.shape[0] % 2), 0),
                 # if even vector
                 T.mean(T.sort(V)[((V.shape[0] // 2) - 1):((V.shape[0] // 2) + 1)]),
                 # if odd vector
                 T.sort(V)[V.shape[0] // 2])
    h = T.sqrt(0.5 * h / T.log(X0.shape[0].astype('float32') + 1.0)) / 2.
    Kxy = T.exp(-H / h ** 2 / 2.0)
    neighbors = T.argsort(H, axis=1)[:, 1]  # index of each point's nearest neighbor
    return Kxy, neighbors, h
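A hedged usage sketch (particle count and dimensionality are illustrative assumptions): compile the kernel and evaluate it on a random particle set.

import numpy as np
import theano
X = T.matrix('X')
Kxy, neighbors, h = rbf_kernel(X)
f = theano.function([X], [Kxy, neighbors, h])
K, nb, bw = f(np.random.randn(10, 3).astype(theano.config.floatX))
print(K.shape, nb.shape)  # (10, 10) (10,)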
Example 9: __init__
# Required imports (`variable_scope` and `get_variable` are helpers from the
# source repository, not part of theano):
import numpy as np
import theano
from theano import tensor
def __init__(self, dim, eps=1e-6, init_count=0, init_mean=0., init_meansq=1.):
    '''
    Args:
        dim: dimension of the space of points to be standardized
        eps: small constant to add to denominators to prevent division by 0
        init_count, init_mean, init_meansq: initial values for accumulators

    Note:
        if init_count is 0, then init_mean and init_meansq have no effect beyond
        the first call to update(), which will ignore their values and
        replace them with values from a new batch of data.
    '''
    self._eps = eps
    self._dim = dim
    with variable_scope(type(self).__name__) as self.__varscope:
        self._count = get_variable('count', np.array(float(init_count)), trainable=False)
        self._mean_1_D = get_variable('mean_1_D', np.full((1, self._dim), init_mean), broadcastable=(True, False), trainable=False)
        self._meansq_1_D = get_variable('meansq_1_D', np.full((1, self._dim), init_meansq), broadcastable=(True, False), trainable=False)
    self._stdev_1_D = tensor.sqrt(tensor.nnet.relu(self._meansq_1_D - tensor.square(self._mean_1_D)))
    # Relu ensures the argument is nonnegative. Maybe the better choice would
    # have been to add self._eps inside the square root, but it is kept this way
    # to preserve backwards compatibility with existing saved models.
    self.get_mean = self._mean_1_D.get_value
    self.get_stdev = theano.function([], self._stdev_1_D[0, :])  # TODO: return with shape (1,D)
Example 10: adam
# Required imports:
import numpy as np
import theano
from theano import tensor
def adam(cost, params, lr, beta1=0.9, beta2=0.999, eps=1e-8):
    updates = []
    grads = tensor.grad(cost, params)
    assert len(params) == len(grads)
    t0 = theano.shared(np.array(0., dtype=theano.config.floatX))
    t = t0 + 1
    # fold both bias corrections into a single step size
    corr1 = (1 - beta1**t)
    corr2 = (1 - beta2**t)
    alpha = lr * tensor.sqrt(corr2) / corr1
    for p, g in zip(params, grads):
        m = theano.shared(value=np.zeros(p.get_value().shape, dtype=theano.config.floatX), broadcastable=p.broadcastable)
        v = theano.shared(value=np.zeros(p.get_value().shape, dtype=theano.config.floatX), broadcastable=p.broadcastable)
        m_t = beta1 * m + (1 - beta1) * g
        v_t = beta2 * v + (1 - beta2) * tensor.square(g)
        p_t = p - alpha * m_t/(tensor.sqrt(v_t) + eps)
        updates.append((m, m_t))
        updates.append((v, v_t))
        updates.append((p, p_t))
    updates.append((t0, t))
    return updates
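A hedged usage sketch (the toy cost and learning rate are illustrative assumptions). Unlike Example 1, this variant applies the bias corrections through the step size alpha rather than through corrected moment estimates; the resulting updates match up to where the epsilon constant is applied.

# (uses np, theano, and tensor as imported above)
w = theano.shared(np.zeros(4, dtype=theano.config.floatX), name='w')
x = tensor.vector('x')
cost = tensor.sum(tensor.square(w - x))
step = theano.function([x], cost, updates=adam(cost, [w], lr=0.01))
step(np.ones(4, dtype=theano.config.floatX))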
Example 11: __init__
# Required imports (`EzPickle`, `LbfgsOptimizer`, and `FNOPTS` come from the
# source repository):
import theano
import theano.tensor as T
def __init__(self, net, mixfrac=1.0, maxiter=25):
    EzPickle.__init__(self, net, mixfrac, maxiter)
    self.net = net
    self.mixfrac = mixfrac
    x_nx = net.input
    self.predict = theano.function([x_nx], net.output, **FNOPTS)
    ypred_ny = net.output
    ytarg_ny = T.matrix("ytarg")
    var_list = net.trainable_weights
    # L2 weight penalty plus mean squared error over the batch
    l2 = 1e-3 * T.add(*[T.square(v).sum() for v in var_list])
    N = x_nx.shape[0]
    mse = T.sum(T.square(ytarg_ny - ypred_ny))/N
    symb_args = [x_nx, ytarg_ny]
    loss = mse + l2
    self.opt = LbfgsOptimizer(loss, var_list, symb_args, maxiter=maxiter,
                              extra_losses={"mse": mse, "l2": l2})
Example 12: square
# Required import (theano.tensor aliased as `T`):
import theano.tensor as T
def square(x):
    return T.sqr(x)
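A hedged check (input values are illustrative): T.sqr is the elementwise square, so this backend function behaves like tensor.square.

import numpy as np
import theano
v = T.vector('v')
print(theano.function([v], square(v))(np.array([1., -2.], dtype=theano.config.floatX)))  # [1. 4.]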
Example 13: l2_normalize
# Required import (theano.tensor aliased as `T`):
import theano.tensor as T
def l2_normalize(x, axis):
    norm = T.sqrt(T.sum(T.square(x), axis=axis, keepdims=True))
    return x / norm
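A hedged usage sketch (the input matrix is an illustrative assumption): normalize each row of a matrix to unit L2 norm.

import numpy as np
import theano
x = T.matrix('x')
f = theano.function([x], l2_normalize(x, axis=1))
print(f(np.array([[3., 4.], [0., 2.]], dtype=theano.config.floatX)))
# -> [[0.6 0.8]
#     [0.  1. ]]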