本文整理汇总了Python中theano.tensor.sqrt方法的典型用法代码示例。如果您正苦于以下问题:Python tensor.sqrt方法的具体用法?Python tensor.sqrt怎么用?Python tensor.sqrt使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类theano.tensor
的用法示例。
在下文中一共展示了tensor.sqrt方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: adagrad
# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import sqrt [as 别名]
def adagrad(self, cost, params, lr=1.0, epsilon=1e-6, consider_constant=None):
    """
    Adagrad. Based on http://www.ark.cs.cmu.edu/cdyer/adagrad.pdf

    Accumulates the sum of squared gradients per parameter and divides
    each step by the root of that sum, so frequently-updated parameters
    get smaller effective learning rates.

    Parameters: `cost` symbolic scalar to minimize, `params` list of
    shared variables, `lr` global learning rate, `epsilon` numerical
    stabilizer, `consider_constant` forwarded to get_gradients.
    Returns the list of (shared_variable, new_value) update pairs.
    """
    lr = theano.shared(np.float32(lr).astype(floatX))
    epsilon = theano.shared(np.float32(epsilon).astype(floatX))
    gradients = self.get_gradients(cost, params, consider_constant)
    gsums = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX))
             for param in params]
    updates = []
    for param, gradient, gsum in zip(params, gradients, gsums):
        new_gsum = gsum + gradient ** 2.
        updates.append((gsum, new_gsum))
        # BUG FIX: scale by the *updated* accumulator `new_gsum`, as in the
        # Adagrad paper. The old code divided by the stale `gsum`, which is
        # all zeros on the first step (step = grad/sqrt(epsilon), far too
        # large) and lags the accumulator on every later step.
        updates.append((param, param - lr * gradient / T.sqrt(new_gsum + epsilon)))
    return updates
示例2: adadelta
# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import sqrt [as 别名]
def adadelta(self, cost, params, rho=0.95, epsilon=1e-6, consider_constant=None):
    """
    Adadelta. Based on:
    http://www.matthewzeiler.com/pubs/googleTR2012/googleTR2012.pdf

    Maintains decaying averages of squared gradients and of squared
    parameter deltas; the ratio of their RMS values scales each step, so
    no global learning rate is needed.
    """
    rho = theano.shared(np.float32(rho).astype(floatX))
    epsilon = theano.shared(np.float32(epsilon).astype(floatX))
    gradients = self.get_gradients(cost, params, consider_constant)

    def _zeros_like(p):
        return theano.shared(np.zeros_like(p.get_value(borrow=True)).astype(floatX))

    accu_gradients = [_zeros_like(p) for p in params]
    accu_deltas = [_zeros_like(p) for p in params]

    updates = []
    for p, g, acc_g, acc_d in zip(params, gradients, accu_gradients, accu_deltas):
        acc_g_new = rho * acc_g + (1. - rho) * g ** 2.
        # Step = -RMS(past deltas) / RMS(past grads) * grad, using the
        # freshly updated gradient accumulator.
        step = -T.sqrt((acc_d + epsilon) / (acc_g_new + epsilon)) * g
        acc_d_new = rho * acc_d + (1. - rho) * step ** 2.
        updates.append((acc_g, acc_g_new))
        updates.append((acc_d, acc_d_new))
        updates.append((p, p + step))
    return updates
示例3: rmsprop
# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import sqrt [as 别名]
def rmsprop(self, cost, params, lr=0.001, rho=0.9, eps=1e-6, consider_constant=None):
    """
    RMSProp: divide each gradient by the root of a decaying average of
    its recent squared values.
    """
    lr = theano.shared(np.float32(lr).astype(floatX))
    gradients = self.get_gradients(cost, params, consider_constant)
    accumulators = [theano.shared(np.zeros_like(p.get_value()).astype(np.float32))
                    for p in params]
    updates = []
    for p, g, acc in zip(params, gradients, accumulators):
        acc_new = rho * acc + (1 - rho) * g ** 2
        updates.append((acc, acc_new))
        # Scale the step by the inverse RMS of the gradient history.
        scaled_step = lr * g / T.sqrt(acc_new + eps)
        updates.append((p, p - scaled_step))
    return updates
示例4: Adam
# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import sqrt [as 别名]
def Adam(cost, params, lr=0.0002, b1=0.1, b2=0.001, e=1e-8):
    """
    Adam optimizer (Kingma & Ba). Note the unconventional parameterization:
    here b1/b2 play the role of (1 - beta1)/(1 - beta2) from the paper, and
    the bias correction is folded into the per-step learning rate `lr_t`.
    Returns a list of (shared_variable, new_value) update pairs.
    """
    updates = []
    grads = T.grad(cost, params)
    # Shared step counter, incremented once per call of the compiled function.
    i = theano.shared(np.array(0., theano.config.floatX))
    i_t = i + 1.
    fix1 = 1. - (1. - b1) ** i_t
    fix2 = 1. - (1. - b2) ** i_t
    lr_t = lr * (T.sqrt(fix2) / fix1)
    for p, g in zip(params, grads):
        m = theano.shared(p.get_value() * 0.)
        v = theano.shared(p.get_value() * 0.)
        # EMAs of the gradient and squared gradient.
        m_t = (b1 * g) + ((1. - b1) * m)
        v_t = (b2 * T.sqr(g)) + ((1. - b2) * v)
        g_t = m_t / (T.sqrt(v_t) + e)
        updates.extend([(m, m_t), (v, v_t), (p, p - (lr_t * g_t))])
    updates.append((i, i_t))
    return updates
示例5: RMSprop
# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import sqrt [as 别名]
def RMSprop(cost, params, lr=0.001, rho=0.9, epsilon=1e-6, grads=None):
    """
    RMSprop updates, after:
    https://github.com/Newmu/Theano-Tutorials/blob/master/4_modern_net.py

    If `grads` is not supplied, gradients of `cost` w.r.t. `params` are
    computed here; otherwise the caller's gradients are used (must match
    `params` one-to-one). Each parameter's accumulator shared variable is
    named "<param>/rms/acc", so every `p` must have a `name`.
    """
    if grads is None:
        grads = T.grad(cost=cost, wrt=params)
    assert len(grads) == len(params)
    updates = []
    for p, g in zip(params, grads):
        acc = theano.shared(np.zeros_like(p.get_value(), dtype=np.float32),
                            name="%s/rms/acc" % p.name)
        acc_new = rho * acc + (1 - rho) * g ** 2
        gradient_scaling = T.sqrt(acc_new + epsilon)
        updates.append((acc, acc_new))
        updates.append((p, p - lr * (g / gradient_scaling)))
    return updates
示例6: adam
# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import sqrt [as 别名]
def adam(self, param, grad, updates, sample_idx=None, epsilon=1e-6):
    """
    Bias-corrected adaptive step for one parameter (Adam-style).

    Keeps EMAs of the gradient and squared gradient (decay `self.decay`)
    plus a per-element step counter. With `sample_idx` given, only the
    indexed rows of the state move (sparse update via set_subtensor);
    otherwise the full state advances. New update expressions are written
    into the `updates` dict as a side effect. Returns the symbolic step
    mean_hat / (sqrt(var_hat) + epsilon).
    """
    decay = np.float32(self.decay)
    one_minus_decay = np.float32(1.0 - self.decay)
    acc = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
    meang = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
    countt = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
    if sample_idx is None:
        # Dense path: every element of the state advances.
        acc_new = decay * acc + one_minus_decay * grad ** 2
        meang_new = decay * meang + one_minus_decay * grad
        countt_new = countt + 1
        updates[acc] = acc_new
        updates[meang] = meang_new
        updates[countt] = countt_new
    else:
        # Sparse path: only the sampled rows are read and rewritten.
        acc_s = acc[sample_idx]
        meang_s = meang[sample_idx]
        countt_s = countt[sample_idx]
        acc_new = decay * acc_s + one_minus_decay * grad ** 2
        meang_new = decay * meang_s + one_minus_decay * grad
        countt_new = countt_s + 1.0
        updates[acc] = T.set_subtensor(acc_s, acc_new)
        updates[meang] = T.set_subtensor(meang_s, meang_new)
        updates[countt] = T.set_subtensor(countt_s, countt_new)
    # Bias-correct both moments by 1 - decay^t before forming the ratio.
    return (meang_new / (1 - decay ** countt_new)) / (
        T.sqrt(acc_new / (1 - decay ** countt_new)) + epsilon)
示例7: get_noise
# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import sqrt [as 别名]
def get_noise(self, size):
    """
    Draw a float32 noise tensor of shape `size` from this model's noise
    distribution ("uniform", "gaussian", or "spherical"; defaults to
    "gaussian" when `self.noise` is unset). An int `size` is treated as
    a batch size, expanded with the input space's total dimension.
    """
    # Allow just requesting batch size
    if isinstance(size, int):
        size = (size, self.get_input_space().get_total_dimension())
    if not hasattr(self, 'noise'):
        self.noise = "gaussian"
    kind = self.noise
    if kind == "uniform":
        # Bounds chosen so the uniform has unit variance: sqrt(3).
        bound = np.sqrt(3)
        return self.theano_rng.uniform(low=-bound, high=bound, size=size, dtype='float32')
    if kind == "gaussian":
        return self.theano_rng.normal(size=size, dtype='float32')
    if kind == "spherical":
        samples = self.theano_rng.normal(size=size, dtype='float32')
        # Project each row onto the unit sphere (norm floored at 1e-7).
        row_norms = T.maximum(1e-7, T.sqrt(T.sqr(samples).sum(axis=1)))
        return samples / row_norms.dimshuffle(0, 'x')
    raise NotImplementedError(self.noise)
示例8: _modify_updates
# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import sqrt [as 别名]
def _modify_updates(self, updates):
    """
    Replaces the values in `updates` if needed to enforce the options set
    in the __init__ method, including `max_kernel_norm`.

    Parameters
    ----------
    updates : OrderedDict
        A dictionary mapping parameters (including parameters not
        belonging to this model) to updated values of those parameters.
        The dictionary passed in contains the updates proposed by the
        learning algorithm. This function modifies the dictionary
        directly. The modified version will be compiled and executed
        by the learning algorithm.
    """
    if self.max_kernel_norm is None:
        return
    W, = self.transformer.get_params()
    if W not in updates:
        return
    updated_W = updates[W]
    # Per-output-channel norm over the remaining three axes.
    row_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=(0, 1, 2)))
    desired_norms = T.clip(row_norms, 0, self.max_kernel_norm)
    # Rescale each channel toward its clipped norm (1e-7 avoids /0).
    scales = desired_norms / (1e-7 + row_norms)
    updates[W] = updated_W * scales.dimshuffle('x', 'x', 'x', 0)
示例9: adam_updates
# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import sqrt [as 别名]
def adam_updates(params, cost, lr=0.001, mom1=0.9, mom2=0.999):
    """
    Standard Adam updates (Kingma & Ba) with fixed eps=1e-8.
    `t` is a shared step counter starting at 1 that drives the bias
    correction of both moment estimates.
    """
    grads = T.grad(cost, params)
    t = th.shared(np.cast[th.config.floatX](1.))
    updates = []
    for p, g in zip(params, grads):
        v = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
        mg = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
        # First- and second-moment EMAs.
        v_t = mom1 * v + (1. - mom1) * g
        mg_t = mom2 * mg + (1. - mom2) * T.square(g)
        # Bias-corrected estimates.
        v_hat = v_t / (1. - mom1 ** t)
        mg_hat = mg_t / (1. - mom2 ** t)
        g_t = v_hat / T.sqrt(mg_hat + 1e-8)
        updates.extend([(v, v_t), (mg, mg_t), (p, p - lr * g_t)])
    updates.append((t, t + 1))
    return updates
示例10: get_output_for
# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import sqrt [as 别名]
def get_output_for(self, input, deterministic=False, **kwargs):
    """
    Batch-normalize `input`.

    Deterministic mode normalizes with the stored running mean/variance;
    training mode uses batch statistics and records refreshed running
    averages in `self.bn_updates` (EMA with momentum 0.9; the variance
    update carries the n/(n-1) correction). Optional per-feature scale
    `self.g` and shift `self.b` are applied before the nonlinearity.
    """
    ds = self.dimshuffle_args
    if deterministic:
        mean_b = self.avg_batch_mean.dimshuffle(*ds)
        std_b = T.sqrt(1e-6 + self.avg_batch_var).dimshuffle(*ds)
        norm_features = (input - mean_b) / std_b
    else:
        batch_mean = T.mean(input, axis=self.axes_to_sum).flatten()
        centered_input = input - batch_mean.dimshuffle(*ds)
        batch_var = T.mean(T.square(centered_input), axis=self.axes_to_sum).flatten()
        norm_features = centered_input / T.sqrt(1e-6 + batch_var).dimshuffle(*ds)
        # BN updates: refresh the running statistics.
        new_m = 0.9 * self.avg_batch_mean + 0.1 * batch_mean
        bessel = T.cast((0.1 * input.shape[0]) / (input.shape[0] - 1), th.config.floatX)
        new_v = 0.9 * self.avg_batch_var + bessel * batch_var
        self.bn_updates = [(self.avg_batch_mean, new_m), (self.avg_batch_var, new_v)]
    activation = norm_features
    if hasattr(self, 'g'):
        activation = activation * self.g.dimshuffle(*ds)
    if hasattr(self, 'b'):
        activation = activation + self.b.dimshuffle(*ds)
    return self.nonlinearity(activation)
示例11: rbf_mmd2_streaming_and_ratio
# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import sqrt [as 别名]
def rbf_mmd2_streaming_and_ratio(X, Y, sigma=0):
    """
    Streaming (linear-time) estimator of the squared MMD between samples
    X and Y under an RBF kernel of bandwidth `sigma`, together with the
    ratio of that estimate to its estimated standard deviation.

    NOTE(review): with the default sigma=0, `gamma = 1 / (2 * sigma**2)`
    raises ZeroDivisionError when sigma is a plain Python number — callers
    presumably always pass a positive bandwidth; confirm.
    Relies on the module-level constant `_eps` to keep the sqrt argument
    positive.
    """
    # n = (T.smallest(X.shape[0], Y.shape[0]) // 2) * 2
    n = (X.shape[0] // 2) * 2
    gamma = 1 / (2 * sigma**2)
    rbf = lambda A, B: T.exp(-gamma * ((A - B) ** 2).sum(axis=1))
    # One h term per disjoint quadruple (x_2i, x_{2i+1}, y_2i, y_{2i+1}).
    h_bits = (rbf(X[:n:2], X[1:n:2]) + rbf(Y[:n:2], Y[1:n:2])
        - rbf(X[:n:2], Y[1:n:2]) - rbf(X[1:n:2], Y[:n:2]))
    mmd2 = h_bits.mean()
    # variance is 1/2 E_{v, v'} (h(v) - h(v'))^2
    # estimate with even, odd diffs
    m = (n // 2) * 2
    approx_var = 1/2 * ((h_bits[:m:2] - h_bits[1:m:2]) ** 2).mean()
    ratio = mmd2 / T.sqrt(T.largest(approx_var, _eps))
    return mmd2, ratio
################################################################################
### MMD with linear kernel
# Hotelling test statistic is from:
# Jitkrittum, Szabo, Chwialkowski, and Gretton.
# Interpretable Distribution Features with Maximum Testing Power.
# NIPS 2016.
示例12: __call__
# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import sqrt [as 别名]
def __call__(self, params, cost):
    """
    Build Adam updates for `params` minimizing `cost`, with the beta1
    decay schedule `self.l`. Gradients are norm-clipped and passed
    through the attached regularizer; the shared step counter `t` drives
    the bias correction.
    """
    updates = []
    grads = T.grad(cost, params)
    grads = clip_norms(grads, self.clipnorm)
    t = theano.shared(floatX(1.))
    # beta1 decayed toward zero over steps (l^(t-1) schedule).
    b1_t = self.b1 * self.l ** (t - 1)
    for p, g in zip(params, grads):
        g = self.regularizer.gradient_regularize(p, g)
        m = theano.shared(p.get_value() * 0.)
        v = theano.shared(p.get_value() * 0.)
        m_t = b1_t * m + (1 - b1_t) * g
        v_t = self.b2 * v + (1 - self.b2) * g ** 2
        # Bias-corrected moment estimates.
        m_c = m_t / (1 - self.b1 ** t)
        v_c = v_t / (1 - self.b2 ** t)
        p_t = p - (self.lr * m_c) / (T.sqrt(v_c) + self.e)
        p_t = self.regularizer.weight_regularize(p_t)
        updates.extend([(m, m_t), (v, v_t), (p, p_t)])
    updates.append((t, t + 1.))
    return updates
示例13: __init__
# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import sqrt [as 别名]
def __init__(self):
    """
    Two-joint reacher cost: control effort plus Euclidean distance from
    the arm tip to the target. Link lengths 0.1 and 0.11 and the state
    layout follow the underlying reacher model — presumably the MuJoCo
    Reacher environment; confirm against the environment definition.
    """
    def f(x, u, i, terminal):
        # Control cost: squared action magnitude; zero at the terminal
        # step, where no action is applied.
        if terminal:
            ctrl_cost = T.zeros_like(x[..., 0])
        else:
            ctrl_cost = T.square(u).sum(axis=-1)
        # x: (batch_size, 8)
        # x[..., 0:4]: qpos
        # x[..., 4:8]: qvel, time derivatives of qpos, not used in the cost.
        theta = x[..., 0]  # qpos[0]: angle of joint 0
        phi = x[..., 1]  # qpos[1]: angle of joint 1
        target_xpos = x[..., 2:4]  # qpos[2:4], target x & y coordinate
        # Forward kinematics: elbow position, then fingertip offset.
        body1_xpos = 0.1 * T.stack([T.cos(theta), T.sin(theta)], axis=1)
        tip_xpos_incr = 0.11 * T.stack([T.cos(phi), T.sin(phi)], axis=1)
        tip_xpos = body1_xpos + tip_xpos_incr
        delta = tip_xpos - target_xpos
        # State cost: distance from fingertip to target.
        state_cost = T.sqrt(T.sum(delta * delta, axis=-1))
        cost = state_cost + ctrl_cost
        return cost
    super().__init__(f, state_size=8, action_size=2)
示例14: adadelta
# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import sqrt [as 别名]
def adadelta(lr, tparams, grads, inp, cost):
    """
    Compile a one-shot Adadelta (Zeiler 2012) update function.

    `tparams` maps names to shared parameters (Python 2 dict; order must
    match `itemlist(tparams)`), `grads` are the matching gradient
    expressions. Returns a compiled function evaluating `cost` on
    `inp + [lr]` while applying the parameter updates in the same call.
    NOTE: `inp` is mutated in place (lr appended), and the learning-rate
    input is unused by Adadelta itself (hence on_unused_input='ignore').
    """
    running_up2 = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rup2' % k)
                   for k, p in tparams.iteritems()]
    running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rgrad2' % k)
                      for k, p in tparams.iteritems()]

    # Decayed average of squared gradients (rho = 0.95).
    rg2_new = [0.95 * rg2 + 0.05 * (g ** 2) for rg2, g in zip(running_grads2, grads)]
    rg2up = [(rg2, fresh) for rg2, fresh in zip(running_grads2, rg2_new)]
    # Step = -RMS(accumulated deltas) / RMS(accumulated grads) * grad,
    # using the freshly computed gradient accumulator values.
    updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * g
             for g, ru2, rg2 in zip(grads, running_up2, rg2_new)]
    ru2up = [(ru2, 0.95 * ru2 + 0.05 * (step ** 2)) for ru2, step in zip(running_up2, updir)]
    param_up = [(p, p + step) for p, step in zip(itemlist(tparams), updir)]

    inp += [lr]
    f_update = theano.function(inp, cost, updates=rg2up + ru2up + param_up,
                               on_unused_input='ignore', profile=profile)
    return f_update
示例15: debugging_adadelta
# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import sqrt [as 别名]
def debugging_adadelta(lr, tparams, grads, inp, cost):
    """
    Two-phase Adadelta for debugging: `f_grad_shared(*inp)` evaluates the
    cost while stashing gradients and the squared-gradient averages in
    shared variables; `f_update(lr)` then computes and applies the
    parameter step from the stashed state. The lr argument is unused
    (hence on_unused_input='ignore').
    """
    zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_grad' % k)
                    for k, p in tparams.iteritems()]
    running_up2 = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rup2' % k)
                   for k, p in tparams.iteritems()]
    running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rgrad2' % k)
                      for k, p in tparams.iteritems()]

    # Phase 1: copy the gradients and refresh the squared-gradient average.
    zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
    rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2)) for rg2, g in zip(running_grads2, grads)]
    f_grad_shared = theano.function(inp, cost, updates=zgup + rg2up, profile=profile)

    # Phase 2: form the step from the stored state (now holding phase-1
    # results) and apply it, updating the delta accumulator as well.
    updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * zg
             for zg, ru2, rg2 in zip(zipped_grads, running_up2, running_grads2)]
    ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2)) for ru2, ud in zip(running_up2, updir)]
    param_up = [(p, p + ud) for p, ud in zip(itemlist(tparams), updir)]
    f_update = theano.function([lr], [], updates=ru2up + param_up,
                               on_unused_input='ignore', profile=profile)
    return f_grad_shared, f_update