This article collects and summarizes typical usage examples of the theano.tensor.pow method in Python. If you have been wondering how exactly to use Python's tensor.pow, the curated code samples below may help. You can also browse further usage examples from the theano.tensor module.
The following presents 15 code examples of tensor.pow, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
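Before diving into the examples, here is a minimal, self-contained sketch of what theano.tensor.pow does: it builds an element-wise exponentiation node in the computation graph (the Theano counterpart of numpy.power). The variable names are illustrative.

import numpy as np
import theano
import theano.tensor as T

x = T.dvector('x')
y = T.pow(x, 3)  # builds the same graph as x ** 3
cube = theano.function([x], y)
print(cube(np.array([1., 2., 3.])))  # -> [ 1.  8. 27.]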
Example 1: get_adam_updates
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import pow [as alias]
def get_adam_updates(f, params, lr=10., b1=0.9, b2=0.999, e=1e-8, dec=5e-3, norm_grads=False):
    """Generate updates to optimize using the Adam optimizer with linear learning rate decay."""
    t = theano.shared(0)
    ms = [theano.shared(np.zeros(param.shape.eval(), dtype=floatX), borrow=True) for param in params]
    vs = [theano.shared(np.zeros(param.shape.eval(), dtype=floatX), borrow=True) for param in params]

    gs = T.grad(f, params)
    if norm_grads:
        gs = [g / (T.sum(T.abs_(g)) + 1e-8) for g in gs]

    t_u = (t, t + 1)
    m_us = [(m, b1 * m + (1. - b1) * g) for m, g in zip(ms, gs)]
    v_us = [(v, b2 * v + (1. - b2) * T.sqr(g)) for v, g in zip(vs, gs)]
    t_u_f = T.cast(t_u[1], floatX)
    lr_hat = (lr / (1. + t_u_f * dec)) * T.sqrt(1. - T.pow(b2, t_u_f)) / (1. - T.pow(b1, t_u_f))
    param_us = [(param, param - lr_hat * m_u[1] / (T.sqrt(v_u[1]) + e)) for m_u, v_u, param in zip(m_us, v_us, params)]
    return m_us + v_us + param_us + [t_u]
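A minimal usage sketch for get_adam_updates (assuming floatX = theano.config.floatX, as the dtype arguments above imply; the quadratic toy objective is illustrative, not from the original repository):

import numpy as np
import theano
import theano.tensor as T

floatX = theano.config.floatX

w = theano.shared(np.zeros(3, dtype=floatX), name='w')
target = np.asarray([1., -2., 0.5], dtype=floatX)
loss = T.sum(T.sqr(w - target))  # toy objective, minimized at w == target

train_step = theano.function([], loss, updates=get_adam_updates(loss, [w], lr=0.1))
for _ in range(500):
    train_step()
# w.get_value() should now be close to target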
Example 2: test_sharpening
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import pow [as alias]
def test_sharpening():
    weight_var, gamma_var = T.tensor3s('weight', 'gamma')
    gamma_var = T.addbroadcast(gamma_var, 2)
    w = T.pow(weight_var + 1e-6, gamma_var)
    w /= T.sum(w, axis=2).dimshuffle(0, 1, 'x')
    sharpening_fn = theano.function([weight_var, gamma_var], w)

    weights = np.random.rand(16, 4, 128)
    gamma = np.random.rand(16, 4, 1)
    weight_t = sharpening_fn(weights, gamma)

    weight_t_manual = np.zeros_like(weight_t)
    for i in range(16):
        for j in range(4):
            for k in range(128):
                weight_t_manual[i, j, k] = np.power(weights[i, j, k] + 1e-6, gamma[i, j])
            weight_t_manual[i, j] /= np.sum(weight_t_manual[i, j])

    assert weight_t.shape == (16, 4, 128)
    assert np.allclose(weight_t, weight_t_manual)
Example 3: compute_sub_all_scores
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import pow [as alias]
def compute_sub_all_scores(self, start_end):
    shp0 = len(start_end)
    tra_ls = self.tra_pois_masks[start_end, T.sum(self.tra_masks[start_end], axis=1) - 1]
    ls = T.concatenate([tra_ls.reshape((shp0, 1)),
                        self.tes_pois_masks[start_end, :T.max(T.sum(self.tes_masks[start_end], axis=1)) - 1]], axis=1)
    dsl = self.trained_ds[ls]
    # n_item+1 x latent_size
    du = self.trained_du[start_end]
    dp = self.trained_dp
    ds = self.trained_ds
    _, shp1, shp3 = dsl.shape
    shp2, shp3 = dp.shape
    wl = T.pow(1 + cal_dis(self.cordi[ls][:, :, 0].reshape((shp0, shp1, 1)),
                           self.cordi[ls][:, :, 1].reshape((shp0, shp1, 1)),
                           self.cordi[:, 0].reshape((1, 1, shp2 + 1)),
                           self.cordi[:, 1].reshape((1, 1, shp2 + 1))), 0.25)
    sub_all_scores = - wl[:, :, :-1] * (
        self.cw * T.sum(T.pow(du.reshape((shp0, 1, shp3)) - dp.reshape((1, shp2, shp3)), 2), axis=2).reshape((shp0, 1, shp2)) +
        (1 - self.cw) * T.sum(T.pow(dsl.reshape((shp0, shp1, 1, shp3)) -
                                    ds[:-1].reshape((1, 1, shp2, shp3)), 2), axis=3))
    # sub_all_scores = - (self.cw * T.sum(T.pow(du.reshape((shp0, 1, shp3)) -
    #                                           dp.reshape((1, shp2, shp3)), 2), axis=2).reshape((shp0, 1, shp2)) +
    #                     (1 - self.cw) * T.sum(T.pow((dsl.reshape((shp0, shp1, 1, shp3)) -
    #                                                  ds[:-1].reshape((1, 1, shp2, shp3))), 2), axis=3))
    return T.reshape(sub_all_scores, (shp0 * shp1, shp2)).eval()
Example 4: get_parent_state
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import pow [as alias]
def get_parent_state(self, children_states, node_type, use_dropout: bool, iteration_number) -> tuple:
    layer_input = T.flatten(children_states)
    nn_out = self.__compute_layer_output(layer_input, node_type, use_dropout, iteration_number)
    encoder_input = T.flatten(T.concatenate((children_states, nn_out))) * self.__ae_noise
    encoding = T.tanh(T.dot(encoder_input, self.__encoder_weights[node_type]))
    decoded = T.tanh(T.dot(encoding, self.__decoder_weights))
    decoded /= decoded.norm(2) / layer_input.norm(2)
    output_reconstruction = self.__compute_layer_output(decoded, node_type, use_dropout, iteration_number)
    reconstruction_cos = T.dot(nn_out[0], output_reconstruction[0])
    children_reconstruction_cos = T.dot(decoded, layer_input)
    additional_objective = reconstruction_cos + children_reconstruction_cos
    constrain_usage_pct = T.cast(1. - T.pow(self.__hyperparameters['constrain_intro_rate'], iteration_number),
                                 theano.config.floatX)
    return nn_out[0], constrain_usage_pct * additional_objective
Example 5: launch_experiments
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import pow [as alias]
def launch_experiments(args):
    # create Theano variables for input and target minibatch
    input_var = T.fmatrix('X')
    # Create tang function
    # need abs to avoid nan with pow on the GPU
    f0 = 0.5 * (T.pow(abs(input_var), 4) - 16 * T.pow(abs(input_var), 2) + 5 * input_var)
    tang_output = f0.sum(axis=-1)
    # Create and train network normally
    network = create_student_model(input_var)
    out = standard_train(args, input_var, network, tang_output)
    # Create and train network with Sobolev
    network_sobolev = create_student_model(input_var)
    out_sobolev = sobolev_train(args, input_var, network_sobolev, tang_output)
    # Now plot and compare outputs
    plot_results.plot_results(args, out, out_sobolev)
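The teacher function here is the Styblinski–Tang function. As a standalone sanity check (independent of the create_student_model / standard_train / sobolev_train helpers, which live elsewhere in the repository), the same expression can be compiled and evaluated at the known per-dimension minimizer x_i ≈ -2.903534, where each dimension contributes ≈ -39.17:

import numpy as np
import theano
import theano.tensor as T

input_var = T.fmatrix('X')
f0 = 0.5 * (T.pow(abs(input_var), 4) - 16 * T.pow(abs(input_var), 2) + 5 * input_var)
tang = theano.function([input_var], f0.sum(axis=-1))

x_min = np.full((1, 2), -2.903534, dtype='float32')
print(tang(x_min))  # -> roughly [-78.33], i.e. 2 dimensions * -39.17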
Example 6: pow
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import pow [as alias]
def pow(x, a):
    return T.pow(x, a)
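A quick check of this backend-style wrapper (a sketch; it assumes the pow defined above is in scope, and note that it shadows Python's built-in pow within its module):

import theano
import theano.tensor as T

x = T.dscalar('x')
square = theano.function([x], pow(x, 2))
assert square(3.0) == 9.0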
Example 7: ADAM
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import pow [as alias]
def ADAM(lr, params, grads, loss, iteration, beta_1=0.9, beta_2=0.999, epsilon=1e-8):
    """
    ADAM update
    """
    t = iteration
    lr_t = lr * T.sqrt(1 - T.pow(beta_2, t)) / (1 - T.pow(beta_1, t))
    w_decay = cfg.TRAIN.WEIGHT_DECAY
    updates = []
    for p, g in zip(params, grads):
        # zero init of moment
        m = theano.shared(p.val.get_value() * 0.)
        # zero init of velocity
        v = theano.shared(p.val.get_value() * 0.)
        if p.is_bias or w_decay == 0:
            regularized_g = g
        else:
            regularized_g = g + w_decay * p.val
        m_t = (beta_1 * m) + (1 - beta_1) * regularized_g
        v_t = (beta_2 * v) + (1 - beta_2) * T.square(regularized_g)
        p_t = p.val - lr_t * m_t / (T.sqrt(v_t) + epsilon)
        updates.append((m, m_t))
        updates.append((v, v_t))
        updates.append((p.val, p_t))
    return updates
Example 8: __init__
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import pow [as alias]
def __init__(self, w_list, x_list, p, q, r, k, params, updates, eps=1.0e-6):
    """Transcription of equation 2.1 from paper (page 1434).
    """
    if len(w_list) != len(x_list):
        raise ValueError('w_list must have same len as x_list')
    output = (sum(w * tensor.pow(x, p) for (w, x) in zip(w_list, x_list))) \
        / (theano._asarray(eps, dtype=k.type.dtype) + k + tensor.pow(sum(tensor.pow(x, q) for x in x_list), r))
    assert output.type.ndim == 2
    self.__dict__.update(locals())
    del self.__dict__['self']
    _logger.debug('output dtype %s' % output.dtype)
Example 9: test_int_pow
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import pow [as alias]
def test_int_pow():
    a = CudaNdarrayType([False])()

    f = theano.function([a], (a * 4).sum(), mode=mode_with_gpu)
    op_names = [n.op.__class__.__name__ for n in f.maker.fgraph.toposort()]
    assert op_names == ['GpuCAReduce', 'GpuElemwise', 'HostFromGpu']

    f = theano.function([a], tensor.pow(a, 4).sum(), mode=mode_with_gpu)
    op_names = [n.op.__class__.__name__ for n in f.maker.fgraph.toposort()]
    assert op_names == ['GpuElemwise', 'GpuCAReduce', 'HostFromGpu']
Example 10: is_positive
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import pow [as alias]
def is_positive(v):
    if hints(v).get('positive', False):
        return True
    # TODO: how to handle this - a registry?
    #       infer_hints on Ops?
    logger.debug('is_positive: %s' % str(v))
    if v.owner and v.owner.op == tensor.pow:
        try:
            exponent = tensor.get_scalar_constant_value(v.owner.inputs[1])
        except tensor.basic.NotScalarConstantError:
            return False
        if 0 == exponent % 2:
            return True
    return False
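For orientation, this sketch reproduces the graph pattern is_positive looks for, using only core Theano (hints and logger come from the surrounding module and are not needed to see the pattern):

import theano.tensor as tensor

x = tensor.dvector('x')
v = tensor.pow(x, 2)
assert v.owner.op == tensor.pow                                  # node was built by pow
assert tensor.get_scalar_constant_value(v.owner.inputs[1]) == 2  # even constant exponent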
Example 11: local_log_pow
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import pow [as alias]
def local_log_pow(node):
    if node.op == tensor.log:
        x, = node.inputs
        if x.owner and x.owner.op == tensor.pow:
            base, exponent = x.owner.inputs
            # TODO: reason to be careful with dtypes?
            return [exponent * tensor.log(base)]
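A numeric sanity check of the identity this rewrite applies, log(base ** exponent) -> exponent * log(base), using only core Theano (the optimizer registration itself is omitted here):

import numpy as np
import theano
import theano.tensor as tensor

base, expo = tensor.dvector('base'), tensor.dscalar('expo')
lhs = theano.function([base, expo], tensor.log(tensor.pow(base, expo)))
rhs = theano.function([base, expo], expo * tensor.log(base))

b = np.array([0.5, 1.0, 3.0])
assert np.allclose(lhs(b, 2.5), rhs(b, 2.5))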
Example 12: gelu
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import pow [as alias]
def gelu(x):
    return 0.5 * x * (1 + T.tanh(T.sqrt(2 / np.pi) * (x + 0.044715 * T.pow(x, 3))))
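A quick evaluation of this tanh-approximation GELU (a sketch assuming the gelu defined above is in scope; the expected values follow from GELU(x) ≈ x * Φ(x)):

import numpy as np
import theano
import theano.tensor as T

x = T.dvector('x')
f = theano.function([x], gelu(x))
print(f(np.array([-2., 0., 2.])))  # -> roughly [-0.0454, 0., 1.9546]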
Example 13: costfunction
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import pow [as alias]
def costfunction(self, y):
    z = y.copy().astype('float64')
    return T.sum(T.pow(self.output.dimshuffle(1, 0) - z, 2)) / (2 * y.shape[0])
Example 14: errors
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import pow [as alias]
def errors(self, y):
    z = y.copy().astype('float64')
    return T.sum(T.pow(self.output.dimshuffle(1, 0) - z, 2)) / y.shape[0]
Example 15: compute_output
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import pow [as alias]
def compute_output(self, network, in_vw):
    exponent = network.find_hyperparameter(["exponent"])
    network.create_vw(
        "default",
        variable=T.pow(in_vw.variable, exponent),
        shape=in_vw.shape,
        tags={"output"}
    )