本文整理匯總了Python中theano.tensor.clip方法的典型用法代碼示例。如果您正苦於以下問題:Python tensor.clip方法的具體用法?Python tensor.clip怎麽用?Python tensor.clip使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類theano.tensor
的用法示例。
在下文中一共展示了tensor.clip方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Python代碼示例。
示例1: _modify_updates
# 需要導入模塊: from theano import tensor [as 別名]
# 或者: from theano.tensor import clip [as 別名]
def _modify_updates(self, updates):
    """
    Replaces the values in `updates` if needed to enforce the options set
    in the __init__ method, including `max_kernel_norm`.

    Parameters
    ----------
    updates : OrderedDict
        A dictionary mapping parameters (including parameters not
        belonging to this model) to updated values of those parameters.
        The dictionary passed in contains the updates proposed by the
        learning algorithm. This function modifies the dictionary
        directly. The modified version will be compiled and executed
        by the learning algorithm.
    """
    if self.max_kernel_norm is not None:
        W, = self.transformer.get_params()
        if W in updates:
            updated_W = updates[W]
            # Per-kernel L2 norm, reducing over all axes except the last
            # (assumes kernels are indexed by the final axis -- TODO confirm).
            row_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=(0, 1, 2)))
            # Norms above the cap are pulled down to it; smaller norms pass through.
            desired_norms = T.clip(row_norms, 0, self.max_kernel_norm)
            # 1e-7 guards against division by zero for all-zero kernels.
            scales = desired_norms / (1e-7 + row_norms)
            # Broadcast the per-kernel scale back over the first three axes.
            updates[W] = (updated_W * scales.dimshuffle('x', 'x', 'x', 0))
示例2: adam
# 需要導入模塊: from theano import tensor [as 別名]
# 或者: from theano.tensor import clip [as 別名]
def adam(cost, params, lr=0.001, b1=0.9, b2=0.999, e=1e-8, grad_clip=None):
    """Build Adam update rules for `params` minimizing `cost`.

    Parameters
    ----------
    cost : theano scalar expression
        The objective to minimize.
    params : list of theano shared variables
        Parameters to update.
    lr : float
        Learning rate.
    b1, b2 : float
        Moment decay rates, expressed here as ``1 - beta`` (so the
        moment updates use ``(1. - b1)`` etc., matching the original code).
    e : float
        Numerical-stability epsilon added to the denominator.
    grad_clip : float or None, optional
        If given, each gradient is element-wise clipped to
        ``[-grad_clip, grad_clip]``.  The original code referenced an
        undefined global ``grad_clip``; it is now an explicit parameter,
        and clipping is skipped when it is None.

    Returns
    -------
    list of (shared_variable, update_expression) pairs
        Suitable for passing as ``updates`` to ``theano.function``.
    """
    updates = []
    grads = T.grad(cost, params)
    # Timestep counter kept in floatX so the power ops stay on-dtype.
    i = theano.shared(np.dtype(theano.config.floatX).type(1))
    i_t = i + 1.
    # Bias-correction terms (note b1/b2 are 1-beta in this formulation).
    fix1 = 1. - (1. - b1)**i_t
    fix2 = 1. - (1. - b2)**i_t
    lr_t = lr * (T.sqrt(fix2) / fix1)
    for p, g in zip(params, grads):
        if grad_clip is not None:
            g = T.clip(g, -grad_clip, grad_clip)
        # First and second moment accumulators, zero-initialized to p's shape.
        m = theano.shared(p.get_value() * 0.)
        v = theano.shared(p.get_value() * 0.)
        m_t = (b1 * g) + ((1. - b1) * m)
        v_t = (b2 * T.sqr(g)) + ((1. - b2) * v)
        g_t = m_t / (T.sqrt(v_t) + e)
        p_t = p - (lr_t * g_t)
        updates.append((m, m_t))
        updates.append((v, v_t))
        updates.append((p, p_t))
    # Advance the shared timestep once per call, after all parameter updates.
    updates.append((i, i_t))
    return updates
示例3: update_weights
# 需要導入模塊: from theano import tensor [as 別名]
# 或者: from theano.tensor import clip [as 別名]
def update_weights(self, grads):
    """
    Update the weights using (modified) gradient descent. Done in place.

    :param Weights grads: The gradients of the error function w.r.t. the weights
    """
    # Sanitize: zero out non-finite entries, then clip per-element magnitude.
    for g in grads:
        g[~np.isfinite(g)] = 0
        np.clip(g, -self.opts.largest_grad, self.opts.largest_grad, out=g)
    # If the largest per-array gradient norm exceeds the limit,
    # rescale every update by a common factor; otherwise leave as-is.
    biggest_norm = max(np.linalg.norm(g) for g in grads)
    if biggest_norm > self.opts.grad_norm_limit:
        scale = self.opts.grad_norm_limit / biggest_norm
    else:
        scale = 1.0
    # Plain in-place gradient-descent step.
    for weight, g in zip(self.weights, grads):
        weight -= self.learning_rate * g * scale
    self._copy_and_scale_weights()
示例4: score_metrics
# 需要導入模塊: from theano import tensor [as 別名]
# 或者: from theano.tensor import clip [as 別名]
def score_metrics(out, target_var, weight_map, l2_loss=0):
    """Compute weighted cross-entropy loss, pixel accuracy and a Dice-style
    overlap score for a pixel-wise softmax segmentation network.

    Parameters (theano symbolic variables):
        out        : raw network output; assumed NCHW layout
                     (batch, channel, h, w) -- TODO confirm against caller.
        target_var : integer class labels with matching spatial layout.
        weight_map : per-pixel loss weights with matching spatial layout.
        l2_loss    : optional scalar regularization term added to the loss.

    Returns (loss, accuracy, dice_score, target_flat, prediction,
    prediction_binary).
    """
    _EPSILON=1e-8
    # Flatten NCHW -> (pixels, channels) so softmax runs per pixel.
    out_flat = out.dimshuffle(1,0,2,3).flatten(ndim=2).dimshuffle(1,0)
    target_flat = target_var.dimshuffle(1,0,2,3).flatten(ndim=1)
    weight_flat = weight_map.dimshuffle(1,0,2,3).flatten(ndim=1)
    prediction = lasagne.nonlinearities.softmax(out_flat)
    prediction_binary = T.argmax(prediction, axis=1)
    # Dice overlap: prediction_binary + target == 2 only where both are 1
    # (assumes binary 0/1 labels -- TODO confirm).
    dice_score = (T.sum(T.eq(2, prediction_binary+target_flat))*2.0 /
                  (T.sum(prediction_binary) + T.sum(target_flat)))
    # Clip probabilities to keep log() finite inside the cross-entropy.
    loss = lasagne.objectives.categorical_crossentropy(T.clip(prediction,_EPSILON,1-_EPSILON), target_flat)
    # Per-pixel weighting, then mean over all pixels.
    loss = loss * weight_flat
    loss = loss.mean()
    loss += l2_loss
    accuracy = T.mean(T.eq(prediction_binary, target_flat),
                      dtype=theano.config.floatX)
    return loss, accuracy, dice_score, target_flat, prediction, prediction_binary
示例5: gsm
# 需要導入模塊: from theano import tensor [as 別名]
# 或者: from theano.tensor import clip [as 別名]
def gsm(name, k, w={}, logvar_minmax=16):
    """Zero-mean Gaussian scale mixture prior with `k` components.

    Registers two shared parameter vectors in `w` (unnormalized mixture
    log-weights and scaled log-variances) and returns a Struct exposing:
      logp(v, w)         -- log-density of tensor `v` under the mixture;
      postup(updates, w) -- post-update hook that clips the raw logvar
                            parameter to [-1, 1], keeping the effective
                            log-variance in [-logvar_minmax, logvar_minmax];
      w                  -- the shared-parameter dict.

    NOTE(review): `w={}` is a mutable default argument shared across calls;
    left unchanged here since callers may rely on accumulating parameters
    into one dict -- confirm before changing.
    """
    w[name+'_weight'] = G.sharedf(np.zeros((k,)))
    w[name+'_logvar'] = G.sharedf(np.random.randn(k)*.1)
    def logp(v, w):
        # Softmax over the unconstrained mixture weights.
        mixtureweights = T.exp(w[name+'_weight'])
        mixtureweights /= mixtureweights.sum()
        # Effective log-variance is the raw parameter scaled by logvar_minmax.
        logvar = logvar_minmax*w[name+'_logvar']
        var = T.exp(logvar)
        if k == 0:
            return 0.
        if k == 1:
            # Single Gaussian: closed-form log-density.
            return -.5*(v**2).sum()/var[0] - v.size.astype(G.floatX) * (.5*T.log(2.*math.pi) + logvar[0])
        # General mixture: sum component densities, then take the log.
        p = 0.
        for i in range(k):
            p += mixtureweights[i] * T.exp(-.5*v**2/var[i]) / T.sqrt(2.*math.pi*var[i])
        logp = T.log(p).sum()
        return logp
    def postup(updates, w):
        # Keep the raw logvar parameter bounded after each optimizer step.
        updates[w[name+'_logvar']] = T.clip(updates[w[name+'_logvar']], -1., 1.)
        return updates
    return G.Struct(logp=logp, postup=postup, w=w)
示例6: relu
# 需要導入模塊: from theano import tensor [as 別名]
# 或者: from theano.tensor import clip [as 別名]
def relu(x, alpha=0., max_value=None, threshold=0.):
    """Rectified linear unit.

    With default values this is ``max(x, 0)``.  A nonzero `alpha` adds a
    leak for values below `threshold`, `max_value` caps the output from
    above, and `threshold` shifts the activation cutoff.
    """
    _assert_has_capability(T.nnet, 'relu')
    leaky = alpha != 0.
    shifted = threshold != 0.
    if leaky:
        # Magnitude of the sub-threshold part, needed for the leak term below.
        negative_part = T.nnet.relu(-x + threshold) if shifted else T.nnet.relu(-x)
    # Zero out everything at or below the threshold.
    out = x * T.cast(T.gt(x, threshold), floatx()) if shifted else T.nnet.relu(x)
    if max_value is not None:
        out = T.clip(out, 0.0, max_value)
    if leaky:
        out -= alpha * negative_part
    return out
示例7: categorical_crossentropy
# 需要導入模塊: from theano import tensor [as 別名]
# 或者: from theano.tensor import clip [as 別名]
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
    """Categorical cross-entropy between `target` and `output`.

    `axis` selects the channels axis of `output`; it must be -1 or a
    valid axis index.  When it is not the last axis, both tensors are
    permuted so the channels come last before the loss is computed.
    """
    ndims = len(int_shape(output))
    dims = list(range(ndims))
    if axis != -1 and axis not in dims:
        raise ValueError(
            '{}{}{}'.format(
                'Unexpected channels axis {}. '.format(axis),
                'Expected to be -1 or one of the axes of `output`, ',
                'which has {} dimensions.'.format(ndims)))
    # Move the channels axis last if it is not already.
    if axis != -1 and axis != dims[-1]:
        perm = dims[:axis] + dims[axis + 1:] + [axis]
        output = permute_dimensions(output, perm)
        target = permute_dimensions(target, perm)
    if from_logits:
        output = T.nnet.softmax(output)
    else:
        # Scale predictions so class probabilities of each sample sum to 1,
        # then clip to avoid numerical instability in the log.
        output /= output.sum(axis=-1, keepdims=True)
        output = T.clip(output, epsilon(), 1.0 - epsilon())
    return T.nnet.categorical_crossentropy(output, target)
示例8: time_distributed_mean_categorical_crossentropy
# 需要導入模塊: from theano import tensor [as 別名]
# 或者: from theano.tensor import clip [as 別名]
def time_distributed_mean_categorical_crossentropy(pred, targ, weight=None):
    """Categorical cross-entropy over time-distributed predictions.

    Collapses the time/batch axes into one, then averages the per-step
    losses.  When `weight` is given, returns a normalized weighted sum
    instead of a plain mean.
    """
    _EPSILON = 10e-8
    # Flatten to (steps, classes); targets flatten to one-hot rows or ids.
    pred2d = T.reshape(pred, (-1, pred.shape[-1]))
    targ_flat = T.reshape(targ, (-1, targ.shape[-1])) if targ.ndim == 3 else targ.flatten()
    # Clip to keep log() finite.
    pred2d = T.clip(pred2d, _EPSILON, 1.0 - _EPSILON)
    loss = lasagne.objectives.categorical_crossentropy(pred2d, targ_flat)
    if weight is not None:
        return lasagne.objectives.aggregate(loss, weight.flatten(), mode='normalized_sum')
    return T.mean(loss)
示例9: answer_probabilities
# 需要導入模塊: from theano import tensor [as 別名]
# 或者: from theano.tensor import clip [as 別名]
def answer_probabilities(self, *args, **kwargs):
    """
    normalise the answer groups for each question.
    """
    raw = self.input_layer.output(*args, **kwargs)
    clipped = T.clip(raw, 0, 1)  # T.maximum(raw, 0)
    # Column ranges of the 11 question groups (end-exclusive):
    # q1: 1.1-1.3, q2: 2.1-2.2, q3: 3.1-3.2, q4: 4.1-4.2, q5: 5.1-5.4,
    # q6: 6.1-6.2, q7: 7.1-7.3, q8: 8.1-8.7, q9: 9.1-9.3,
    # q10: 10.1-10.3, q11: 11.1-11.6
    bounds = [0, 3, 5, 7, 9, 13, 15, 18, 25, 28, 31, 37]
    return tuple(clipped[:, lo:hi] for lo, hi in zip(bounds[:-1], bounds[1:]))
示例10: _normalize
# 需要導入模塊: from theano import tensor [as 別名]
# 或者: from theano.tensor import clip [as 別名]
def _normalize(self, deltas):
    """Normalizes the norm of parameter updates to given maximum value.

    :type deltas: dict of strs to symbolic tensors
    :param deltas: mapping from variable names to symbolic tensors that
                   describe the amount and direction of parameter updates
                   (normally the negative gradient of each parameter), after
                   any adaptation applied by the optimization method

    :rtype: dict of strs to symbolic tensors
    :returns: mapping from variable names to symbolic tensors that describe
              ``deltas`` after normalization has been applied
    """
    limit = self._max_gradient_norm
    if limit is None:
        # Normalization disabled: hand back the deltas untouched.
        return deltas
    # Global norm over all deltas, capped at the configured maximum.
    total_norm = tensor.sqrt(sum_of_squares(deltas.values()))
    capped_norm = tensor.clip(total_norm, 0.0, limit)
    # Rescale every delta in place; epsilon guards against division by zero.
    for name in deltas:
        deltas[name] = deltas[name] * capped_norm / (self._epsilon + total_norm)
    return deltas
示例11: sqrt
# 需要導入模塊: from theano import tensor [as 別名]
# 或者: from theano.tensor import clip [as 別名]
def sqrt(x):
    """Element-wise square root with negative inputs clamped to zero,
    so the result is never NaN for real inputs."""
    non_negative = T.clip(x, 0., np.inf)
    return T.sqrt(non_negative)
示例12: clip
# 需要導入模塊: from theano import tensor [as 別名]
# 或者: from theano.tensor import clip [as 別名]
def clip(x, min_value, max_value):
    """Element-wise clip of `x` to [min_value, max_value].

    If the bounds are reversed, the upper bound is raised to the lower
    bound so the interval is never empty (Keras backend convention).
    """
    upper = min_value if max_value < min_value else max_value
    return T.clip(x, min_value, upper)
示例13: categorical_crossentropy
# 需要導入模塊: from theano import tensor [as 別名]
# 或者: from theano.tensor import clip [as 別名]
def categorical_crossentropy(output, target, from_logits=False):
    """Categorical cross-entropy between `output` and `target`, with
    classes on the last axis.  When `from_logits` is true, `output` is
    first passed through a softmax; otherwise it is re-normalized and
    clipped to avoid numerical instability in the log."""
    if from_logits:
        probs = T.nnet.softmax(output)
    else:
        # Scale predictions so class probabilities of each sample sum to 1,
        # then clip so log() stays finite.
        probs = output / output.sum(axis=-1, keepdims=True)
        probs = T.clip(probs, _EPSILON, 1.0 - _EPSILON)
    return T.nnet.categorical_crossentropy(probs, target)
示例14: binary_crossentropy
# 需要導入模塊: from theano import tensor [as 別名]
# 或者: from theano.tensor import clip [as 別名]
def binary_crossentropy(output, target, from_logits=False):
    """Binary cross-entropy between `output` probabilities (or logits,
    when `from_logits` is true) and `target` labels."""
    probs = T.nnet.sigmoid(output) if from_logits else output
    # Clip to avoid numerical instability in the log terms.
    probs = T.clip(probs, _EPSILON, 1.0 - _EPSILON)
    return T.nnet.binary_crossentropy(probs, target)
示例15: cosine
# 需要導入模塊: from theano import tensor [as 別名]
# 或者: from theano.tensor import clip [as 別名]
def cosine(x, y):
    """Cosine distance mapped to [0, 1]: 0 for parallel vectors,
    1 for anti-parallel, clipped against floating-point overshoot."""
    similarity = (x * y).sum(axis=-1) / (_magnitude(x) * _magnitude(y))
    return tt.clip((1 - similarity) / 2, 0, 1)