This article collects typical usage examples of the theano.tensor.minimum method in Python. If you are wondering what tensor.minimum does or how to use it, the curated code examples below may help; you can also explore the other methods of the theano.tensor module.
The following shows 15 code examples of tensor.minimum, sorted by popularity by default.
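Before the examples, a minimal sketch of tensor.minimum itself may help: it is an elementwise, broadcasting operation, so the compiled function accepts any compatible shapes. The values below are made up for illustration.

import theano
import theano.tensor as T

x = T.dvector('x')
y = T.dvector('y')
f = theano.function([x, y], T.minimum(x, y))
print(f([1.0, 5.0, 3.0], [2.0, 4.0, 3.0]))  # -> [1. 4. 3.]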
Example 1: ctc_update_log_p
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import minimum [as alias]
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')
    # subtract the running max for numerical stability before exponentiating
    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor
    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next
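Here T.minimum clamps the advancing "active" frontier so it never runs past the number of CTC states (log_p_curr.shape[0]). A toy check of just that clamping pattern, with made-up values:

import theano
import theano.tensor as T

active = T.lscalar('active')
n_states = T.lscalar('n_states')
clamp = theano.function([active, n_states], T.minimum(active + 1, n_states))
print(clamp(7, 5))  # -> 5: the window cannot grow past the state count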
Example 2: rprop_core
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import minimum [as alias]
def rprop_core(params, gradients, rprop_increase=1.01, rprop_decrease=0.99,
               rprop_min_step=0, rprop_max_step=100, learning_rate=0.01):
    """
    Rprop optimizer.
    See http://sci2s.ugr.es/keel/pdf/algorithm/articulo/2003-Neuro-Igel-IRprop+.pdf.
    """
    for param, grad in zip(params, gradients):
        grad_tm1 = theano.shared(np.zeros_like(param.get_value()), name=param.name + '_grad')
        step_tm1 = theano.shared(np.zeros_like(param.get_value()) + learning_rate, name=param.name + '_step')
        # the sign of grad * grad_tm1 tells whether the gradient direction changed
        test = grad * grad_tm1
        same = T.gt(test, 0)
        diff = T.lt(test, 0)
        # grow the step while the sign is stable, shrink it on a sign flip,
        # and keep the result inside [rprop_min_step, rprop_max_step]
        step = T.minimum(rprop_max_step, T.maximum(rprop_min_step, step_tm1 * (
            T.eq(test, 0) +
            same * rprop_increase +
            diff * rprop_decrease)))
        grad = grad - diff * grad
        yield param, param - T.sgn(grad) * step
        yield grad_tm1, grad
        yield step_tm1, step
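A minimal sketch of wiring rprop_core into a training function; the model, loss, and data here are invented for illustration. The key point is that rprop_core yields (shared_variable, new_value) pairs that can be passed straight to theano.function as updates.

import numpy as np
import theano
import theano.tensor as T

x = T.dmatrix('x')
w = theano.shared(np.ones((3, 1)), name='w')
loss = T.sum(T.dot(x, w) ** 2)
grads = T.grad(loss, [w])
updates = list(rprop_core([w], grads))
train = theano.function([x], loss, updates=updates)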
Example 3: minimum
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import minimum [as alias]
def minimum(x, y):
    return T.minimum(x, y)
Example 4: relu
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import minimum [as alias]
def relu(x, alpha=0., max_value=None):
    _assert_has_capability(T.nnet, 'relu')
    x = T.nnet.relu(x, alpha)
    if max_value is not None:
        x = T.minimum(x, max_value)
    return x
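A quick illustration of the max_value clamp, equivalent to calling relu(x, max_value=6.) above: with the cap at 6 this becomes the common "ReLU6" activation. The input values are made up.

import theano
import theano.tensor as T

x = T.dvector('x')
f = theano.function([x], T.minimum(T.nnet.relu(x), 6.))
print(f([-3.0, 2.0, 8.0]))  # -> [0. 2. 6.]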
Example 5: minimum
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import minimum [as alias]
def minimum(x, y):
    if checkgrad:
        # substitute a smooth expression while gradient checking
        return x + y
    return T.minimum(x, y)
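The x + y substitution presumably exists because the gradient of minimum(x, y) is discontinuous wherever x == y, which breaks finite-difference gradient checks. A quick look at the symbolic gradient away from the tie:

import theano
import theano.tensor as T

x = T.dscalar('x')
y = T.dscalar('y')
f = theano.function([x, y], T.grad(T.minimum(x, y), x))
print(f(1.0, 2.0))  # -> 1.0 (x is the minimum)
print(f(3.0, 2.0))  # -> 0.0 (y is the minimum)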
Example 6: minimum
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import minimum [as alias]
def minimum(x, y):
    return T.minimum(x, y)

# SHAPE OPERATIONS
Example 7: relu
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import minimum [as alias]
def relu(x, alpha=0., max_value=None):
    assert hasattr(T.nnet, 'relu'), ('It looks like your version of '
                                     'Theano is out of date. '
                                     'Install the latest version with:\n'
                                     'pip install git+git://github.com/Theano/Theano.git '
                                     '--upgrade --no-deps')
    x = T.nnet.relu(x, alpha)
    if max_value is not None:
        x = T.minimum(x, max_value)
    return x
Example 8: infer_shape
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import minimum [as alias]
def infer_shape(self, nodes, shapes):
    # the output is 1-D, with length equal to the smaller of
    # the first input's dimensions
    return [(tensor.minimum(*shapes[0]), )]
Example 9: structured_minimum
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import minimum [as alias]
def structured_minimum(x, y):
    """
    Structured elemwise minimum of sparse matrix x by scalar y.
    """
    # see decorator for function body
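A small usage sketch of the sparse version, assuming the standard theano.sparse API: the "structured" operation only touches the stored (nonzero) elements of x, leaving the sparsity pattern intact. The matrix values are arbitrary.

import numpy as np
import scipy.sparse as sp
import theano
import theano.sparse as sparse

x = sparse.csr_matrix('x')
f = theano.function([x], sparse.structured_minimum(x, 2.0))
m = sp.csr_matrix(np.array([[0.0, 5.0], [1.0, 0.0]]))
print(f(m).toarray())  # -> [[0. 2.] [1. 0.]]; zeros stay zero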
Example 10: parse_transfer_function
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import minimum [as alias]
def parse_transfer_function(string_identifier, slope_parameter=None):
    """Return the activation function selected by the string argument.

    string_identifier:
        possible values are tanh, ReLU/relu, sigmoid/sig, abs,
        plu/PLU (piecewise linear), maxout <number>, linear/lin
    RETURNS:
        transfer_function (python/theano function), string_identifier
        (normalized), dict (for special cases)
    """
    cross_channel_pooling_groups = None
    if string_identifier == 'tanh':
        Activation_f = T.tanh
    elif string_identifier in ['ReLU', 'relu']:  # rectified linear unit
        string_identifier = "relu"
        Activation_f = lambda x: x * (x > 0)
    elif string_identifier in ['sigmoid', 'sig']:
        string_identifier = "sigmoid"
        Activation_f = T.nnet.sigmoid
    elif string_identifier in ['abs', 'Abs', 'absolute']:
        string_identifier = 'abs'
        Activation_f = T.abs_
    elif string_identifier in ['plu', 'PLu', 'PLU', 'piecewise']:  # piecewise linear function
        string_identifier = "PLU"
        print("parse_transfer_function::Remember to optimize the 'slope_parameter'")
        assert slope_parameter is not None, (
            "...and better pass it to this function, as well! (type: theano tensor, "
            "shape: same as activation, uniform random values in [-1, 1] should be fine)")
        Activation_f = lambda x: T.maximum(0, x) + T.minimum(0, x) * slope_parameter
    elif "maxout" in string_identifier:
        r = int(string_identifier.split(" ")[1])
        assert r >= 2
        cross_channel_pooling_groups = r
        Activation_f = lambda x: x  # maxout itself is applied via cross-channel pooling
    elif string_identifier in ['linear', "lin"]:
        string_identifier = "linear"
        Activation_f = lambda x: x
    else:
        raise NotImplementedError()
    return Activation_f, string_identifier, {"cross_channel_pooling_groups": cross_channel_pooling_groups}
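A hypothetical call showing the PLU branch, with a shared slope parameter of the same shape as the activations (all names, shapes, and values here are illustrative):

import numpy as np
import theano
import theano.tensor as T

slope = theano.shared(np.random.uniform(-1, 1, size=(4,)), name='slope')
act_f, name, extras = parse_transfer_function('PLU', slope_parameter=slope)
x = T.dvector('x')
f = theano.function([x], act_f(x))
print(f(np.array([1.0, -1.0, 2.0, -2.0])))  # negative inputs scaled by the slope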
Example 11: q_loss
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import minimum [as alias]
def q_loss(self, y_true, y_pred):
    # Huber loss with clip_delta assumed to be 1.0,
    # accumulated with a sum.
    diff = y_true - y_pred
    _quad = T.minimum(abs(diff), 1.0)
    _lin = abs(diff) - _quad
    loss = 0.5 * _quad ** 2 + _lin
    loss = T.sum(loss)
    return loss
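q_loss is the Huber loss with delta fixed at 1.0: quadratic for |diff| <= 1 and linear beyond, summed over all elements. A quick numeric sanity check of the same formula in plain numpy (numbers arbitrary):

import numpy as np

diff = np.array([0.5, 2.0])
quad = np.minimum(np.abs(diff), 1.0)
lin = np.abs(diff) - quad
print(0.5 * quad ** 2 + lin)  # -> [0.125 1.5]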
Example 12: cliplin
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import minimum [as alias]
def cliplin(X):
    return T.minimum(T.maximum(X, -2.), 2.)
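cliplin is a hard clip to the interval [-2, 2]; a quick check with arbitrary values:

import theano
import theano.tensor as T

X = T.dvector('X')
f = theano.function([X], cliplin(X))
print(f([-5.0, 0.5, 3.0]))  # -> [-2. 0.5 2.]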
Example 13: rescaled_weights
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import minimum [as alias]
def rescaled_weights(self, c):
    # c is the maximal norm of the weight vector going into a single filter
    norms = T.sqrt(T.sqr(self.W).mean(0, keepdims=True))
    scale_factors = T.minimum(c / norms, 1)
    return self.W * scale_factors
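A sketch of using this max-norm rescaling as a constraint after each gradient step; `layer` is a hypothetical object exposing a shared weight matrix W and the method above:

import theano

# layer is assumed to exist; cap each filter's RMS norm at 3.0
constrain = theano.function([], [], updates=[(layer.W, layer.rescaled_weights(3.0))])
# call constrain() after every parameter update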
Example 14: PReLU
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import minimum [as alias]
def PReLU(a, x):
    return T.maximum(0.0, x) + a * T.minimum(0.0, x)
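PReLU keeps positive inputs unchanged and scales negative inputs by a learnable slope a; a small usage sketch with an invented shared slope:

import theano
import theano.tensor as T

a = theano.shared(0.25, name='a')
x = T.dvector('x')
f = theano.function([x], PReLU(a, x))
print(f([-4.0, 2.0]))  # -> [-1. 2.]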
Example 15: _get_updates_for
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import minimum [as alias]
def _get_updates_for(self, param, grad):
    grad_tm1 = util.shared_like(param, 'grad')
    step_tm1 = util.shared_like(param, 'step', self.learning_rate.eval())
    # the sign of grad * grad_tm1 tells whether the gradient direction changed
    test = grad * grad_tm1
    diff = TT.lt(test, 0)
    steps = step_tm1 * (TT.eq(test, 0) +
                        TT.gt(test, 0) * self.step_increase +
                        diff * self.step_decrease)
    step = TT.minimum(self.max_step, TT.maximum(self.min_step, steps))
    grad = grad - diff * grad
    yield param, TT.sgn(grad) * step
    yield grad_tm1, grad
    yield step_tm1, step
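Unlike Example 2, which yields the new parameter value directly, this generator appears to follow the downhill/theanets convention in which the first yielded expression is a step delta that the surrounding framework subtracts from the parameter. A hypothetical consumer under that assumption:

def apply_updates(opt, params, grads):
    updates = []
    for param, grad in zip(params, grads):
        for var, new in opt._get_updates_for(param, grad):
            # subtract the delta for the parameter itself;
            # install the new values of the auxiliary shared variables as-is
            updates.append((var, var - new if var is param else new))
    return updates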