This article collects typical usage examples of the Python method theano.tensor.max. If you are wondering what tensor.max does, how to use it, or what it looks like in practice, the curated code samples below may help. You can also explore other usage examples from the theano.tensor module.
The following shows 15 code examples of the tensor.max method, sorted by popularity by default.
Example 1: ctc_update_log_p
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import max [as alias]
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next
Example 2: reduce_log_sum
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import max [as alias]
def reduce_log_sum(tensor, axis=None, guaranteed_finite=False):
    """
    Sum probabilities in the log domain, i.e. return
        log(e^vec[0] + e^vec[1] + ...)
        = log(e^x e^(vec[0]-x) + e^x e^(vec[1]-x) + ...)
        = log(e^x [e^(vec[0]-x) + e^(vec[1]-x) + ...])
        = log(e^x) + log(e^(vec[0]-x) + e^(vec[1]-x) + ...)
        = x + log(e^(vec[0]-x) + e^(vec[1]-x) + ...)
    For numerical stability, we choose x = max(vec).
    Note that if x is -inf, then all values are -inf, so the answer
    should be -inf; in that case, choose x = 0 instead.
    """
    maxval = T.max(tensor, axis)
    maxval_full = T.max(tensor, axis, keepdims=True)
    if not guaranteed_finite:
        maxval = T.switch(T.isfinite(maxval), maxval, T.zeros_like(maxval))
        maxval_full = T.switch(T.isfinite(maxval_full), maxval_full,
                               T.zeros_like(maxval_full))
    reduced_sum = T.sum(T.exp(tensor - maxval_full), axis)
    logsum = maxval + T.log(reduced_sum)
    return logsum
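A minimal usage sketch (not part of the original example): compile reduce_log_sum into a Theano function and compare it against NumPy's logaddexp.reduce.

import numpy as np
import theano
import theano.tensor as T

vec = T.vector('vec')
f = theano.function([vec], reduce_log_sum(vec, axis=0))

data = np.array([-1.0, -2.0, -3.0], dtype=theano.config.floatX)
print(f(data))                    # stable log-sum-exp over the vector
print(np.logaddexp.reduce(data))  # NumPy reference value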
Example 3: test_optimization
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import max [as alias]
def test_optimization(self):
    # From Theano's own test suite; assumes its module-level imports
    # (theano, numpy, config, function, CAReduce).
    # If we use only the max output, we should replace this op with
    # a faster one.
    mode = theano.compile.mode.get_default_mode().including(
        'canonicalize', 'fast_run')

    for axis in [0, 1, -1]:
        data = numpy.asarray(numpy.random.rand(2, 3), dtype=config.floatX)
        n = tensor.matrix()

        f = function([n], tensor.max_and_argmax(n, axis)[0], mode=mode)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 1
        assert isinstance(topo[0].op, CAReduce)

        f = function([n], tensor.max_and_argmax(n, axis), mode=mode)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 1
        assert isinstance(topo[0].op, tensor.MaxAndArgmax)
Example 4: theano_logsumexp
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import max [as alias]
def theano_logsumexp(x, axis=None):
    """
    Compute log(sum(exp(x), axis=axis)) in a numerically stable
    fashion.

    Parameters
    ----------
    x : tensor_like
        A Theano tensor (any dimension will do).
    axis : int or symbolic integer scalar, or None
        Axis over which to perform the summation. `None`, the
        default, performs over all axes.

    Returns
    -------
    result : ndarray or scalar
        The result of the log(sum(exp(...))) operation.
    """
    xmax = x.max(axis=axis, keepdims=True)
    xmax_ = x.max(axis=axis)
    return xmax_ + T.log(T.exp(x - xmax).sum(axis=axis))
Example 5: theano_logsumexp
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import max [as alias]
def theano_logsumexp(x, axis=None):
    """
    Compute log(sum(exp(x), axis=axis)) in a numerically stable
    fashion.

    Parameters
    ----------
    x : tensor_like
        A Theano tensor (any dimension will do).
    axis : int or symbolic integer scalar, or None
        Axis over which to perform the summation. `None`, the
        default, performs over all axes.

    Returns
    -------
    result : ndarray or scalar
        The result of the log(sum(exp(...))) operation.
    """
    xmax = T.max(x, axis=axis, keepdims=True)
    xmax_ = T.max(x, axis=axis)
    return xmax_ + T.log(T.exp(x - xmax).sum(axis=axis))
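Examples 4 and 5 are functionally identical: x.max(...) and T.max(x, ...) build the same graph node. A quick sanity check (my own sketch, assuming the definition above is in scope):

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
f = theano.function([x], theano_logsumexp(x, axis=1))

data = np.random.randn(2, 4).astype(theano.config.floatX)
expected = np.log(np.exp(data).sum(axis=1))
print(np.allclose(f(data), expected))  # True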
Example 6: max
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import max [as alias]
def max(x, axis=None, keepdims=False):
    return T.max(x, axis=axis, keepdims=keepdims)
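This is the Keras Theano-backend wrapper around T.max. A minimal check of the keepdims behaviour (usage sketch, not from the original source):

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
# `max` here is the wrapper defined above, shadowing the builtin
g = theano.function([x], max(x, axis=0, keepdims=True))

data = np.arange(6, dtype=theano.config.floatX).reshape(2, 3)
print(g(data).shape)  # (1, 3): the reduced axis is kept as size 1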
Example 7: ctc_cost
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import max [as alias]
def ctc_cost(predict, Y):
    log_probs, mask = ctc_path_probs(predict, ctc_interleave_blanks(Y))
    common_factor = T.max(log_probs)
    total_log_prob = T.log(T.sum(T.exp(log_probs - common_factor)[mask.nonzero()])) + common_factor
    return -total_log_prob

# batchifies original CTC code
Example 8: find_recent_params
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import max [as alias]
def find_recent_params(outputdir):
    # Scan for checkpoint files named "params<N>.p" and return the highest
    # N together with its path. Assumes `import os` at module level.
    files_list = list(os.listdir(outputdir))
    numbers = [int(x[6:-2]) for x in files_list if x[:6] == "params" and x[-2:] == ".p"]
    if len(numbers) == 0:
        return None
    most_recent = max(numbers)
    return most_recent, os.path.join(outputdir, "params{}.p".format(most_recent))
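Note that this example actually calls Python's built-in max on a list of integers rather than theano.tensor.max. A usage sketch with a hypothetical temporary directory:

import os
import tempfile

outputdir = tempfile.mkdtemp()
for n in (1, 5, 12):
    open(os.path.join(outputdir, "params{}.p".format(n)), "w").close()

print(find_recent_params(outputdir))  # (12, '<outputdir>/params12.p')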
Example 9: softmax
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import max [as alias]
def softmax(x, axis=None):
    '''
    Applies softmax to x over the given axis (i.e. exp/sum(exp)).
    '''
    if isinstance(axis, int):
        m = T.max(x, axis=axis, keepdims=True)
    else:
        m = T.max(x)
    exp_x = T.exp(x - m)
    Z = T.sum(exp_x, axis=axis, keepdims=True)
    return exp_x / Z
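A usage sketch (not part of the original): with axis=1, each row of the result sums to 1.

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
f = theano.function([x], softmax(x, axis=1))

data = np.random.randn(3, 5).astype(theano.config.floatX)
print(f(data).sum(axis=1))  # approximately [1. 1. 1.]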
# log softmax layer, with input data x
Example 10: logsoftmax
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import max [as alias]
def logsoftmax(x, axis=None):
    '''
    Applies log-softmax to x over the given axis
    (i.e. x - max - log(sum(exp(x - max))), computed stably).
    '''
    if isinstance(axis, int):
        m = T.max(x, axis=axis, keepdims=True)
    else:
        m = T.max(x)
    exp_x = T.exp(x - m)
    Z = T.sum(exp_x, axis=axis, keepdims=True)
    return x - m - T.log(Z)
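A quick consistency check against Example 9 (my own sketch, assuming both definitions are in scope): exp(logsoftmax(x)) should match softmax(x).

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
f = theano.function([x], [T.exp(logsoftmax(x, axis=1)), softmax(x, axis=1)])

data = np.random.randn(2, 4).astype(theano.config.floatX)
a, b = f(data)
print(np.allclose(a, b))  # True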
Example 11: pool2d
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import max [as alias]
def pool2d(x, pool_size, strides=(1, 1), border_mode='valid',
           dim_ordering='th', pool_mode='max'):
    # Assumes: from theano.tensor.signal import pool
    # `ds`/`st`/`padding` are the old Theano keyword names for the pool
    # window, stride, and padding (ws/stride/pad in later versions).
    if border_mode == 'same':
        w_pad = pool_size[0] - 2 if pool_size[0] % 2 == 1 else pool_size[0] - 1
        h_pad = pool_size[1] - 2 if pool_size[1] % 2 == 1 else pool_size[1] - 1
        padding = (w_pad, h_pad)
    elif border_mode == 'valid':
        padding = (0, 0)
    else:
        raise Exception('Invalid border mode: ' + str(border_mode))

    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))

    if dim_ordering == 'tf':
        x = x.dimshuffle((0, 3, 1, 2))

    if pool_mode == 'max':
        pool_out = pool.pool_2d(x, ds=pool_size, st=strides,
                                ignore_border=True,
                                padding=padding,
                                mode='max')
    elif pool_mode == 'avg':
        pool_out = pool.pool_2d(x, ds=pool_size, st=strides,
                                ignore_border=True,
                                padding=padding,
                                mode='average_exc_pad')
    else:
        raise Exception('Invalid pooling mode: ' + str(pool_mode))

    if border_mode == 'same':
        expected_width = (x.shape[2] + strides[0] - 1) // strides[0]
        expected_height = (x.shape[3] + strides[1] - 1) // strides[1]
        pool_out = pool_out[:, :,
                            : expected_width,
                            : expected_height]

    if dim_ordering == 'tf':
        pool_out = pool_out.dimshuffle((0, 2, 3, 1))
    return pool_out
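A usage sketch, assuming an older Theano release where pool_2d still accepts the ds/st/padding keyword names used by this Keras backend code:

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.signal import pool

x = T.tensor4('x')
f = theano.function([x], pool2d(x, pool_size=(2, 2), strides=(2, 2),
                                pool_mode='max'))

data = np.random.randn(1, 3, 8, 8).astype(theano.config.floatX)
print(f(data).shape)  # (1, 3, 4, 4) with 'valid' borders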
Example 12: log_sum_exp
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import max [as alias]
def log_sum_exp(x, axis=1):
    m = T.max(x, axis=axis)
    return m + T.log(T.sum(T.exp(x - m.dimshuffle(0, 'x')), axis=axis))
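Note that m.dimshuffle(0, 'x') hard-codes a 2-D input reduced over axis=1; for other shapes a keepdims max would be needed. A usage sketch:

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
f = theano.function([x], log_sum_exp(x, axis=1))

data = np.random.randn(3, 4).astype(theano.config.floatX)
print(np.allclose(f(data), np.log(np.exp(data).sum(axis=1))))  # True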
Example 13: __call__
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import max [as alias]
def __call__(self, x):
    e_x = T.exp(x - x.max(axis=1).dimshuffle(0, 'x'))
    return e_x / e_x.sum(axis=1).dimshuffle(0, 'x')
Example 14: __init__
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import max [as alias]
def __init__(self, ctrl_coef=1e-1):
    def f(x, u, i, terminal):
        # Original Gym does not impose a control cost, but does clip it
        # to [-1, 1]. This non-linear dynamics is hard for iLQG to handle,
        # so add a quadratic control penalty instead.
        if terminal:
            ctrl_cost = T.zeros_like(x[..., 0])
        else:
            ctrl_cost = T.square(u).sum(axis=-1)

        # x: (batch_size, 6), concatenation of qpos & qvel

        # Distance cost
        # The tricky part is finding Cartesian coords of pole tip.
        base_x = x[..., 0]      # qpos[0]: x axis of the slider
        hinge1_ang = x[..., 1]  # qpos[1]: angle of the first hinge
        hinge2_ang = x[..., 2]  # qpos[2]: angle of the second hinge
        hinge2_cum_ang = hinge1_ang + hinge2_ang
        # 0 degrees is y=1, x=0; rotates clockwise.
        hinge1_x, hinge1_y = T.sin(hinge1_ang), T.cos(hinge1_ang)
        hinge2_x, hinge2_y = T.sin(hinge2_cum_ang), T.cos(hinge2_cum_ang)
        tip_x = base_x + hinge1_x + hinge2_x
        tip_y = hinge1_y + hinge2_y
        dist_cost = 0.01 * T.square(tip_x) + T.square(tip_y - 2)

        # Velocity cost
        v1 = x[..., 4]  # qvel[1]
        v2 = x[..., 5]  # qvel[2]
        vel_cost = 1e-3 * T.square(v1) + 5e-3 * T.square(v2)

        # TODO: termination penalty? (shouldn't change optimal policy?)
        dist_below = T.max([T.zeros_like(tip_y), 1.1 - tip_y], axis=0)
        termination_cost = T.square(dist_below)

        cost = 5 * termination_cost + dist_cost + vel_cost + ctrl_coef * ctrl_cost
        return cost

    super().__init__(f, state_size=6, action_size=1)
Example 15: get_output
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import max [as alias]
def get_output(self, train=False):
    X = self.get_input(train)
    # -- don't need activation since it's just linear.
    output = T.max(T.dot(X, self.W) + self.b, axis=1)
    return output
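The same computation isolated from the layer class (a sketch with hypothetical shapes: X is (batch, n_in), self.W is (n_in, n_units), so the max over axis=1 yields one scalar score per sample):

import numpy as np
import theano
import theano.tensor as T

X = T.matrix('X')
W = theano.shared(np.random.randn(4, 3).astype(theano.config.floatX), name='W')
b = theano.shared(np.zeros(3, dtype=theano.config.floatX), name='b')

output = T.max(T.dot(X, W) + b, axis=1)  # max over the 3 linear units
f = theano.function([X], output)

print(f(np.random.randn(2, 4).astype(theano.config.floatX)).shape)  # (2,)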