This article collects typical usage examples of the Python function theano.tensor.max. If you have been wondering how exactly the max function is used in Python, how to call it, or what real-world examples look like, then the hand-picked code samples below should help.
The following shows 15 code examples of the max function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
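Before the collected examples, here is a minimal sketch (assuming Theano is installed; the variable names are illustrative) of the basic behaviour of tensor.max: with no axis argument it reduces over all elements, and with an axis argument it reduces along that axis only.

    import numpy as np
    import theano
    import theano.tensor as T

    x = T.matrix('x')                               # symbolic 2-D input
    f_all = theano.function([x], T.max(x))          # max over all elements
    f_col = theano.function([x], T.max(x, axis=0))  # column-wise max

    data = np.array([[1., 5., 3.],
                     [4., 2., 6.]], dtype=theano.config.floatX)
    print(f_all(data))   # 6.0
    print(f_col(data))   # [4. 5. 6.]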
Example 1: norm
def norm(x, ord):
    x = as_tensor_variable(x)
    ndim = x.ndim
    if ndim == 0:
        raise ValueError("'axis' entry is out of bounds.")
    elif ndim == 1:
        if ord is None:
            return tensor.sum(x**2)**0.5
        elif ord == 'inf':
            return tensor.max(abs(x))
        elif ord == '-inf':
            return tensor.min(abs(x))
        elif ord == 0:
            return x[x.nonzero()].shape[0]
        else:
            try:
                z = tensor.sum(abs(x**ord))**(1. / ord)
            except TypeError:
                raise ValueError("Invalid norm order for vectors.")
            return z
    elif ndim == 2:
        if ord is None or ord == 'fro':
            return tensor.sum(abs(x**2))**(0.5)
        elif ord == 'inf':
            return tensor.max(tensor.sum(abs(x), 1))
        elif ord == '-inf':
            return tensor.min(tensor.sum(abs(x), 1))
        elif ord == 1:
            return tensor.max(tensor.sum(abs(x), 0))
        elif ord == -1:
            return tensor.min(tensor.sum(abs(x), 0))
        else:
            raise ValueError(0)
    elif ndim > 2:
        raise NotImplementedError("We don't support norm with ndim > 2")
Example 2: plotUpdate
def plotUpdate(self, updates):
    '''
    >>>get update info of each layer
    >>>type updates: dict
    >>>para updates: update dictionary
    '''
    maxdict = T.zeros(shape=(self.deep*2+1,))
    mindict = T.zeros(shape=(self.deep*2+1,))
    meandict = T.zeros(shape=(self.deep*2+1,))

    for i in xrange(self.deep):
        updw = updates[self.layers[i].w] - self.layers[i].w
        maxdict = T.set_subtensor(maxdict[2*i], T.max(updw))
        mindict = T.set_subtensor(mindict[2*i], T.min(updw))
        meandict = T.set_subtensor(meandict[2*i], T.mean(updw))
        updb = updates[self.layers[i].b] - self.layers[i].b
        maxdict = T.set_subtensor(maxdict[2*i+1], T.max(updb))
        mindict = T.set_subtensor(mindict[2*i+1], T.min(updb))
        meandict = T.set_subtensor(meandict[2*i+1], T.mean(updb))

    updw = updates[self.classifier.w] - self.classifier.w
    maxdict = T.set_subtensor(maxdict[self.deep*2], T.max(updw))
    mindict = T.set_subtensor(mindict[self.deep*2], T.min(updw))
    meandict = T.set_subtensor(meandict[self.deep*2], T.mean(updw))
    return [maxdict, mindict, meandict]
Example 3: test_max
def test_max(self):
    # If we call max directly, it returns a CAReduce object,
    # which does not have R_op implemented!
    # self.check_mat_rop_lop(tensor.max(self.mx, axis=[0, 1])[0],
    #                        ())
    self.check_mat_rop_lop(tensor.max(self.mx, axis=0), (self.mat_in_shape[1],))
    self.check_mat_rop_lop(tensor.max(self.mx, axis=1), (self.mat_in_shape[0],))
Example 4: _activation
def _activation(self, Y, L, M, W):
    """Returns the activation for a given input.

    Derived from the generative model formulation of hierarchical
    Poisson mixtures, the formula for the activation in the network
    reads as follows:

    I_c =
        \sum_d \log(W_{cd}) y_d + \log(M_{lc})           for labeled data
        \sum_d \log(W_{cd}) y_d + \log(\sum_k M_{kc})    for unlabeled data
    s_c = softmax(I_c)
    """
    # first: complete inference to find label
    # Input integration:
    I = T.tensordot(Y, T.log(W), axes=[1, 1])
    # recurrent term:
    vM = M[L]
    L_index = T.eq(L, -1).nonzero()
    vM = T.set_subtensor(vM[L_index], T.sum(M, axis=0))
    # numeric trick to prevent overflow in the exp-function
    max_exponent = 86. - T.ceil(T.log(I.shape[1].astype('float32')))
    scale = T.switch(
        T.gt(T.max(I, axis=1, keepdims=True), max_exponent),
        T.max(I, axis=1, keepdims=True) - max_exponent,
        0.)
    # numeric approximation to prevent underflow in the exp-function:
    # map too low values of I to a fixed minimum value
    min_exponent = -87. + T.ceil(T.log(I.shape[1].astype('float32')))
    I = T.switch(
        T.lt(I - scale, min_exponent),
        scale + min_exponent,
        I)
    # activation: recurrent softmax with overflow protection
    s = vM * T.exp(I - scale) / T.sum(vM * T.exp(I - scale), axis=1, keepdims=True)
    return s
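The max_exponent/scale logic above guards against float32 overflow in the exp function (float32 exp saturates to inf a little above exp(88)). A small NumPy-only sketch (hypothetical values) of the same shift:

    import numpy as np

    I = np.array([[10., 100., 200.]], dtype=np.float32)

    # naive exponentiation overflows float32: exp(200) >> 3.4e38
    naive = np.exp(I)

    # shift each row down so its largest exponent stays below ~86
    max_exponent = 86.0
    scale = np.maximum(I.max(axis=1, keepdims=True) - max_exponent, 0.0)
    shifted = np.exp(I - scale)

    print(np.isinf(naive).any(), np.isinf(shifted).any())   # True False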
Example 5: predict
def predict(self, new_data, batch_size, pool_size):
    """
    predict for new data
    """
    img_shape = (batch_size, 1, self.image_shape[2], self.image_shape[3])
    conv_out = conv.conv2d(input=new_data, filters=self.W,
                           filter_shape=self.filter_shape, image_shape=img_shape)
    pool_list = []
    if self.non_linear == "tanh":
        conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle("x", 0, "x", "x"))
        # pad_len = int(self.max_window_len/2)
        # right_pad_len = int(self.filter_shape[2]/2)
        # index_shift = pad_len-right_pad_len
        index_shift = int(self.filter_shape[2] / 2)
        for i in xrange(batch_size):
            # partition sentence via pool size
            e1pos = pool_size[i, 0] + index_shift
            e2pos = pool_size[i, 1] + index_shift
            # if T.gt(e1pos, 0):
            #     p1 = conv_out_tanh[i, :, :e1pos, :]
            # else:
            #     p1 = conv_out_tanh[i, :, 0, :]
            p1 = conv_out_tanh[i, :, :e1pos, :]
            p2 = conv_out_tanh[i, :, e1pos:e2pos, :]
            p3 = conv_out_tanh[i, :, e2pos:, :]
            p1_pool_out = T.max(p1, axis=1)
            p2_pool_out = T.max(p2, axis=1)
            p3_pool_out = T.max(p3, axis=1)
            temp = T.concatenate([p1_pool_out, p2_pool_out, p3_pool_out], axis=1)
            pool_list.append(temp.dimshuffle("x", 0, 1))
    else:
        pass
    output = T.concatenate(pool_list, axis=0)
    return output
Example 6: define_network
def define_network(self, layers_info=None):
    """
    Builds Theano graph of the network.
    """
    self.hidden_layers = [None] * self.n_hidden.size
    self.params = []
    for i, h in enumerate(self.n_hidden):
        if i == 0:
            self.hidden_layers[i] = LBNHiddenLayer(
                self.rng, self.trng, self.x, self.n_in,
                h, self.det_activation[i],
                self.stoch_n_hidden, self.stoch_activation,
                det_activation_name=self.det_activation_names[i],
                stoch_activation_names=self.stoch_activation_names,
                m=self.m,
                det_W=None if layers_info is None else np.array(
                    layers_info['hidden_layers'][i]['LBNlayer']['detLayer']['W']),
                det_b=None if layers_info is None else np.array(
                    layers_info['hidden_layers'][i]['LBNlayer']['detLayer']['b']),
                stoch_mlp_info=None if layers_info is None else
                    layers_info['hidden_layers'][i]['LBNlayer']['stochLayer'])
        else:
            self.hidden_layers[i] = LBNHiddenLayer(
                self.rng, self.trng,
                self.hidden_layers[i - 1].output,
                self.n_hidden[i - 1], h, self.det_activation[i],
                self.stoch_n_hidden, self.stoch_activation,
                det_activation_name=self.det_activation_names[i],
                stoch_activation_names=self.stoch_activation_names,
                det_W=None if layers_info is None else np.array(
                    layers_info['hidden_layers'][i]['LBNlayer']['detLayer']['W']),
                det_b=None if layers_info is None else np.array(
                    layers_info['hidden_layers'][i]['LBNlayer']['detLayer']['b']),
                stoch_mlp_info=None if layers_info is None else
                    layers_info['hidden_layers'][i]['LBNlayer']['stochLayer'])

        self.params.append(self.hidden_layers[i].params)

    self.output_layer = OutputLayer(
        self.rng, self.hidden_layers[-1].output, self.n_hidden[-1],
        self.n_out, self.det_activation[-1],
        self.det_activation_names[-1],
        V_values=None if layers_info is None else np.array(
            layers_info['output_layer']['W']))
    self.params.append(self.output_layer.params)
    self.output = self.output_layer.output

    exp_value = -0.5 * T.sum((self.output - self.y.dimshuffle('x', 0, 1))**2, axis=2)
    max_exp_value = theano.ifelse.ifelse(T.lt(T.max(exp_value), -1 * T.min(exp_value)),
                                         T.max(exp_value), T.min(exp_value))
    self.log_likelihood = (T.sum(T.log(T.sum(T.exp(exp_value - max_exp_value), axis=0)) +
                                 max_exp_value) -
                           self.y.shape[0] * (T.log(self.m) +
                                              self.y.shape[1] / 2. * T.log(2 * np.pi)))
    self.predict = theano.function(inputs=[self.x, self.m], outputs=self.output)
Example 7: filterbank_matrices
def filterbank_matrices(self, center_y, center_x, delta, sigma):
    """
    Create an Fy and an Fx

    Parameters
    ----------
    center_y : T.vector (shape: batch_size)
    center_x : T.vector (shape: batch_size)
        Y and X center coordinates for the attention window
    delta : T.vector (shape: batch_size)
    sigma : T.vector (shape: batch_size)

    Returns
    -------
    FY, FX
    """
    tol = 1e-4
    # construct x and y coordinates for the grid points
    obj_x = center_x.dimshuffle(0, 'x') + \
        (delta.dimshuffle(0, 'x') * self.obj_x)
    obj_y = center_y.dimshuffle(0, 'x') + \
        (delta.dimshuffle(0, 'x') * self.obj_y)

    # construct unnormalized attention weights for each grid point
    FX = T.exp(-(self.img_x - obj_x.dimshuffle(0, 1, 'x'))**2. /
               (2. * sigma.dimshuffle(0, 'x', 'x')**2.))
    FY = T.exp(-(self.img_y - obj_y.dimshuffle([0, 1, 'x']))**2. /
               (2. * sigma.dimshuffle(0, 'x', 'x')**2.))

    # normalize the attention weights
    # FX = FX / (FX.sum(axis=-1).dimshuffle(0, 1, 'x') + tol)
    # FY = FY / (FY.sum(axis=-1).dimshuffle(0, 1, 'x') + tol)
    FX = FX / (T.max(FX.sum(axis=-1)) + tol)
    FY = FY / (T.max(FY.sum(axis=-1)) + tol)
    return FY, FX
Example 8: pool_function
def pool_function(input, axis):
    input_shape = tuple(input.shape)
    num_feature_maps_out = input_shape[axis - 1]
    pool_size = input_shape[axis]
    pool_shape = (input_shape[:axis] + (num_in_sum, num_in_max) +
                  input_shape[axis + 1:])
    # print("make_ghh_pool_conv2d: pool_shape is {}".format(pool_shape))
    input_reshaped = input.reshape(pool_shape)

    # raise NotImplementedError('TODO: use a soft max instead of T.max')
    # res_after_max = T.max(input_reshaped, axis=axis+1)
    # Soft max with strength of max_strength
    res_after_max = np.cast[floatX](1.0) / np.cast[floatX](max_strength) \
        * T.log(T.mean(T.exp(max_strength * (input_reshaped -
                                             T.max(input_reshaped, axis=axis + 1, keepdims=True))),
                       axis=axis + 1)) \
        + T.max(input_reshaped, axis=axis + 1)

    # Get deltas
    delta = np.cast[floatX](1.0) - np.cast[floatX](2.0) * \
        (T.arange(num_in_sum, dtype=floatX) % np.cast[floatX](2))
    target_dimshuffle = ('x',) * axis + (0,) + ('x',) * (len(input_shape) - 1 - axis)
    # print("make_ghh_pool_conv2d: target_dimshuffle is {}".format(target_dimshuffle))
    delta = delta.flatten().dimshuffle(*target_dimshuffle)
    res_after_sum = T.sum(res_after_max * delta, axis=axis)
    return res_after_sum
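The pooling above swaps a hard T.max for a log-mean-exp "soft max" controlled by max_strength. A short NumPy sketch (hypothetical values) showing that the soft version approaches the true maximum as the strength grows:

    import numpy as np

    def soft_max(x, strength):
        # numerically stable log-mean-exp, anchored at the true maximum
        m = x.max()
        return np.log(np.mean(np.exp(strength * (x - m)))) / strength + m

    x = np.array([0.1, 0.7, 0.3, 0.9])
    for s in (1.0, 10.0, 100.0):
        print(s, soft_max(x, s))   # climbs towards 0.9 as s grows
    print('hard max:', x.max())    # 0.9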
Example 9: forward_init
def forward_init(self):
    obs_ = self.obs_.reshape([self.obs_.shape[0] * self.obs_.shape[1],
                              self.obs_.shape[-1]])
    h = eval(self.activ)(tensor.dot(obs_, self.params['W']) + self.params['b'][None, None, :])
    self.pi = []
    for oi in xrange(self.n_out):
        pi = tensor.dot(h, self.params['U%d' % oi]) + self.params['c%d' % oi][None, :]
        pi = tensor.exp(pi - tensor.max(pi, -1, keepdims=True))
        self.pi.append(pi / pi.sum(-1, keepdims=True))

    prev = tensor.matrix('prev', dtype='float32')
    # obs = tensor.matrix('obs', dtype='float32')
    obs_ = self.obs_.reshape([self.obs_.shape[0] * self.obs_.shape[1],
                              self.obs_.shape[-1]])
    obs_ = obs_[0]
    self.h_init = lambda x: numpy.float32(0.)
    h = eval(self.activ)(tensor.dot(obs_, self.params['W']) + self.params['b'][None, :])
    pi = []
    for oi in xrange(self.n_out):
        pi_ = tensor.dot(h, self.params['U%d' % oi]) + self.params['c%d' % oi][None, :]
        pi_ = tensor.exp(pi_ - tensor.max(pi_, -1, keepdims=True))
        pi.append(pi_ / pi_.sum(-1, keepdims=True))

    self.forward = theano.function([self.obs, prev], [h] + pi,
                                   name='forward', on_unused_input='ignore')
Example 10: maxout
def maxout(z=None):
    # g = theano.shared(numpy.zeros((hidden_layers_sizes[i],)), name='g', borrow=True)
    g = T.max(z[0:5])
    g = T.stack(g, T.max(z[5:10]))
    for index in xrange(hidden_layers_sizes[i] - 10):
        g = T.concatenate([g, [T.max(z[5*(index+2):5*(index+3)])]])
    return g
Example 11: compile_gpu_func
def compile_gpu_func(nan_is_error, inf_is_error, big_is_error):
    """ compile utility function used by contains_nan and contains_inf
    """
    global f_gpumin, f_gpumax, f_gpuabsmax
    if not cuda.cuda_available:
        return
    guard_input = cuda.fvector("nan_guard")
    cuda_compile_failed = False
    if (nan_is_error or inf_is_error) and f_gpumin is None:
        try:
            f_gpumin = theano.function([guard_input], T.min(guard_input),
                                       mode="FAST_RUN")
        except RuntimeError:
            # This can happen if cuda is available, but the
            # device is in exclusive mode and used by another
            # process.
            cuda_compile_failed = True
    if inf_is_error and not cuda_compile_failed and f_gpumax is None:
        try:
            f_gpumax = theano.function([guard_input], T.max(guard_input),
                                       mode="FAST_RUN")
        except RuntimeError:
            # This can happen if cuda is available, but the
            # device is in exclusive mode and used by another
            # process.
            cuda_compile_failed = True
    if big_is_error and not cuda_compile_failed and f_gpuabsmax is None:
        try:
            f_gpuabsmax = theano.function([guard_input], T.max(T.abs_(guard_input)),
                                          mode="FAST_RUN")
        except RuntimeError:
            # This can happen if cuda is available, but the
            # device is in exclusive mode and used by another
            # process.
            cuda_compile_failed = True
Example 12: __theano__softmax
def __theano__softmax(self, inp, dim=None, predict=False, issequence=False):
    if dim is None:
        assert issequence, "Data dimensionality could not be parsed."
        dim = 2

    # FFD for dimensions 1 and 2
    if dim == 1 or dim == 2:
        # Using the numerically stable implementation (along the channel axis):
        ex = T.exp(inp - T.max(inp, axis=1, keepdims=True))
        y = ex / T.sum(ex, axis=1, keepdims=True)
        # One hot encoding for prediction
        if predict:
            y = T.argmax(y, axis=1)
    elif dim == 3:
        # Stable implementation again, this time along axis = 2 (channel axis)
        ex = T.exp(inp - T.max(inp, axis=2, keepdims=True))
        y = ex / T.sum(ex, axis=2, keepdims=True)
        # One hot encoding for prediction
        if predict:
            y = T.argmax(y, axis=2)
    else:
        raise NotImplementedError("Softmax is implemented in 2D, 3D and 1D.")
    return y
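Both branches above use the standard shift-by-max trick. A quick NumPy check (hypothetical values) that subtracting the per-row maximum leaves the softmax output unchanged while keeping exp from overflowing:

    import numpy as np

    def stable_softmax(z, axis=1):
        e = np.exp(z - z.max(axis=axis, keepdims=True))   # largest exponent is 0
        return e / e.sum(axis=axis, keepdims=True)

    z = np.array([[1000., 1001., 1002.]])   # naive np.exp(z) would overflow to inf
    print(stable_softmax(z))                 # [[0.09003057 0.24472847 0.66524096]]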
Example 13: test_optimization_max
def test_optimization_max(self):
    data = numpy.asarray(numpy.random.rand(2, 3), dtype=config.floatX)
    n = tensor.matrix()

    f = function([n], tensor.max(n, 0), mode=self.mode)
    topo = f.maker.env.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, CAReduce)
    f(data)

    f = function([n], tensor.max(-n, 0), mode=self.mode)
    topo = f.maker.env.toposort()
    assert len(topo) == 2
    assert isinstance(topo[0].op, Elemwise)
    assert isinstance(topo[0].op.scalar_op, scalar.Neg)
    assert isinstance(topo[1].op, CAReduce)
    f(data)

    f = function([n], -tensor.max(n, 0), mode=self.mode)
    topo = f.maker.env.toposort()
    assert len(topo) == 2
    assert isinstance(topo[0].op, CAReduce)
    assert isinstance(topo[1].op, Elemwise)
    assert isinstance(topo[1].op.scalar_op, scalar.Neg)
    f(data)

    f = function([n], -tensor.max(-n, 0), mode=self.mode)
    topo = f.maker.env.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, CAReduce)  # min
    f(data)
Example 14: _test_layer_stats
def _test_layer_stats(self, layer_output):
    """
    DESCRIPTION:
        This method is called every batch: the examples from the test or valid set
        are passed through, and the final result is the mean of the results from all
        the batches in an epoch of the test or valid set.
    PARAM:
        layer_output: the output from the layer
    RETURN:
        A list of tuples of [('name_a', var_a), ('name_b', var_b)] where each var is a scalar
    """
    w_len = T.sqrt((self.W ** 2).sum(axis=0))
    max_length = T.max(w_len)
    mean_length = T.mean(w_len)
    min_length = T.min(w_len)
    return [('max_col_length', max_length),
            ('mean_col_length', mean_length),
            ('min_col_length', min_length),
            ('output_max', T.max(layer_output)),
            ('output_mean', T.mean(layer_output)),
            ('output_min', T.min(layer_output)),
            ('max_W', T.max(self.W)),
            ('mean_W', T.mean(self.W)),
            ('min_W', T.min(self.W)),
            ('max_b', T.max(self.b)),
            ('mean_b', T.mean(self.b)),
            ('min_b', T.min(self.b))]
Example 15: update_log_p
def update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ),
        log_p_curr.shape[0]
    ), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next