This article collects typical usage examples of the Python function theano.tensor.min. If you have been wondering what theano.tensor.min does, how to call it, or what real code that uses it looks like, the curated examples below may help.
The following presents 15 code examples of the min function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
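Before working through the examples, here is a minimal sketch of the function itself (not taken from any of the projects below): theano.tensor.min builds a symbolic reduction that, once compiled, returns the minimum over all elements or along a chosen axis.

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
global_min = T.min(x)        # minimum over every element (a scalar)
col_min = T.min(x, axis=0)   # minimum of each column (a vector)

f = theano.function([x], [global_min, col_min])
data = np.array([[3., 1., 2.],
                 [0., 5., 4.]], dtype=theano.config.floatX)
print(f(data))               # global minimum 0.0 and per-column minima [0., 1., 2.]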
Example 1: define_network
def define_network(self, layers_info=None):
    """
    Builds the Theano graph of the network.
    """
    self.hidden_layers = [None] * self.n_hidden.size
    self.params = []
    for i, h in enumerate(self.n_hidden):
        if i == 0:
            self.hidden_layers[i] = LBNHiddenLayer(
                self.rng, self.trng, self.x, self.n_in,
                h, self.det_activation[i],
                self.stoch_n_hidden, self.stoch_activation,
                det_activation_name=self.det_activation_names[i],
                stoch_activation_names=self.stoch_activation_names,
                m=self.m,
                det_W=None if layers_info is None else np.array(
                    layers_info['hidden_layers'][i]['LBNlayer']['detLayer']['W']),
                det_b=None if layers_info is None else np.array(
                    layers_info['hidden_layers'][i]['LBNlayer']['detLayer']['b']),
                stoch_mlp_info=None if layers_info is None else
                    layers_info['hidden_layers'][i]['LBNlayer']['stochLayer'])
        else:
            self.hidden_layers[i] = LBNHiddenLayer(
                self.rng, self.trng, self.hidden_layers[i - 1].output,
                self.n_hidden[i - 1], h, self.det_activation[i],
                self.stoch_n_hidden, self.stoch_activation,
                det_activation_name=self.det_activation_names[i],
                stoch_activation_names=self.stoch_activation_names,
                det_W=None if layers_info is None else np.array(
                    layers_info['hidden_layers'][i]['LBNlayer']['detLayer']['W']),
                det_b=None if layers_info is None else np.array(
                    layers_info['hidden_layers'][i]['LBNlayer']['detLayer']['b']),
                stoch_mlp_info=None if layers_info is None else
                    layers_info['hidden_layers'][i]['LBNlayer']['stochLayer'])
        self.params.append(self.hidden_layers[i].params)

    self.output_layer = OutputLayer(
        self.rng, self.hidden_layers[-1].output, self.n_hidden[-1],
        self.n_out, self.det_activation[-1],
        self.det_activation_names[-1],
        V_values=None if layers_info is None else np.array(
            layers_info['output_layer']['W']))
    self.params.append(self.output_layer.params)
    self.output = self.output_layer.output

    # Log-likelihood over the m stochastic samples; the exponent is shifted
    # by an extreme value so that T.exp / T.log stay numerically stable.
    exp_value = -0.5 * T.sum((self.output - self.y.dimshuffle('x', 0, 1)) ** 2, axis=2)
    max_exp_value = theano.ifelse.ifelse(T.lt(T.max(exp_value), -1 * T.min(exp_value)),
                                         T.max(exp_value), T.min(exp_value))
    self.log_likelihood = (T.sum(T.log(T.sum(T.exp(exp_value - max_exp_value), axis=0)) +
                                 max_exp_value) -
                           self.y.shape[0] * (T.log(self.m) +
                                              self.y.shape[1] / 2. * T.log(2 * np.pi)))
    self.predict = theano.function(inputs=[self.x, self.m], outputs=self.output)
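The log-likelihood at the end of this example uses a shift trick: an extreme value of the exponent (the max or the min, whichever is larger in magnitude) is subtracted before T.exp and added back after T.log, so the exponentials stay in a representable range. A standalone sketch of the more common max-shift variant of this pattern, with an assumed helper name:

import theano.tensor as T

def stable_logsumexp(a, axis=0):
    """log(sum(exp(a), axis)) computed without overflowing T.exp."""
    a_max = T.max(a, axis=axis, keepdims=True)
    return T.log(T.sum(T.exp(a - a_max), axis=axis)) + T.max(a, axis=axis)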
Example 2: plotUpdate
def plotUpdate(self, updates):
    '''
    >>>get update info of each layer
    >>>type updates: dict
    >>>para updates: update dictionary
    '''
    maxdict = T.zeros(shape=(self.deep * 2 + 1,))
    mindict = T.zeros(shape=(self.deep * 2 + 1,))
    meandict = T.zeros(shape=(self.deep * 2 + 1,))

    for i in xrange(self.deep):
        updw = updates[self.layers[i].w] - self.layers[i].w
        maxdict = T.set_subtensor(maxdict[2 * i], T.max(updw))
        mindict = T.set_subtensor(mindict[2 * i], T.min(updw))
        meandict = T.set_subtensor(meandict[2 * i], T.mean(updw))
        updb = updates[self.layers[i].b] - self.layers[i].b
        maxdict = T.set_subtensor(maxdict[2 * i + 1], T.max(updb))
        mindict = T.set_subtensor(mindict[2 * i + 1], T.min(updb))
        meandict = T.set_subtensor(meandict[2 * i + 1], T.mean(updb))

    updw = updates[self.classifier.w] - self.classifier.w
    maxdict = T.set_subtensor(maxdict[self.deep * 2], T.max(updw))
    mindict = T.set_subtensor(mindict[self.deep * 2], T.min(updw))
    meandict = T.set_subtensor(meandict[self.deep * 2], T.mean(updw))
    return [maxdict, mindict, meandict]
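A hedged sketch of how statistics like these can be compiled and watched alongside a parameter update; the shared variable, the gradient stand-in, and the learning rate below are illustrative and not part of the class above.

import numpy as np
import theano
import theano.tensor as T

w = theano.shared(np.zeros((3, 3), dtype=theano.config.floatX), name='w')
lr = T.scalar('lr')
grad = T.ones_like(w)                 # stand-in for a real gradient
new_w = w - lr * grad
delta = new_w - w                     # the "updw" quantity above
stats = [T.max(delta), T.min(delta), T.mean(delta)]
monitor = theano.function([lr], stats, updates=[(w, new_w)])
print(monitor(0.1))                   # all three are -0.1 for this toy gradient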
Example 3: compute_S
def compute_S(idx, Sp1, zAA, zBB):
    Sm = ifelse(T.eq(idx, nT - 2),
                T.dot(zBB[iib[-1]], Tla.matrix_inverse(zAA[iia[-1]])),
                T.dot(zBB[iib[idx]],
                      Tla.matrix_inverse(zAA[iia[T.min([idx + 1, nT - 2])]]
                                         - T.dot(Sp1, T.transpose(zBB[iib[T.min([idx + 1, nT - 2])]]))))
                )
    return Sm
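The T.min([idx + 1, nT - 2]) expression above clamps a symbolic loop index so the step never indexes past the last valid slice. A minimal, self-contained illustration of the same idiom (variable names are made up):

import theano
import theano.tensor as T

idx = T.iscalar('idx')
upper = T.iscalar('upper')
clamped = T.min([idx + 1, upper])   # stack the two scalars and reduce
f = theano.function([idx, upper], clamped)
print(f(5, 3), f(1, 3))             # 3 2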
Example 4: test_optimization_min
def test_optimization_min(self):
    data = numpy.asarray(numpy.random.rand(2, 3), dtype=config.floatX)
    n = tensor.matrix()

    f = function([n], tensor.min(n, 0), mode=self.mode)
    topo = f.maker.env.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, CAReduce)
    f(data)

    # test variant with neg to make sure we optimize correctly
    f = function([n], tensor.min(-n, 0), mode=self.mode)
    topo = f.maker.env.toposort()
    assert len(topo) == 2
    assert isinstance(topo[0].op, CAReduce)  # max
    assert isinstance(topo[1].op, Elemwise)
    assert isinstance(topo[1].op.scalar_op, scalar.Neg)
    f(data)

    f = function([n], -tensor.min(n, 0), mode=self.mode)
    topo = f.maker.env.toposort()
    assert len(topo) == 2
    assert isinstance(topo[0].op, Elemwise)
    assert isinstance(topo[0].op.scalar_op, scalar.Neg)
    assert isinstance(topo[1].op, CAReduce)  # max
    f(data)

    f = function([n], -tensor.min(-n, 0), mode=self.mode)
    topo = f.maker.env.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, CAReduce)  # max
    f(data)
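The test above checks Theano's graph rewrites: T.min is expressed through the maximum reduction via the identity min(x) = -max(-x), and the optimizer cancels double negations where it can, which is why the expected op counts differ between the four variants. A small numeric check of that identity, independent of the test suite:

import numpy as np
import theano
import theano.tensor as T

n = T.matrix('n')
f = theano.function([n], [T.min(n, 0), -T.max(-n, 0)])
data = np.random.rand(2, 3).astype(theano.config.floatX)
a, b = f(data)
assert np.allclose(a, b)   # both are the per-column minima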
Example 5: _test_layer_stats
def _test_layer_stats(self, layer_output):
    """
    DESCRIPTION:
        This method is called every batch as the examples from the test or valid
        set are passed through; the final result is the mean of the results from
        all the batches in an epoch of the test set or valid set.
    PARAM:
        layer_output: the output from the layer
    RETURN:
        A list of tuples of [('name_a', var_a), ('name_b', var_b)] where each var is a scalar
    """
    w_len = T.sqrt((self.W ** 2).sum(axis=0))
    max_length = T.max(w_len)
    mean_length = T.mean(w_len)
    min_length = T.min(w_len)
    return [('max_col_length', max_length),
            ('mean_col_length', mean_length),
            ('min_col_length', min_length),
            ('output_max', T.max(layer_output)),
            ('output_mean', T.mean(layer_output)),
            ('output_min', T.min(layer_output)),
            ('max_W', T.max(self.W)),
            ('mean_W', T.mean(self.W)),
            ('min_W', T.min(self.W)),
            ('max_b', T.max(self.b)),
            ('mean_b', T.mean(self.b)),
            ('min_b', T.min(self.b))]
Example 6: norm
def norm(x, ord):
    x = as_tensor_variable(x)
    ndim = x.ndim
    if ndim == 0:
        raise ValueError("'axis' entry is out of bounds.")
    elif ndim == 1:
        if ord is None:
            return tensor.sum(x ** 2) ** 0.5
        elif ord == 'inf':
            return tensor.max(abs(x))
        elif ord == '-inf':
            return tensor.min(abs(x))
        elif ord == 0:
            return x[x.nonzero()].shape[0]
        else:
            try:
                z = tensor.sum(abs(x ** ord)) ** (1. / ord)
            except TypeError:
                raise ValueError("Invalid norm order for vectors.")
            return z
    elif ndim == 2:
        if ord is None or ord == 'fro':
            return tensor.sum(abs(x ** 2)) ** (0.5)
        elif ord == 'inf':
            return tensor.max(tensor.sum(abs(x), 1))
        elif ord == '-inf':
            return tensor.min(tensor.sum(abs(x), 1))
        elif ord == 1:
            return tensor.max(tensor.sum(abs(x), 0))
        elif ord == -1:
            return tensor.min(tensor.sum(abs(x), 0))
        else:
            raise ValueError(0)
    elif ndim > 2:
        raise NotImplementedError("We don't support norm with ndim > 2")
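A usage sketch for the norm() helper above (assuming it is in scope together with its own imports), checking a few vector orders against numpy.linalg.norm; the 'inf' and '-inf' orders map to T.max and T.min of the absolute values.

import numpy as np
import theano
import theano.tensor as T

x = T.dvector('x')
f = theano.function([x], [norm(x, None), norm(x, 'inf'), norm(x, '-inf')])
v = np.array([3.0, -4.0, 1.0])
l2, linf, lninf = f(v)
assert np.isclose(l2, np.linalg.norm(v))              # sqrt(26)
assert np.isclose(linf, np.linalg.norm(v, np.inf))    # max |x_i| = 4
assert np.isclose(lninf, np.linalg.norm(v, -np.inf))  # min |x_i| = 1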
Example 7: eig_pos_barrier
def eig_pos_barrier(theta=Th.dvector('theta'), M=Th.dmatrix('M'),
                    STA=Th.dvector('STA'), STC=Th.dmatrix('STC'),
                    U=Th.dmatrix('U'), V1=Th.dvector('V1'), **other):
    '''
    A barrier enforcing that the log-det of M should be > exp(-6),
    and all the eigenvalues of M > 0. Returns true if the barrier is violated.
    '''
    ImM = Th.identity_like(M) - (M + M.T) / 2
    w, v = eig(ImM)
    return 1 - (Th.sum(Th.log(w)) > -250) * (Th.min(w) > 0) * (Th.min(V1.flatten()) > 0) \
Example 8: _get_hidden_layer_connectivity
def _get_hidden_layer_connectivity(self, layerIdx):
    layer_size = self._hidden_sizes[layerIdx]
    if layerIdx == 0:
        p_vals = self._get_p(T.min(self.layers_connectivity[layerIdx]))
    else:
        p_vals = self._get_p(T.min(self.layers_connectivity_updates[layerIdx - 1]))

    # Implementations of np.choose in theano GPU
    # return T.nonzero(self._mrng.multinomial(pvals=[self._p_vals] * layer_size, dtype=theano.config.floatX))[1].astype(dtype=theano.config.floatX)
    # return T.argmax(self._mrng.multinomial(pvals=[self._p_vals] * layer_size, dtype=theano.config.floatX), axis=1)
    return T.sum(T.cumsum(self._mrng.multinomial(pvals=T.tile(p_vals[::-1][None, :], (layer_size, 1)), dtype=theano.config.floatX), axis=1), axis=1)
Example 9: get_monitoring_channels
def get_monitoring_channels(self, V):
    vb, hb, weights = self.get_params()
    norms = theano_norms(weights)
    return {'W_min': tensor.min(weights),
            'W_max': tensor.max(weights),
            'W_norm_mean': tensor.mean(norms),
            'bias_hid_min': tensor.min(hb),
            'bias_hid_mean': tensor.mean(hb),
            'bias_hid_max': tensor.max(hb),
            'bias_vis_min': tensor.min(vb),
            'bias_vis_mean': tensor.mean(vb),
            'bias_vis_max': tensor.max(vb),
            }
Example 10: NRMSE
def NRMSE(self, y):
    """Return the normalized root-mean-square error over the minibatch:
    the RMSE between predictions and targets divided by the range
    (max - min) of the targets.

    :type y: theano.tensor.TensorType
    :param y: corresponds to a vector that gives for each example the
              correct target value
    """
    # check if y has same dimension of y_pred
    if y.ndim != self.y_pred.ndim:
        raise TypeError(
            'y should have the same shape as self.y_pred',
            ('y', y.type, 'y_pred', self.y_pred.type)
        )
    # check if y is of the correct datatype
    if y.dtype.startswith('flo'):  # CHANGED!!!!!
        return T.sqrt(T.mean(T.sqr(y - self.y_pred))) / (T.max(y) - T.min(y))  # NRMSE
    else:
        raise NotImplementedError()
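In plain terms, the value returned is the root-mean-square error between y and self.y_pred, normalized by the range of the targets. A plain-NumPy sketch of the same quantity (the function and argument names here are illustrative):

import numpy as np

def nrmse(y_true, y_pred):
    rmse = np.sqrt(np.mean((y_true - y_pred) ** 2))
    return rmse / (y_true.max() - y_true.min())

print(nrmse(np.array([0., 1., 2., 3.]),
            np.array([0.1, 0.9, 2.2, 2.8])))   # roughly 0.05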
Example 11: get_output_for
def get_output_for(self, input, init=False, **kwargs):
    if input.ndim > 2:
        # if the input has more than two dimensions, flatten it into a
        # batch of feature vectors.
        input = input.flatten(2)
    activation = T.tensordot(input, self.W, [[1], [0]])
    abs_dif = (T.sum(abs(activation.dimshuffle(0, 1, 2, 'x') - activation.dimshuffle('x', 1, 2, 0)), axis=2)
               + 1e6 * T.eye(input.shape[0]).dimshuffle(0, 'x', 1))
    if init:
        mean_min_abs_dif = 0.5 * T.mean(T.min(abs_dif, axis=2), axis=0)
        abs_dif /= mean_min_abs_dif.dimshuffle('x', 0, 'x')
        self.init_updates = [(self.log_weight_scale, self.log_weight_scale - T.log(mean_min_abs_dif).dimshuffle(0, 'x'))]
    f = T.sum(T.exp(-abs_dif), axis=2)
    if init:
        mf = T.mean(f, axis=0)
        f -= mf.dimshuffle('x', 0)
        self.init_updates.append((self.b, -mf))
    else:
        f += self.b.dimshuffle('x', 0)
    return T.concatenate([input, f], axis=1)
Example 12: make_consensus
def make_consensus(self, networks, axis=2):
    cns = self.attrs['consensus']
    if cns == 'max':
        return T.max(networks, axis=axis)
    elif cns == 'min':
        return T.min(networks, axis=axis)
    elif cns == 'mean':
        return T.mean(networks, axis=axis)
    elif cns == 'flat':
        if self.depth == 1:
            return networks
        if axis == 2:
            return networks.flatten(ndim=3)
            # return T.reshape(networks, (networks.shape[0], networks.shape[1], T.prod(networks.shape[2:])))
        else:
            return networks.flatten(ndim=2)  # T.reshape(networks, (networks.shape[0], T.prod(networks.shape[1:])))
    elif cns == 'sum':
        return T.sum(networks, axis=axis, acc_dtype=theano.config.floatX)
    elif cns == 'prod':
        return T.prod(networks, axis=axis)
    elif cns == 'var':
        return T.var(networks, axis=axis)
    elif cns == 'project':
        p = self.add_param(self.create_random_uniform_weights(self.attrs['n_out'], 1, self.attrs['n_out'] + self.depth + 1))
        return T.tensordot(p, networks, [[1], [axis]])
    elif cns == 'random':
        idx = self.rng.random_integers(size=(1,), low=0, high=self.depth)
        if axis == 0: return networks[idx]
        if axis == 1: return networks[:, idx]
        if axis == 2: return networks[:, :, idx]
        if axis == 3: return networks[:, :, :, idx]
        assert False, "axis too large"
    else:
        assert False, "consensus method unknown: " + cns
Example 13: _best_path_decode
def _best_path_decode(activations):
    """Calculate the CTC best-path decoding for a given activation sequence.
    In the returned matrix, shorter sequences are padded with -1s."""
    # For each timestep, get the highest output
    decoding = T.argmax(activations, axis=2)
    # prev_outputs[time][example] == decoding[time - 1][example]
    prev_outputs = T.concatenate([T.alloc(_BLANK, 1, decoding.shape[1]), decoding], axis=0)[:-1]
    # Filter all repetitions to zero (blanks are already zero)
    decoding = decoding * T.neq(decoding, prev_outputs)
    # Calculate how many blanks each sequence has relative to longest sequence
    blank_counts = T.eq(decoding, 0).sum(axis=0)
    min_blank_count = T.min(blank_counts, axis=0)
    max_seq_length = decoding.shape[0] - min_blank_count  # used later
    padding_needed = blank_counts - min_blank_count
    # Generate the padding matrix by ... doing tricky things
    max_padding_needed = T.max(padding_needed, axis=0)
    padding_needed = padding_needed.dimshuffle('x', 0).repeat(max_padding_needed, axis=0)
    padding = T.arange(max_padding_needed).dimshuffle(0, 'x').repeat(decoding.shape[1], axis=1)
    padding = PADDING * T.lt(padding, padding_needed)
    # Apply the padding
    decoding = T.concatenate([decoding, padding], axis=0)
    # Remove zero values
    nonzero_vals = decoding.T.nonzero_values()
    decoding = T.reshape(nonzero_vals, (decoding.shape[1], max_seq_length)).T
    return decoding
Example 14: test_max_pool_2d_3D
def test_max_pool_2d_3D(self):
    rng = numpy.random.RandomState(utt.fetch_seed())
    maxpoolshps = [(1, 2)]
    imval = rng.rand(2, 3, 4)
    images = tensor.dtensor3()

    for maxpoolshp in maxpoolshps:
        for ignore_border in [True, False]:
            # print 'maxpoolshp =', maxpoolshp
            # print 'ignore_border =', ignore_border
            numpy_output_val = self.numpy_max_pool_2d(imval, maxpoolshp, ignore_border)
            output = max_pool_2d(images, maxpoolshp, ignore_border)
            output_val = function([images], output)(imval)
            assert numpy.all(output_val == numpy_output_val)

            c = tensor.sum(output)
            c_val = function([images], c)(imval)
            g = tensor.grad(c, images)
            g_val = function([images],
                             [g.shape,
                              tensor.min(g, axis=(0, 1, 2)),
                              tensor.max(g, axis=(0, 1, 2))]
                             )(imval)
Example 15: compute_probabilistic_matrix
def compute_probabilistic_matrix(self, X, y, num_cases, k=5):
    z = T.dot(X, self.A)  # Transform x into z space
    dists = T.sqr(dist2hy(z, z))
    dists = T.extra_ops.fill_diagonal(dists, T.max(dists) + 1)
    nv = T.min(dists, axis=1)  # value of nearest neighbour
    dists = (dists.T - nv).T
    d = T.extra_ops.fill_diagonal(dists, 0)

    # Take only the k nearest
    num = T.zeros((num_cases, self.num_classes))
    denom = T.zeros((num_cases,))
    for c_i in xrange(self.num_classes):
        # Mask for class i
        mask_i = T.eq(T.outer(T.ones_like(y), y), c_i)
        # k nearest neighbours within class i
        dim_ci = T.sum(mask_i[0])
        d_c_i = T.reshape(d[mask_i.nonzero()], (num_cases, dim_ci))
        k_indice = T.argsort(d_c_i, axis=1)[:, 0:k]
        kd = T.zeros((num_cases, k))
        for it in xrange(k):
            kd = T.set_subtensor(kd[:, it], d_c_i[T.arange(num_cases), k_indice[:, it]])
        # Numerator
        value = T.exp(-T.mean(kd, axis=1))
        num = T.set_subtensor(num[:, c_i], value)
        denom += value
    p = num / denom.dimshuffle(0, 'x')  # prob that point i will be correctly classified
    return p
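The subtraction of nv (the row-wise nearest-neighbour distance, obtained with T.min) before T.exp is a numerical-stability shift: the constant factor it introduces is shared by num and denom and cancels in the division. A compact sketch of the same pattern on a plain distance matrix (names are illustrative, not from the class above):

import theano
import theano.tensor as T

d = T.matrix('d')                                  # pairwise squared distances
d_shifted = d - T.min(d, axis=1, keepdims=True)    # nearest neighbour maps to 0
w = T.exp(-d_shifted)                              # weights stay in (0, 1]
p = w / T.sum(w, axis=1, keepdims=True)            # normalization cancels the shift
soft_weights = theano.function([d], p)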