本文整理汇总了Python中theano.tensor.std函数的典型用法代码示例。如果您正苦于以下问题:Python std函数的具体用法?Python std怎么用?Python std使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了std函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _output
def _output(self, input, *args, **kwargs):
    """Summarise the previous layer's binarised activations.

    Returns a length-2 vector: the mean per-feature std of the 0/1
    activation mask, followed by the mean activation rate.
    NOTE(review): the `input` argument is ignored; the layer re-reads
    `self.input_layer.output()` — confirm that is intentional.
    """
    activations = self.input_layer.output()
    mask = T.switch(T.gt(activations, 0), 1, 0)
    # Convolutional outputs (N, C, H, W): reduce everything but channels.
    if mask.ndim > 2:
        feature_std = T.std(mask, axis=(0, 2, 3))
    else:
        feature_std = T.std(mask, axis=0)
    summary = [T.mean(feature_std).reshape((1,)), T.mean(mask).reshape((1,))]
    return T.concatenate(summary)
示例2: cross_correlation
def cross_correlation(x, y):
    """Return 1 minus the (population) Pearson-style correlation of x and y.

    Identical samples yield 0; perfectly anti-correlated samples yield 2.
    """
    dx = x - mean(x)
    dy = y - mean(y)
    normaliser = std(x) * std(y)
    return 1 - mean(dx * dy / normaliser)
示例3: __build_center
def __build_center(self):
    """Compile (once) a theano function that centres a 3-d image batch.

    The returned callable maps imgv -> (imgv - mean) / spread.
    """
    imgv = T.dtensor3("imgv")
    mu = T.mean(imgv, 0)
    # NOTE(review): std is applied twice, collapsing two axes to get a
    # per-column spread — looks deliberate, but worth confirming against
    # the intended normalisation.
    sigma = T.std(T.std(imgv, 0), 0)
    return function(inputs=[imgv], outputs=[(imgv - mu) / sigma])
示例4: batch_normalize
def batch_normalize(Y):
    """
    Standardise each column of Y to zero mean and unit variance.
    """
    mu = T.mean(Y, axis=0, keepdims=True)
    sigma = T.std(Y, axis=0, keepdims=True)
    return (Y - mu) / sigma
示例5: correlation
def correlation(input1, input2):
    """Row-wise (population) Pearson correlation of two matrices,
    rescaled from [-1, 1] to [0, 1]; returned as a vector of length n_rows."""
    shape = T.shape(input1)
    rows, cols = shape[0], shape[1]
    # Standardise each row of both inputs.
    z0 = (input1 - T.mean(input1, axis=1, keepdims=True)) / \
        T.std(input1, axis=1, keepdims=True)
    z1 = (input2 - T.mean(input2, axis=1, keepdims=True)) / \
        T.std(input2, axis=1, keepdims=True)
    corr = T.sum(z0 * z1, axis=1) / cols
    corr = (corr + np.float32(1.)) / np.float32(2.)
    return T.reshape(corr, (rows,))
示例6: _train_fprop
def _train_fprop(self, state_below):
miu = state_below.mean(axis=0)
std = T.std(state_below, axis=0)
self.moving_mean += self.mem * miu + (1-self.mem) * self.moving_mean
self.moving_std += self.mem * std + (1-self.mem) * self.moving_std
Z = (state_below - self.moving_mean) / (self.moving_std + self.epsilon)
return self.gamma * Z + self.beta
示例7: get_stats
def get_stats(input, stat=None):
    """
    Map statistic names to theano expressions computed over the input.

    Parameters
    ----------
    input : tensor
        Theano tensor to summarise.
    stat : string or list of strings, optional
        Restrict the result to these names; None returns every statistic.

    Returns
    -------
    dict
        {statistic_name: theano expression} covering mean, var, std,
        min, max, l1 and l2.
    """
    expressions = {
        'mean': T.mean(input),
        'var': T.var(input),
        'std': T.std(input),
        'min': T.min(input),
        'max': T.max(input),
        'l1': input.norm(L=1),
        'l2': input.norm(L=2),
    }
    requested = raise_to_list(stat)
    if requested is None:
        return expressions
    # Keep only recognised string names, in dictionary form.
    return {name: expressions[name]
            for name in requested
            if isinstance(name, string_types) and name in expressions}
示例8: _build_activation
def _build_activation(self, act=None):
    '''Given an activation description, return a callable that implements it.

    Parameters:
      act: optional activation name ('tanh', 'relu', 'norm:std', ...) or a
        '+'-joined composition such as 'relu+norm:std'; defaults to the
        lowercased --activation command-line argument.

    Returns a callable tagged with a __theanets_name__ attribute.

    Raises KeyError for unknown activation names.
    '''
    def compose(a, b):
        # Function composition; the composite keeps a readable name.
        c = lambda z: b(a(z))
        c.__theanets_name__ = '%s(%s)' % (b.__theanets_name__, a.__theanets_name__)
        return c
    act = act or self.args.activation.lower()
    if '+' in act:
        # 'f+g' means apply f first, then g.
        return reduce(compose, (self._build_activation(a) for a in act.split('+')))
    options = {
        'tanh': TT.tanh,
        'linear': lambda z: z,
        'logistic': TT.nnet.sigmoid,
        'softplus': TT.nnet.softplus,
        # shorthands
        'relu': lambda z: TT.maximum(0, z),
        # modifiers
        'rect:max': lambda z: TT.minimum(1, z),
        'rect:min': lambda z: TT.maximum(0, z),
        # normalization
        'norm:dc': lambda z: (z.T - z.mean(axis=1)).T,
        'norm:max': lambda z: (z.T / TT.maximum(1e-10, abs(z).max(axis=1))).T,
        'norm:std': lambda z: (z.T / TT.maximum(1e-10, TT.std(z, axis=1))).T,
    }
    for k, v in options.iteritems():
        v.__theanets_name__ = k
    # Bug fix: the original wrapped the lookup in a bare `except:`, which
    # swallowed *any* exception (not just KeyError) and discarded context.
    if act not in options:
        raise KeyError('unknown --activation %s' % act)
    return options[act]
示例9: model
def model(self, X, w1, w2, w3, w4, w5, w6, w_o, p_drop_conv, p_drop_hidden):
    """Forward pass of the fully-convolutional classifier.

    Returns every intermediate layer plus the softmax output. The dropout
    probabilities are currently unused (all dropout calls were disabled);
    they are kept for interface compatibility.
    """
    # Conv -> rectify -> 2x2 max-pool stages.
    l1 = max_pool_2d(l.rectify(conv2d(X, w1, border_mode='valid') + self.b1),
                     (2, 2), ignore_border=True)
    l2 = max_pool_2d(l.rectify(conv2d(l1, w2, border_mode='valid') + self.b2),
                     (2, 2), ignore_border=True)
    l3 = l.rectify(conv2d(l2, w3, border_mode='valid') + self.b3)
    l4 = max_pool_2d(l.rectify(conv2d(l3, w4, border_mode='valid') + self.b4),
                     (2, 2), ignore_border=True)
    l5 = l.rectify(conv2d(l4, w5, border_mode='valid') + self.b5)
    l6 = l.rectify(conv2d(l5, w6, border_mode='valid') + self.b6)
    # Final projection to class scores, flattened to (batch, classes).
    logits = T.flatten(conv2d(l6, w_o, border_mode='valid'), outdim=2)
    # Batch-normalise the logits (1e-4 guards against zero variance), then
    # apply the learned scale/shift.
    logits = ((logits - T.mean(logits, axis=0)) /
              (T.std(logits, axis=0) + 1e-4)) * self.g + self.b
    pyx = T.nnet.softmax(logits)
    return l1, l2, l3, l4, l5, logits, pyx
示例10: collect_statistics
def collect_statistics(self, X):
    """Return update pairs that refresh the stored per-feature data
    statistics (mean and std over axis 0) from the batch X."""
    updates_stats = [
        (self.stat_mean, T.mean(X, axis=0)),
        (self.stat_std, T.std(X, axis=0)),
    ]
    return updates_stats
示例11: setup_model
def setup_model():
    """Build the LSTM-attention classifier.

    Returns (cost, monitorings): the cross-entropy training cost and a list
    of monitoring channels — error rate, then mean and std of the glimpse
    location/scale x and y components.
    """
    # shape: T x B x F
    input_ = T.tensor3('features')
    # shape: B
    target = T.lvector('targets')
    model = LSTMAttention(dim=500,
                          mlp_hidden_dims=[400, 4],
                          batch_size=100,
                          image_shape=(100, 100),
                          patch_shape=(28, 28),
                          weights_init=Glorot(),
                          biases_init=Constant(0))
    model.initialize()
    h, c, location, scale = model.apply(input_)
    classifier = MLP([Rectifier(), Softmax()], [500, 100, 10],
                     weights_init=Glorot(),
                     biases_init=Constant(0))
    model.h = h
    classifier.initialize()
    probabilities = classifier.apply(h[-1])
    cost = CategoricalCrossEntropy().apply(target, probabilities)
    error_rate = MisclassificationRate().apply(target, probabilities)
    # Named mean/std channels for each component of location and scale,
    # ordered: all averages first, then all standard deviations.
    avgs, stds = [], []
    for prefix, tensor in (('location', location), ('scale', scale)):
        for column, axis_name in enumerate(('x', 'y')):
            avg = T.mean(tensor[:, column])
            avg.name = '%s_%s_avg' % (prefix, axis_name)
            avgs.append(avg)
            spread = T.std(tensor[:, column])
            spread.name = '%s_%s_std' % (prefix, axis_name)
            stds.append(spread)
    monitorings = [error_rate] + avgs + stds
    return cost, monitorings
示例12: _layer_stats
def _layer_stats(self, state_below, layer_output):
    """Extend the parent layer's statistics with summaries of the learned
    PRELU alpha parameter (mean/max/min/std)."""
    parent_stats = super(PRELU, self)._layer_stats(state_below, layer_output)
    alpha_stats = [
        ('alpha_mean', T.mean(self.alpha)),
        ('alpha_max', T.max(self.alpha)),
        ('alpha_min', T.min(self.alpha)),
        ('alpha_std', T.std(self.alpha)),
    ]
    return parent_stats + alpha_stats
示例13: get_output_for
def get_output_for(self, input, **kwargs):
    """Row-wise correlation of the two stacked inputs (input[0] vs input[1]),
    rescaled from [-1, 1] to [0, 1]."""
    first = input[0, ]
    second = input[1, ]
    cols = self.input_shape[2]
    # Standardise each row of both halves.
    z0 = (first - T.mean(first, axis=1, keepdims=True)) / \
        T.std(first, axis=1, keepdims=True)
    z1 = (second - T.mean(second, axis=1, keepdims=True)) / \
        T.std(second, axis=1, keepdims=True)
    corr = T.sum(z0 * z1, axis=1) / cols
    return (corr + np.float32(1.)) / np.float32(2.)
示例14: testFcn
def testFcn(self,massBinned,trainY,trainX):
    # Measure how strongly the classifier sculpts the binned mass spectrum:
    # returns the standard deviation of the per-bin ratio between the
    # score-weighted ("selected") and unweighted ("base") background
    # histograms. A flat ratio (small std) means little mass sculpting.
    # NOTE(review): Python 2 code (print statement). Assumes trainY holds
    # 0/1 labels with background == 0, so the (1 - y) weights select
    # background events — confirm against the training setup.
    y = T.dvector('y')
    varBinned = T.ivector('var')
    # Histogram of mass bins weighted by background membership; +0.01
    # keeps empty bins from producing a division by zero below.
    baseHist = T.bincount(varBinned,1-y)+0.01
    # Same histogram, additionally weighted by the network's class-1
    # output (self.outLayer.P is presumably the softmax matrix — verify).
    selectedHist = T.bincount(varBinned,(1-y)*self.outLayer.P[T.arange(y.shape[0]),1])+0.01
    # baseHist depends only on y/varBinned, so its eval needs no input data.
    print baseHist.eval({y:trainY, varBinned:massBinned}), selectedHist.eval({y:trainY, varBinned:massBinned, self.input:trainX})
    # Spread of the bin-wise selected/base ratio.
    rTensor = T.std(selectedHist/baseHist)
    return (rTensor).eval({y:trainY, varBinned:massBinned, self.input:trainX})
示例15: get_output_for
def get_output_for(self, input, **kwargs):
    """Optionally mean/variance-normalise each row of `input`.

    Inputs with more than two dimensions are flattened to 2-d for the
    normalisation and restored to their original shape on return.
    """
    original_shape = input.shape
    if input.ndim > 2:
        input = T.flatten(input, 2)
    if self.norm_type == "mean_var":
        input -= T.mean(input, axis=1, keepdims=True)
        input /= T.std(input, axis=1, keepdims=True)
    return input.reshape(original_shape)