This article collects typical usage examples of the theano.tensor.maximum method in Python. If you are unsure how to call tensor.maximum, how it behaves, or what real code that uses it looks like, the curated examples below should help. You can also browse further usage examples for the containing module, theano.tensor.
The following shows 15 code examples of tensor.maximum, sorted by popularity by default.
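Before the individual examples, here is a minimal, self-contained sketch of the call they all build on: theano.tensor.maximum is the element-wise (broadcasting) maximum of two tensors. The variable names below are illustrative and not taken from any of the examples.

import numpy as np
import theano
import theano.tensor as T

a = T.vector('a')
b = T.vector('b')
elementwise_max = theano.function([a, b], T.maximum(a, b))

print(elementwise_max(np.array([1., 5., 2.], dtype=theano.config.floatX),
                      np.array([3., 4., 4.], dtype=theano.config.floatX)))
# -> [3. 5. 4.]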
Example 1: ctc_update_log_p
# Required import: from theano import tensor [as alias]
# or: from theano.tensor import maximum [as alias]
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next
Example 2: define_cost
# Required import: from theano import tensor [as alias]
# or: from theano.tensor import maximum [as alias]
def define_cost(self, pred, y0, m0):
    bsize = self.bsize
    npix = int(np.prod(test_shape(y0)[1:]))
    y0_target = y0.reshape((self.bsize, npix))
    y0_mask = m0.reshape((self.bsize, npix))
    pred = pred.reshape((self.bsize, npix))

    p = pred * y0_mask
    t = y0_target * y0_mask
    d = (p - t)

    nvalid_pix = T.sum(y0_mask, axis=1)
    depth_cost = (T.sum(nvalid_pix * T.sum(d**2, axis=1))
                  - 0.5*T.sum(T.sum(d, axis=1)**2)) \
                 / T.maximum(T.sum(nvalid_pix**2), 1)
    return depth_cost
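This is a masked, scale-invariant squared-error depth loss; T.maximum(..., 1) in the denominator only guards against an all-empty mask. A hedged NumPy re-statement of the same formula (function and variable names are illustrative, not from the original class):

import numpy as np

def scale_invariant_depth_cost(pred, target, mask):
    # pred, target, mask: arrays of shape (batch, npix); mask is 1 for valid pixels
    d = (pred - target) * mask
    nvalid = mask.sum(axis=1)
    numerator = (nvalid * (d ** 2).sum(axis=1)).sum() - 0.5 * (d.sum(axis=1) ** 2).sum()
    return numerator / max((nvalid ** 2).sum(), 1)   # same guard as T.maximum(..., 1)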
Example 3: get_noise
# Required import: from theano import tensor [as alias]
# or: from theano.tensor import maximum [as alias]
def get_noise(self, size):
    # Allow just requesting batch size
    if isinstance(size, int):
        size = (size, self.get_input_space().get_total_dimension())
    if not hasattr(self, 'noise'):
        self.noise = "gaussian"
    if self.noise == "uniform":
        return self.theano_rng.uniform(low=-np.sqrt(3), high=np.sqrt(3),
                                       size=size, dtype='float32')
    elif self.noise == "gaussian":
        return self.theano_rng.normal(size=size, dtype='float32')
    elif self.noise == "spherical":
        noise = self.theano_rng.normal(size=size, dtype='float32')
        noise = noise / T.maximum(1e-7, T.sqrt(T.sqr(noise).sum(axis=1))).dimshuffle(0, 'x')
        return noise
    else:
        raise NotImplementedError(self.noise)
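In the 'spherical' branch, T.maximum(1e-7, ...) keeps each row's norm away from zero before the division. A standalone hedged sketch of just that branch (the random-stream class is an assumption, not taken from the original code):

import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams

rng = MRG_RandomStreams(seed=42)
noise = rng.normal(size=(4, 8), dtype='float32')
norms = T.maximum(1e-7, T.sqrt(T.sqr(noise).sum(axis=1)))
spherical = noise / norms.dimshuffle(0, 'x')   # each row rescaled to unit L2 norm

sample = theano.function([], spherical)
print(sample().shape)   # (4, 8)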
Example 4: max_pool_2d_same_size
# Required import: from theano import tensor [as alias]
# or: from theano.tensor import maximum [as alias]
def max_pool_2d_same_size(input, patch_size):
    """
    Takes as input a 4-D tensor. It sets all non-maximum values
    of non-overlapping patches of size (patch_size[0], patch_size[1]) to zero,
    keeping only the maximum values. The output has the same dimensions as
    the input.

    Parameters
    ----------
    input : 4-D theano tensor of input images
        Input images. Max pooling will be done over the 2 last dimensions.
    patch_size : tuple of length 2
        Size of the patch (patch height, patch width).
        (2, 2) will retain only one non-zero value per patch of 4 values.
    """
    output = Pool(patch_size, True)(input)
    outs = MaxPoolGrad(patch_size, True)(input, output, output)
    return outs
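A hedged usage sketch (not part of the original source). It assumes Pool and MaxPoolGrad are imported from theano.tensor.signal.pool and a Theano release whose constructors accept the patch size directly, as the function above expects:

import numpy as np
import theano
import theano.tensor as T

images = T.tensor4('images')
same_size_pooled = max_pool_2d_same_size(images, patch_size=(2, 2))
f = theano.function([images], same_size_pooled)

x = np.arange(16, dtype=theano.config.floatX).reshape(1, 1, 4, 4)
print(f(x))   # only the maximum of each 2x2 patch survives, everything else is 0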
Example 5: compute_emb
# Required import: from theano import tensor [as alias]
# or: from theano.tensor import maximum [as alias]
def compute_emb(x, W):

    def _step(xi, emb, W):
        if prm.att_doc:
            new_shape = (xi.shape[0], xi.shape[1], xi.shape[2], prm.dim_emb)
        else:
            new_shape = (xi.shape[0], xi.shape[1], prm.dim_emb)

        out = W[xi.flatten()].reshape(new_shape).sum(-2)
        return out / tensor.maximum(1., tensor.neq(xi, -1).astype('float32').sum(-1, keepdims=True))

    if prm.att_doc:
        emb_init = tensor.alloc(0., x.shape[1], x.shape[2], prm.dim_emb)
    else:
        emb_init = tensor.alloc(0., x.shape[1], prm.dim_emb)

    (embs), scan_updates = theano.scan(_step,
                                       sequences=[x],
                                       outputs_info=[emb_init],
                                       non_sequences=[W],
                                       name='emb_scan',
                                       n_steps=x.shape[0])
    return embs
Example 6: adamax_updates
# Required import: from theano import tensor [as alias]
# or: from theano.tensor import maximum [as alias]
def adamax_updates(params, cost, lr=0.001, mom1=0.9, mom2=0.999):
    updates = []
    grads = T.grad(cost, params)
    for p, g in zip(params, grads):
        mg = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
        v = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
        if mom1 > 0:
            v_t = mom1*v + (1. - mom1)*g
            updates.append((v, v_t))
        else:
            v_t = g
        mg_t = T.maximum(mom2*mg, abs(g))
        g_t = v_t / (mg_t + 1e-6)
        p_t = p - lr * g_t
        updates.append((mg, mg_t))
        updates.append((p, p_t))
    return updates
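A hedged usage sketch showing how the returned update list is handed to theano.function. The toy model and the names `x`, `y`, `W`, `b` are placeholders, not from the original source; it assumes the same `import theano as th`, `import numpy as np`, and `import theano.tensor as T` the example relies on.

x = T.matrix('x')
y = T.ivector('y')
W = th.shared(np.zeros((784, 10), dtype=th.config.floatX), name='W')
b = th.shared(np.zeros(10, dtype=th.config.floatX), name='b')

probs = T.nnet.softmax(T.dot(x, W) + b)
cost = T.nnet.categorical_crossentropy(probs, y).mean()

# one gradient step per call, using the AdaMax updates defined above
train_step = th.function([x, y], cost, updates=adamax_updates([W, b], cost, lr=0.001))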
Example 7: AdaMax
# Required import: from theano import tensor [as alias]
# or: from theano.tensor import maximum [as alias]
def AdaMax(w, objective, alpha=.01, beta1=.1, beta2=.001):
    print 'AdaMax', 'alpha:', alpha, 'beta1:', beta1, 'beta2:', beta2
    g = T.grad(objective.sum(), w, disconnected_inputs='warn')

    new = OrderedDict()

    for i in range(len(w)):
        #gi = T.switch(T.isnan(gi),T.zeros_like(gi),gi) #remove NaN's
        mom1 = G.sharedf(w[i].get_value() * 0.)
        _max = G.sharedf(w[i].get_value() * 0.)
        new[mom1] = (1-beta1) * mom1 + beta1 * g[i]
        new[_max] = T.maximum((1-beta2)*_max, abs(g[i]) + 1e-8)
        new[w[i]] = w[i] + alpha * new[mom1] / new[_max]
    return new

# AdaMax that averages over multiple minibatches
Example 8: my_max_pool_2d
# Required import: from theano import tensor [as alias]
# or: from theano.tensor import maximum [as alias]
def my_max_pool_2d(sym_input, pool_shape=(2, 2)):
    """ this one is pure theano. Hence all gradient-related stuff is working! No dimshuffling"""

    s = None
    for i in xrange(pool_shape[1]):
        t = sym_input[:, :, :, i::pool_shape[1]]
        if s is None:
            s = t
        else:
            s = T.maximum(s, t)

    temp = s
    s = None
    for i in xrange(pool_shape[0]):
        t = temp[:, :, i::pool_shape[0], :]
        if s is None:
            s = t
        else:
            s = T.maximum(s, t)

    sym_ret = s
    return sym_ret
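A hedged usage sketch (not part of the original source). Unlike Example 4, this pooling genuinely downsamples: a (2, 2) pool on a 4x4 feature map yields a 2x2 map of patch maxima.

import numpy as np
import theano
import theano.tensor as T

x = T.tensor4('x')
pooled = my_max_pool_2d(x, pool_shape=(2, 2))
f = theano.function([x], pooled)

img = np.arange(16, dtype=theano.config.floatX).reshape(1, 1, 4, 4)
print(f(img))   # shape (1, 1, 2, 2): [[[[ 5.  7.] [13. 15.]]]]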
Example 9: max_pool_along_channel_axis
# Required import: from theano import tensor [as alias]
# or: from theano.tensor import maximum [as alias]
def max_pool_along_channel_axis(sym_input, pool_factor):
    """ for 3D conv."""
    s = None
    for i in xrange(pool_factor):
        t = sym_input[:, :, i::pool_factor]
        if s is None:
            s = t
        else:
            s = T.maximum(s, t)
    return s

# Ns, Ts, C, Hs, Ws = 1, 70, 1, 70, 70 -> 70^3
# Nf, Tf, C, Hf, Wf = 32, 5, 1, 5, 5   -> 32 filters of shape 5^3
# signals = numpy.arange(Ns*Ts*C*Hs*Ws).reshape(Ns, Ts, C, Hs, Ws).astype('float32')
# filters = numpy.arange(Nf*Tf*C*Hf*Wf).reshape(Nf, Tf, C, Hf, Wf).astype('float32')
#
# in 3D
#   input:   (1, 70, 3, 70, 70)
#   filters: (32, 5, 3, 5, 5)
#   --> output: (1, 66, 32, 66, 66)
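A hedged usage sketch of max_pool_along_channel_axis (not part of the original source), on a 5-D (Ns, Ts, C, Hs, Ws) signal like the one described in the comments above; only the channel axis (axis 2) shrinks:

import numpy as np
import theano
import theano.tensor as T

signals = T.TensorType(theano.config.floatX, (False,) * 5)('signals')
pooled = max_pool_along_channel_axis(signals, pool_factor=2)
f = theano.function([signals], pooled)

x = np.random.rand(1, 4, 6, 5, 5).astype(theano.config.floatX)
print(f(x).shape)   # (1, 4, 3, 5, 5)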
Example 10: compute_weights
# Required import: from theano import tensor [as alias]
# or: from theano.tensor import maximum [as alias]
def compute_weights(self, energies, attended_mask):
    if self.energy_normalizer == 'softmax':
        logger.debug("Using softmax attention weights normalization")
        energies = energies - energies.max(axis=0)
        unnormalized_weights = tensor.exp(energies)
    elif self.energy_normalizer == 'logistic':
        logger.debug("Using smoothfocus (logistic sigm) "
                     "attention weights normalization")
        unnormalized_weights = tensor.nnet.sigmoid(energies)
    elif self.energy_normalizer == 'relu':
        logger.debug("Using ReLU attention weights normalization")
        unnormalized_weights = tensor.maximum(energies/1000., 0.0)
    else:
        raise Exception("Unknown energey_normalizer: {}"
                        .format(self.energy_computer))

    if attended_mask:
        unnormalized_weights *= attended_mask

    # If mask consists of all zeros use 1 as the normalization coefficient
    normalization = (unnormalized_weights.sum(axis=0) +
                     tensor.all(1 - attended_mask, axis=0))
    return unnormalized_weights / normalization
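A hedged, standalone re-statement of just the 'relu' branch (a free function instead of the original class method; names are illustrative), to make the role of tensor.maximum explicit:

import theano
from theano import tensor

energies = tensor.matrix('energies')       # shape: (attended_length, batch)
attended_mask = tensor.matrix('mask')      # 1.0 where a position is valid

unnormalized = tensor.maximum(energies / 1000., 0.0) * attended_mask
# if a column of the mask is all zeros, fall back to dividing by 1
normalization = unnormalized.sum(axis=0) + tensor.all(1 - attended_mask, axis=0)
weights = unnormalized / normalization

compute = theano.function([energies, attended_mask], weights)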
Example 11: maximum
# Required import: from theano import tensor [as alias]
# or: from theano.tensor import maximum [as alias]
def maximum(x, y):
    return T.maximum(x, y)
Example 12: maximum
# Required import: from theano import tensor [as alias]
# or: from theano.tensor import maximum [as alias]
def maximum(x, y):
    # while gradient checking, swap the non-smooth max for a smooth stand-in
    if checkgrad:
        return x + y
    return T.maximum(x, y)
Example 13: HeKaimingResidualLayerSet
# Required import: from theano import tensor [as alias]
# or: from theano.tensor import maximum [as alias]
def HeKaimingResidualLayerSet(inp, inp_dim, vs, training_mode, name="resnet_stack",
                              dropout_keep_rate=1.0, depth=2, initializer=None):
    # From http://arxiv.org/pdf/1603.05027v2.pdf
    addin = inp
    for i in range(depth):
        addin = BatchNorm(addin, inp_dim, vs, name + "/" + str(i), training_mode)
        if dropout_keep_rate < 1.0:
            addin = Dropout(addin, dropout_keep_rate, training_mode)
        addin = T.maximum(addin, 0)  # ReLU
        addin = Linear(addin, inp_dim, inp_dim, vs, name=name + "/" + str(i), initializer=initializer)
    return inp + addin
Example 14: ReLULayer
# Required import: from theano import tensor [as alias]
# or: from theano.tensor import maximum [as alias]
def ReLULayer(inp, inp_dim, outp_dim, vs, name="relu_layer", use_bias=True, initializer=None):
    pre_nl = Linear(inp, inp_dim, outp_dim, vs, name, use_bias, initializer)
    # ReLU isn't present in this version of Theano.
    outp = T.maximum(pre_nl, 0)
    return outp
Example 15: relu
# Required import: from theano import tensor [as alias]
# or: from theano.tensor import maximum [as alias]
def relu(self, X):
    return T.maximum(X, 0)
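A hedged usage sketch (not part of the original source): T.maximum(X, 0) compiled as a standalone ReLU and evaluated on a small vector.

import numpy as np
import theano
import theano.tensor as T

X = T.vector('X')
relu = theano.function([X], T.maximum(X, 0))

print(relu(np.array([-2., -0.5, 0., 3.], dtype=theano.config.floatX)))
# -> [0. 0. 0. 3.]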