This article collects typical usage examples of the ifelse function from Python's theano.ifelse module. If you have been wondering what ifelse does, how to use it, or where to find practical examples of it, the curated code samples below should help.
The following sections present 15 code examples of the ifelse function, sorted by popularity by default. You can vote for the examples you like or find useful; your feedback helps the system recommend better Python code examples.
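
Before the examples, a minimal sketch of the basic pattern may be useful (the variable names here are illustrative and not taken from any example below). ifelse(cond, a, b) builds a symbolic expression whose condition must be a scalar; unlike T.switch, which is element-wise and computes both branches, ifelse evaluates only the branch that is actually selected (lazy evaluation):

import theano
import theano.tensor as T
from theano.ifelse import ifelse

a, b = T.scalars('a', 'b')
# Only the selected branch is computed at run time.
z = ifelse(T.lt(a, b), a * 2, b * 2)
f = theano.function([a, b], z, allow_input_downcast=True)
print(f(1., 2.))  # 2.0 -- the a * 2 branch
print(f(3., 2.))  # 4.0 -- the b * 2 branch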
Example 1: build

def build(self, output, tparams=None, BNparams=None):
    if self.BN_mode:
        self.BN_eps = npt(self.BN_eps)
        if not hasattr(self, 'BN_mean'):
            self.BN_mean = T.mean(output)
        if not hasattr(self, 'BN_std'):
            # unbiased-variance correction factor
            m2 = (1 + 1 / (T.prod(output.shape) - 1)).astype(floatX)
            self.BN_std = T.sqrt(m2 * T.var(output) + self.BN_eps)
        if self.BN_mode == 2:
            # per-channel batch norm over a 4D conv output (batch, channel, h, w)
            t_mean = T.mean(output, axis=[0, 2, 3], keepdims=True)
            t_var = T.var(output, axis=[0, 2, 3], keepdims=True)
            BN_mean = BNparams[p_(self.prefix, 'mean')].dimshuffle(
                'x', 0, 'x', 'x')
            BN_std = BNparams[p_(self.prefix, 'std')].dimshuffle(
                'x', 0, 'x', 'x')
            # batch statistics at training time, stored statistics at test time
            output = ifelse(
                self.training,
                (output - t_mean) / T.sqrt(t_var + self.BN_eps),
                (output - BN_mean) / BN_std)
            output *= tparams[p_(self.prefix, 'BN_scale')].dimshuffle(
                'x', 0, 'x', 'x')
            output += tparams[p_(self.prefix, 'BN_shift')].dimshuffle(
                'x', 0, 'x', 'x')
        elif self.BN_mode == 1:
            # scalar batch norm over the whole tensor
            t_mean = T.mean(output)
            t_var = T.var(output)
            output = ifelse(
                self.training,
                (output - t_mean) / T.sqrt(t_var + self.BN_eps),
                ((output - BNparams[p_(self.prefix, 'mean')])
                 / BNparams[p_(self.prefix, 'std')]))
            output *= tparams[p_(self.prefix, 'BN_scale')]
            output += tparams[p_(self.prefix, 'BN_shift')]
    self.output = self.activation(output)
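
For this pattern to work, self.training must be a symbolic scalar, typically a shared variable that is flipped between phases so one compiled graph serves both training and inference. A minimal sketch of such a flag (illustrative, not from this example's codebase):

import numpy as np
import theano

training = theano.shared(np.int8(1), name='training')  # 1 = training phase
# ... build output = ifelse(training, train_expr, test_expr) and compile ...
training.set_value(np.int8(0))  # the same compiled graph now runs in test mode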
Example 2: get_sensi_speci

def get_sensi_speci(y_hat, y):
    # collapse the five classes into two: {0, 1} -> negative, {2, 3, 4} -> positive
    y_hat = T.stacklists([y_hat[:, 0] + y_hat[:, 1],
                          y_hat[:, 2] + y_hat[:, 3] + y_hat[:, 4]]).T
    # per-row prediction; a bare T.argmax would flatten the matrix first
    y_hat = T.argmax(y_hat, axis=1)
    # encode (prediction, truth) pairs: 0 = TN, 1 = FN, 10 = FP, 11 = TP
    tag = 10 * y_hat + y
    tneg = T.cast((T.shape(tag[(T.eq(tag, 0.)).nonzero()]))[0], config.floatX)
    fneg = T.cast((T.shape(tag[(T.eq(tag, 1.)).nonzero()]))[0], config.floatX)
    fpos = T.cast((T.shape(tag[(T.eq(tag, 10.)).nonzero()]))[0], config.floatX)
    tpos = T.cast((T.shape(tag[(T.eq(tag, 11.)).nonzero()]))[0], config.floatX)
    # Pitfall: a plain Python "if T.eq(...)" cannot branch on a symbolic value
    # at graph-construction time, so ifelse is required here.
    speci = ifelse(T.eq(tneg + fpos, 0), np.float64(float('inf')), tneg / (tneg + fpos))
    sensi = ifelse(T.eq(tpos + fneg, 0), np.float64(float('inf')), tpos / (tpos + fneg))
    return [sensi, speci]
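
The four counts can also be written more directly. A sketch with the same semantics as the indexing gymnastics above, assuming the tag variable and imports from the example:

tneg = T.sum(T.eq(tag, 0)).astype(config.floatX)   # true negatives
fneg = T.sum(T.eq(tag, 1)).astype(config.floatX)   # false negatives
fpos = T.sum(T.eq(tag, 10)).astype(config.floatX)  # false positives
tpos = T.sum(T.eq(tag, 11)).astype(config.floatX)  # true positives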
Example 3: __init__

def __init__(self, factor=numpy.sqrt(2), decay=1.0, min_factor=None, padding=False, **kwargs):
    super(ConvFMPLayer, self).__init__(**kwargs)
    if min_factor is None:
        min_factor = factor
    factor = T.maximum(factor * (decay ** self.network.epoch), numpy.float32(min_factor))
    sizes_raw = self.source.output_sizes
    # handle size problems
    if not padding:
        padding = T.min(self.source.output_sizes / factor) <= 0
        padding = theano.printing.Print(global_fn=maybe_print_pad_warning)(padding)
    fixed_sizes = T.maximum(sizes_raw, T.cast(T.as_tensor(
        [factor + self.filter_height - 1, factor + self.filter_width - 1]), 'float32'))
    sizes = ifelse(padding, fixed_sizes, sizes_raw)
    X_size = T.cast(T.max(sizes, axis=0), "int32")

    def pad_fn(x_t, s):
        x = T.alloc(numpy.cast["float32"](0), X_size[0], X_size[1], self.X.shape[3])
        x = T.set_subtensor(x[:s[0], :s[1]], x_t[:s[0], :s[1]])
        return x

    fixed_X, _ = theano.scan(pad_fn, [self.X.dimshuffle(2, 0, 1, 3), T.cast(sizes_raw, "int32")])
    fixed_X = fixed_X.dimshuffle(1, 2, 0, 3)
    # only pay the cost of the padded copy when padding is actually needed
    self.X = ifelse(padding, T.unbroadcast(fixed_X, 3), self.X)
    conv_out = CuDNNConvHWBCOpValidInstance(self.X, self.W, self.b)
    conv_out_sizes = self.conv_output_size_from_input_size(sizes)
    self.output, self.output_sizes = fmp(conv_out, conv_out_sizes, T.cast(factor, 'float32'))
Example 4: _forward

def _forward(self):
    eps = self.eps
    param_size = (1, 1, self.n_output, 1, 1)
    self.gamma = self.declare(param_size)
    self.beta = self.declare(param_size)

    mean = self.inpt.mean(axis=[0, 1, 3, 4], keepdims=False)
    std = self.inpt.std(axis=[0, 1, 3, 4], keepdims=False)

    self._setup_running_metrics(self.n_output)
    # exponential moving averages, updated only while training
    self.running_mean.default_update = ifelse(
        self.training,
        (1.0 - self.alpha) * self.running_mean + self.alpha * mean,
        self.running_mean
    )
    self.running_std.default_update = ifelse(
        self.training,
        (1.0 - self.alpha) * self.running_std + self.alpha * std,
        self.running_std
    )
    # This will be optimized away, but ensures the running mean and the
    # running std get updated.
    # Reference: https://gist.github.com/f0k/f1a6bd3c8585c400c190#file-batch_norm-py-L86
    mean += 0 * self.running_mean
    std += 0 * self.running_std

    use_mean = ifelse(self.training, mean, self.running_mean)
    use_std = ifelse(self.training, std, self.running_std)
    use_mean = use_mean.dimshuffle('x', 'x', 0, 'x', 'x')
    use_std = use_std.dimshuffle('x', 'x', 0, 'x', 'x')
    norm_inpt = (self.inpt - use_mean) / (use_std + eps)
    self.output = self.gamma * norm_inpt + self.beta
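
The "+ 0 * running_mean" trick relies on Theano's default_update mechanism: a shared variable's default_update is applied by any compiled function whose graph contains that variable. A minimal sketch of the mechanism, with illustrative names:

import numpy as np
import theano
import theano.tensor as T

counter = theano.shared(np.float32(0.), name='counter')
counter.default_update = counter + np.float32(1.)
x = T.fscalar('x')
# 0 * counter is optimized away, but having counter in the graph makes
# the compiled function apply its default_update on every call
f = theano.function([x], x + 0 * counter)
f(0.); f(0.)
print(counter.get_value())  # 2.0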
Example 5: AdaMaxAvg2

def AdaMaxAvg2(ws, objective, alpha=.01, beta1=.1, beta2=.001, beta3=0.01, n_accum=1):
    if n_accum == 1:
        return AdaMaxAvg(ws, objective, alpha, beta1, beta2, beta3)
    print('AdaMax_Avg2', 'alpha:', alpha, 'beta1:', beta1, 'beta2:', beta2,
          'beta3:', beta3, 'n_accum:', n_accum)

    gs = G.ndict.T_grad(objective.sum(), ws, disconnected_inputs='raise')
    new = OrderedDict()

    from theano.ifelse import ifelse
    it = G.sharedf(0.)
    new[it] = it + 1
    # accumulate gradients over n_accum steps; reset at the start of each cycle
    # and apply the parameter update at the end of it
    reset = T.eq(T.mod(it, n_accum), 0)
    update = T.eq(T.mod(it, n_accum), n_accum - 1)

    ws_avg = []
    for j in range(len(ws)):
        w_avg = {}
        for i in ws[j]:
            _w = ws[j][i]
            _g = gs[j][i]
            # _g = T.switch(T.isnan(_g), T.zeros_like(_g), _g)  # remove NaNs
            mom1 = G.sharedf(_w.get_value() * 0.)
            _max = G.sharedf(_w.get_value() * 0.)
            w_avg[i] = G.sharedf(_w.get_value())
            g_sum = G.sharedf(_w.get_value() * 0.)
            new[g_sum] = ifelse(reset, _g, g_sum + _g)
            new[mom1] = ifelse(update, (1 - beta1) * mom1 + beta1 * new[g_sum], mom1)
            new[_max] = ifelse(update, T.maximum((1 - beta2) * _max, abs(new[g_sum]) + 1e-8), _max)
            new[_w] = ifelse(update, _w + alpha * new[mom1] / new[_max], _w)
            new[w_avg[i]] = ifelse(update, beta3 * new[_w] + (1. - beta3) * w_avg[i], w_avg[i])
        ws_avg += [w_avg]
    return new, ws_avg
Example 6: call

def call(self, vals, mask=None):
    block_out = vals[0]
    prev_out = vals[1]
    test_out = self.zi * block_out
    # at test time return the scaled block output; otherwise zi gates
    # between the block output and the previous output
    return ifelse(self.test, test_out, ifelse(self.zi, block_out, prev_out))
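
Nested ifelse calls like this compose into a multi-way selection, as long as every condition is a scalar. A self-contained sketch of the pattern (illustrative names):

import theano
import theano.tensor as T
from theano.ifelse import ifelse

mode = T.iscalar('mode')
x, y, z = T.scalars('x', 'y', 'z')
# three-way select: mode 0 -> x, mode 1 -> y, anything else -> z
out = ifelse(T.eq(mode, 0), x, ifelse(T.eq(mode, 1), y, z))
f = theano.function([mode, x, y, z], out, allow_input_downcast=True)
print(f(1, 10., 20., 30.))  # 20.0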
Example 7: gate_layer

def gate_layer(tparams, X_word, X_char, options, prefix, pretrain_mode,
               activ='lambda x: x', **kwargs):
    """
    Compute the forward pass for a gate layer.

    Parameters
    ----------
    tparams : OrderedDict of theano shared variables, {parameter name: value}
    X_word : theano 3d tensor, word input, dimensions: (num of time steps, batch size, dim of vector)
    X_char : theano 3d tensor, char input, dimensions: (num of time steps, batch size, dim of vector)
    options : dictionary, {hyperparameter: value}
    prefix : string, layer name
    pretrain_mode : theano shared scalar, 0. = word only, 1. = char only, 2. = word & char
    activ : string, activation function: 'linear', 'tanh', or 'rectifier'

    Returns
    -------
    X : theano 3d tensor, final vector, dimensions: (num of time steps, batch size, dim of vector)
    """
    # compute gating values, Eq.(3)
    G = tensor.nnet.sigmoid(tensor.dot(X_word, tparams[p_name(prefix, 'v')])
                            + tparams[p_name(prefix, 'b')][0])
    # mode 0: word only; mode 1: char only; otherwise the gated combination
    X = ifelse(tensor.le(pretrain_mode, numpy.float32(1.)),
               ifelse(tensor.eq(pretrain_mode, numpy.float32(0.)), X_word, X_char),
               G[:, :, None] * X_char + (1. - G)[:, :, None] * X_word)
    return eval(activ)(X)
Example 8: more_complex_test

def more_complex_test():
    notimpl = NotImplementedOp()
    ifelseifelseif = IfElseIfElseIf()

    x1 = T.scalar('x1')
    x2 = T.scalar('x2')
    c1 = T.scalar('c1')
    c2 = T.scalar('c2')
    t1 = ifelse(c1, x1, notimpl(x2))
    t1.name = 't1'
    t2 = t1 * 10
    t2.name = 't2'
    t3 = ifelse(c2, t2, x1 + t1)
    t3.name = 't3'
    t4 = ifelseifelseif(T.eq(x1, x2), x1, T.eq(x1, 5), x2, c2, t3, t3 + 0.5)
    t4.name = 't4'

    f = function([c1, c2, x1, x2], t4, mode=Mode(linker='vm',
                                                 optimizer='fast_run'))
    if theano.config.vm.lazy is False:
        # without lazy evaluation both branches run, so the NotImplementedOp
        # in the untaken branch must raise
        try:
            f(1, 0, numpy.array(10, dtype=x1.dtype), 0)
            assert False
        except NotImplementedOp.E:
            pass
    else:
        # with lazy evaluation the untaken branch is never computed
        print(f(1, 0, numpy.array(10, dtype=x1.dtype), 0))
        assert f(1, 0, numpy.array(10, dtype=x1.dtype), 0) == 20.5
    print('... passed')
Example 9: get_aggregator

def get_aggregator(self):
    initialized = shared_like(0.)
    numerator_acc = shared_like(self.numerator)
    denominator_acc = shared_like(self.denominator)

    # add to the accumulators only after the first batch has initialized them
    conditional_update_num = ifelse(initialized,
                                    self.numerator + numerator_acc,
                                    self.numerator)
    conditional_update_den = ifelse(initialized,
                                    self.denominator + denominator_acc,
                                    self.denominator)

    initialization_updates = [(numerator_acc,
                               tensor.zeros_like(numerator_acc)),
                              (denominator_acc,
                               tensor.zeros_like(denominator_acc)),
                              (initialized, 0.)]
    accumulation_updates = [(numerator_acc,
                             conditional_update_num),
                            (denominator_acc,
                             conditional_update_den),
                            (initialized, 1.)]
    aggregator = Aggregator(aggregation_scheme=self,
                            initialization_updates=initialization_updates,
                            accumulation_updates=accumulation_updates,
                            readout_variable=(numerator_acc /
                                              denominator_acc))
    return aggregator
Example 10: build_model

def build_model(self):
    print('\n... building the model with unroll=%d, backroll=%d'
          % (self.source.unroll, self.source.backroll))
    x = T.imatrix('x')
    y = T.imatrix('y')
    reset = T.scalar('reset')
    hiddens = [h['init'] for h in self.hiddens.values()]
    outputs_info = [None] * 3 + hiddens
    [losses, probs, errors, hids], updates = \
        theano.scan(self.step, sequences=[x, y], outputs_info=outputs_info)
    loss = losses.sum()
    error = errors.sum() / T.cast((T.neq(y, 255).sum()), floatX)

    hidden_updates_train = []
    hidden_updates_test = []
    for h in self.hiddens.values():
        # carry the hidden state across batches unless a reset is requested
        h_train = ifelse(T.eq(reset, 0),
                         hids[-1 - self.source.backroll, :], T.ones_like(h['init']))
        h_test = ifelse(T.eq(reset, 0),
                        hids[-1, :], T.ones_like(h['init']))
        hidden_updates_train.append((h['init'], h_train))
        hidden_updates_test.append((h['init'], h_test))

    updates = self.source.get_updates(loss, self.sgd_params)
    updates += hidden_updates_train
    rets = [loss, probs[-1, :], error]
    mode = theano.Mode(linker='cvm')
    train_model = theano.function([x, y, reset, self.lr], rets,
                                  updates=updates, mode=mode)
    test_model = theano.function([x, y, reset], rets,
                                 updates=hidden_updates_test, mode=mode)
    return train_model, test_model
Example 11: norm_col

def norm_col(w, h):
    """Normalize the column vector w (Theano function).

    Applies the inverse normalization to h so that the product w.h is unchanged.

    Parameters
    ----------
    w: Theano vector
        vector to be normalized
    h: Theano vector
        vector to receive the inverse normalization

    Returns
    -------
    w : Theano vector with the same shape as w
        normalized vector (w/norm)
    h : Theano vector with the same shape as h
        h*norm
    """
    norm = w.norm(2, 0)
    eps = 1e-12
    size_norm = (T.ones_like(w)).norm(2, 0)
    # guard against division by a (near-)zero norm
    w = ifelse(T.gt(norm, eps),
               w / norm,
               (w + eps) / (eps * size_norm).astype(theano.config.floatX))
    h = ifelse(T.gt(norm, eps),
               h * norm,
               (h * eps * size_norm).astype(theano.config.floatX))
    return w, h
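
A quick usage sketch with illustrative values:

import theano
import theano.tensor as T

w_sym = T.vector('w')
h_sym = T.vector('h')
w_n, h_n = norm_col(w_sym, h_sym)
f = theano.function([w_sym, h_sym], [w_n, h_n], allow_input_downcast=True)
print(f([3., 4.], [1., 1.]))  # w scaled to unit norm [0.6, 0.8]; h scaled by 5 to [5., 5.]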
Example 12: get_aggregator

def get_aggregator(self):
    initialized = shared_like(0.)
    numerator_acc = shared_like(self.numerator)
    denominator_acc = shared_like(self.denominator)

    # Dummy default expression to use as the previously-aggregated value,
    # with the same shape as the new result. A variant of Example 9: the new
    # value is always added, and ifelse only selects the accumulator or zeros.
    numerator_zeros = tensor.as_tensor(self.numerator).zeros_like()
    denominator_zeros = tensor.as_tensor(self.denominator).zeros_like()
    conditional_update_num = self.numerator + ifelse(initialized,
                                                     numerator_acc,
                                                     numerator_zeros)
    conditional_update_den = self.denominator + ifelse(initialized,
                                                       denominator_acc,
                                                       denominator_zeros)

    initialization_updates = [(numerator_acc,
                               tensor.zeros_like(numerator_acc)),
                              (denominator_acc,
                               tensor.zeros_like(denominator_acc)),
                              (initialized, 0.)]
    accumulation_updates = [(numerator_acc,
                             conditional_update_num),
                            (denominator_acc,
                             conditional_update_den),
                            (initialized, 1.)]
    aggregator = Aggregator(aggregation_scheme=self,
                            initialization_updates=initialization_updates,
                            accumulation_updates=accumulation_updates,
                            readout_variable=(numerator_acc /
                                              denominator_acc))
    return aggregator
Example 13: test_merge_ifs_true_false

def test_merge_ifs_true_false(self):
    raise SkipTest("Optimization temporarily disabled")
    x1 = tensor.scalar('x1')
    x2 = tensor.scalar('x2')
    y1 = tensor.scalar('y1')
    y2 = tensor.scalar('y2')
    w1 = tensor.scalar('w1')
    w2 = tensor.scalar('w2')
    c = tensor.iscalar('c')

    out = ifelse(c,
                 ifelse(c, x1, x2) + ifelse(c, y1, y2) + w1,
                 ifelse(c, x1, x2) + ifelse(c, y1, y2) + w2)
    f = theano.function([x1, x2, y1, y2, w1, w2, c], out,
                        allow_input_downcast=True)
    # the merge optimization should collapse all ifelse nodes that share
    # the same condition into a single IfElse apply node
    assert len([x for x in f.maker.env.toposort()
                if isinstance(x.op, IfElse)]) == 1

    rng = numpy.random.RandomState(utt.fetch_seed())
    vx1 = rng.uniform()
    vx2 = rng.uniform()
    vy1 = rng.uniform()
    vy2 = rng.uniform()
    vw1 = rng.uniform()
    vw2 = rng.uniform()
    assert numpy.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 1),
                          vx1 + vy1 + vw1)
    assert numpy.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 0),
                          vx2 + vy2 + vw2)
Example 14: _recursive_step

def _recursive_step(self, i, regs, tokens, seqs, back_routes, back_lens):
    seq = seqs[i]
    # Encoding: negative indices address the token table,
    # non-negative indices address the register file
    left, right, target = seq[0], seq[1], seq[2]
    left_rep = ifelse(T.lt(left, 0), tokens[-left], regs[left])
    right_rep = ifelse(T.lt(right, 0), tokens[-right], regs[right])
    rep = self._encode_computation(left_rep, right_rep)
    if self.deep:
        inter_rep = rep
        rep = self._deep_encode(inter_rep)
    else:
        inter_rep = T.constant(0)
    new_regs = T.set_subtensor(regs[target], rep)

    back_len = back_lens[i]
    back_reps, lefts, rights = self._unfold(back_routes[i], new_regs, back_len)
    gf_W_d1, gf_W_d2, gf_B_d1, gf_B_d2, distance, rep_gradient = self._unfold_gradients(
        back_reps, lefts, rights, back_routes[i], tokens, back_len)
    return ([rep, inter_rep, left_rep, right_rep, new_regs, rep_gradient, distance],
            self.decode_optimizer.setup([self.W_d1, self.W_d2, self.B_d1, self.B_d2],
                                        [gf_W_d1, gf_W_d2, gf_B_d1, gf_B_d2],
                                        method=self.optimization, beta=self.beta))
Example 15: decay

def decay(self):
    updates = []
    # once decay_batch batches have passed, reset the counter and decay the rate
    new_batch = ifelse(T.gt(self.batch, self.decay_batch), sharedX(0), self.batch + 1)
    new_lr = ifelse(T.gt(self.batch, self.decay_batch), self.lr * self.lr_decay_factor, self.lr)
    updates.append((self.batch, new_batch))
    updates.append((self.lr, new_lr))
    return updates
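
A self-contained sketch of the same schedule outside the class. The sharedX helper above presumably wraps theano.shared with a floatX cast; here plain shared variables are used, and the threshold and decay factor are made-up values:

import numpy as np
import theano
import theano.tensor as T
from theano.ifelse import ifelse

batch = theano.shared(np.float32(0.), name='batch')
lr = theano.shared(np.float32(0.1), name='lr')
decay_every, decay_factor = np.float32(100), np.float32(0.95)

new_batch = ifelse(T.gt(batch, decay_every), np.float32(0.), batch + np.float32(1.))
new_lr = ifelse(T.gt(batch, decay_every), lr * decay_factor, lr)
step = theano.function([], lr, updates=[(batch, new_batch), (lr, new_lr)])

for _ in range(102):
    step()
print(lr.get_value())  # 0.095 once the counter has exceeded 100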