This page collects typical usage examples of the Python theano.ifelse method. If you have been wondering what theano.ifelse does, how to use it, or where to find working examples, the curated code samples below may help. You can also explore further usage examples of the theano module to which the method belongs.
The 15 code examples of theano.ifelse shown below are sorted by popularity by default. You can up-vote the examples you like or find useful; your ratings help the system recommend better Python code examples.
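Before the examples, here is a minimal self-contained sketch (not taken from the examples below) of the basic pattern: ifelse takes a symbolic scalar condition and two branch expressions, and with the default lazy evaluation only the selected branch is computed at run time.

import numpy
import theano
import theano.tensor as T
from theano.ifelse import ifelse

a, b = T.scalars('a', 'b')
x, y = T.matrices('x', 'y')

# Only the branch selected by the condition is computed (lazy evaluation).
z = ifelse(T.lt(a, b), T.mean(x), T.mean(y))
f = theano.function([a, b, x, y], z)

big = numpy.ones((1000, 1000), dtype=theano.config.floatX)
print(f(0., 1., big, 2 * big))  # a < b is true, so only T.mean(x) is evaluated -> 1.0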
Example 1: test_c_thunks

# Required import: import theano [as alias]
# Or: from theano import ifelse [as alias]
def test_c_thunks():
    a = tensor.scalars('a')
    b, c = tensor.vectors('bc')
    cases = [False]
    if theano.config.cxx:
        cases.append(True)
    for c_thunks in cases:
        f = function([a, b, c], ifelse(a, a * b, b * c),
                     mode=Mode(
                         optimizer=None,
                         linker=vm.VM_Linker(c_thunks=c_thunks,
                                             use_cloop=False)))
        f(1, [2], [3, 2])
        from nose.tools import assert_raises
        assert_raises(ValueError, f, 0, [2], [3, 4])
        assert any([hasattr(t, 'cthunk') for t in f.fn.thunks]) == c_thunks

Example 2: test_lazy_if

# Required import: import theano [as alias]
# Or: from theano import ifelse [as alias]
def test_lazy_if(self):
    # Tests that the lazy if works even if the two results have different
    # shapes but the same type (i.e. both vectors, or matrices, or
    # whatnot of the same dtype).
    x = tensor.vector('x', dtype=self.dtype)
    y = tensor.vector('y', dtype=self.dtype)
    c = tensor.iscalar('c')
    f = theano.function([c, x, y], ifelse(c, x, y), mode=self.mode)
    self.assertFunctionContains1(f, self.get_ifelse(1))
    rng = numpy.random.RandomState(utt.fetch_seed())

    xlen = rng.randint(200)
    ylen = rng.randint(200)
    vx = numpy.asarray(rng.uniform(size=(xlen,)), self.dtype)
    vy = numpy.asarray(rng.uniform(size=(ylen,)), self.dtype)

    assert numpy.allclose(vx, f(1, vx, vy))
    assert numpy.allclose(vy, f(0, vx, vy))

Example 3: test_sparse_tensor_error

# Required import: import theano [as alias]
# Or: from theano import ifelse [as alias]
def test_sparse_tensor_error(self):
    import theano.sparse
    if not theano.sparse.enable_sparse:
        raise SkipTest("Optimization temporarily disabled")
    rng = numpy.random.RandomState(utt.fetch_seed())
    data = rng.rand(2, 3).astype(self.dtype)
    x = self.shared(data)
    y = theano.sparse.matrix('csc', dtype=self.dtype, name='y')
    z = theano.sparse.matrix('csr', dtype=self.dtype, name='z')
    cond = theano.tensor.iscalar('cond')

    self.assertRaises(TypeError, ifelse, cond, x, y)
    self.assertRaises(TypeError, ifelse, cond, y, x)
    self.assertRaises(TypeError, ifelse, cond, x, z)
    self.assertRaises(TypeError, ifelse, cond, z, x)
    self.assertRaises(TypeError, ifelse, cond, y, z)
    self.assertRaises(TypeError, ifelse, cond, z, y)

Example 4: __init__

# Required import: import theano [as alias]
# Or: from theano import ifelse [as alias]
def __init__(self, input, centerbias=None, alpha=1.0):
    self.input = input
    if centerbias is None:
        centerbias = np.ones(12)
    self.alpha = theano.shared(value=np.array(alpha).astype(theano.config.floatX), name='alpha')
    self.centerbias_ys = theano.shared(value=np.array(centerbias, dtype=theano.config.floatX), name='centerbias_ys')
    self.centerbias_xs = theano.shared(value=np.linspace(0, 1, len(centerbias), dtype=theano.config.floatX), name='centerbias_xs')

    height = T.cast(input.shape[0], theano.config.floatX)
    width = T.cast(input.shape[1], theano.config.floatX)
    x_coords = (T.arange(width) - 0.5 * width) / (0.5 * width)
    y_coords = (T.arange(height) - 0.5 * height) / (0.5 * height) + 0.0001  # We cannot have zeros in there because of grad

    x_coords = x_coords.dimshuffle('x', 0)
    y_coords = y_coords.dimshuffle(0, 'x')

    dists = T.sqrt(T.square(x_coords) + self.alpha * T.square(y_coords))
    self.max_dist = T.sqrt(1 + self.alpha)
    self.dists = dists / self.max_dist

    self.factors = nonlinearity(self.dists, self.centerbias_xs, self.centerbias_ys, len(centerbias))

    # Only apply the centerbias factors when there are enough control points.
    apply_centerbias = T.gt(self.centerbias_ys.shape[0], 2)
    self.output = ifelse(apply_centerbias, self.input * self.factors, self.input)
    self.params = [self.centerbias_ys, self.alpha]

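A note on the design choice above: ifelse is used rather than theano.tensor.switch because it evaluates only the selected branch, so the centerbias factors are skipped entirely when they are disabled. A minimal sketch of the difference, using hypothetical standalone variables rather than the layer's attributes:

import theano
import theano.tensor as T
from theano.ifelse import ifelse

x = T.vector('x')
apply_scaling = T.gt(x.shape[0], 2)        # symbolic boolean condition, as in the layer above

lazy = ifelse(apply_scaling, x * 2, x)     # only the selected branch is computed at run time
eager = T.switch(apply_scaling, x * 2, x)  # both branches are computed, then selected elementwise

f = theano.function([x], [lazy, eager])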
Example 5: gate_layer

# Required import: import theano [as alias]
# Or: from theano import ifelse [as alias]
def gate_layer(tparams, X_word, X_char, options, prefix, pretrain_mode, activ='lambda x: x', **kwargs):
    """
    Compute the forward pass for a gate layer.

    Parameters
    ----------
    tparams : OrderedDict of theano shared variables, {parameter name: value}
    X_word : theano 3d tensor, word input, dimensions: (num of time steps, batch size, dim of vector)
    X_char : theano 3d tensor, char input, dimensions: (num of time steps, batch size, dim of vector)
    options : dictionary, {hyperparameter: value}
    prefix : string, layer name
    pretrain_mode : theano shared scalar, 0. = word only, 1. = char only, 2. = word & char
    activ : string, activation function: 'linear', 'tanh', or 'rectifier'

    Returns
    -------
    X : theano 3d tensor, final vector, dimensions: (num of time steps, batch size, dim of vector)
    """
    # compute gating values, Eq.(3)
    G = tensor.nnet.sigmoid(tensor.dot(X_word, tparams[p_name(prefix, 'v')]) + tparams[p_name(prefix, 'b')][0])
    X = ifelse(tensor.le(pretrain_mode, numpy.float32(1.)),
               ifelse(tensor.eq(pretrain_mode, numpy.float32(0.)), X_word, X_char),
               G[:, :, None] * X_char + (1. - G)[:, :, None] * X_word)
    return eval(activ)(X)

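To see the nested-ifelse dispatch in isolation, here is a rough standalone sketch with hypothetical placeholder tensors (it does not use the real tparams or the gating weights): mode 0. returns the word input, 1. the char input, and anything above 1. a combination of both.

import numpy
import theano
import theano.tensor as tensor
from theano.ifelse import ifelse

pretrain_mode = theano.shared(numpy.float32(0.), name='pretrain_mode')
X_word = tensor.matrix('X_word', dtype='float32')
X_char = tensor.matrix('X_char', dtype='float32')
mixed = numpy.float32(0.5) * X_word + numpy.float32(0.5) * X_char  # stand-in for the gated combination

X = ifelse(tensor.le(pretrain_mode, numpy.float32(1.)),
           ifelse(tensor.eq(pretrain_mode, numpy.float32(0.)), X_word, X_char),
           mixed)
f = theano.function([X_word, X_char], X)

w = numpy.ones((2, 3), dtype='float32')
c = numpy.zeros((2, 3), dtype='float32')
print(f(w, c))                              # mode 0. -> returns the word input
pretrain_mode.set_value(numpy.float32(2.))
print(f(w, c))                              # mode 2. -> returns the mixed output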
Example 6: concat_layer

# Required import: import theano [as alias]
# Or: from theano import ifelse [as alias]
def concat_layer(tparams, X_word, X_char, options, prefix, pretrain_mode, activ='lambda x: x', **kwargs):
    """
    Compute the forward pass for a concat layer.

    Parameters
    ----------
    tparams : OrderedDict of theano shared variables, {parameter name: value}
    X_word : theano 3d tensor, word input, dimensions: (num of time steps, batch size, dim of vector)
    X_char : theano 3d tensor, char input, dimensions: (num of time steps, batch size, dim of vector)
    options : dictionary, {hyperparameter: value}
    prefix : string, layer name
    pretrain_mode : theano shared scalar, 0. = word only, 1. = char only, 2. = word & char
    activ : string, activation function: 'linear', 'tanh', or 'rectifier'

    Returns
    -------
    X : theano 3d tensor, final vector, dimensions: (num of time steps, batch size, dim of vector)
    """
    X = ifelse(tensor.le(pretrain_mode, numpy.float32(1.)),
               ifelse(tensor.eq(pretrain_mode, numpy.float32(0.)), X_word, X_char),
               tensor.dot(tensor.concatenate([X_word, X_char], axis=2), tparams[p_name(prefix, 'W')]) + tparams[p_name(prefix, 'b')])
    return eval(activ)(X)

Example 7: __init__

# Required import: import theano [as alias]
# Or: from theano import ifelse [as alias]
def __init__(self, rng, input, n_in, n_out, is_train,
             activation, dropout_rate, mask=None, W=None, b=None):
    super(DropoutHiddenLayer, self).__init__(
        rng=rng, input=input, n_in=n_in, n_out=n_out, W=W, b=b,
        activation=activation)
    self.dropout_rate = dropout_rate
    self.srng = T.shared_randomstreams.RandomStreams(rng.randint(999999))
    self.mask = mask
    self.layer = self.output
    # Compute the outputs for the train and test phases, applying dropout when needed.
    train_output = self.layer * T.cast(self.mask, theano.config.floatX)
    test_output = self.output * (1 - dropout_rate)
    self.output = ifelse(T.eq(is_train, 1), train_output, test_output)
    return

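The same is_train switch can be reproduced on its own. The following is a rough standalone sketch (hypothetical names, not the DropoutHiddenLayer API) of how ifelse selects between the dropout-masked training output and the rescaled test output:

import numpy
import theano
import theano.tensor as T
from theano.ifelse import ifelse
from theano.tensor.shared_randomstreams import RandomStreams

srng = RandomStreams(seed=1234)
is_train = T.iscalar('is_train')   # 1 = training phase, 0 = test phase
x = T.matrix('x')
dropout_rate = 0.5
keep = numpy.asarray(1. - dropout_rate, dtype=theano.config.floatX)

mask = srng.binomial(n=1, p=1. - dropout_rate, size=x.shape,
                     dtype=theano.config.floatX)
train_output = x * mask            # randomly drop units during training
test_output = x * keep             # rescale to the expected value at test time
output = ifelse(T.eq(is_train, 1), train_output, test_output)

f = theano.function([x, is_train], output)
data = numpy.ones((2, 4), dtype=theano.config.floatX)
print(f(data, 0))                  # deterministic: every entry equals 0.5
print(f(data, 1))                  # stochastic: entries are 0. or 1.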
Example 8: test_ifelse

# Required import: import theano [as alias]
# Or: from theano import ifelse [as alias]
def test_ifelse(self):
    config1 = theano.config.profile
    config2 = theano.config.profile_memory

    try:
        theano.config.profile = True
        theano.config.profile_memory = True

        a, b = T.scalars('a', 'b')
        x, y = T.scalars('x', 'y')

        z = ifelse(T.lt(a, b), x * 2, y * 2)

        p = theano.ProfileStats(False)

        if theano.config.mode in ["DebugMode", "DEBUG_MODE", "FAST_COMPILE"]:
            m = "FAST_RUN"
        else:
            m = None

        f_ifelse = theano.function([a, b, x, y], z, profile=p,
                                   name="test_ifelse", mode=m)

        val1 = 0.
        val2 = 1.
        big_mat1 = 10
        big_mat2 = 11

        f_ifelse(val1, val2, big_mat1, big_mat2)
    finally:
        theano.config.profile = config1
        theano.config.profile_memory = config2

Example 9: test_ifelse

# Required import: import theano [as alias]
# Or: from theano import ifelse [as alias]
def test_ifelse():
    a = T.scalar()
    b = generic()
    c = generic()

    notimpl = NotImplementedOp()

    lazys = [True]
    # We need lazy to end up being True for this test.
    if theano.config.vm.lazy in [True, None]:
        lazys = [True, None]

    cloops = [True, False]
    if theano.config.cxx == "":
        cloops = [False]

    for cloop in cloops:
        for lazy in lazys:
            linker = theano.gof.vm.VM_Linker(use_cloop=cloop, lazy=lazy)
            f = function([a, b, c], ifelse(a, notimpl(b), c),
                         mode=Mode(linker=linker, optimizer='fast_run'))

            try:
                # print "case 1"
                f(1, 'a', 'b')
                assert False
            except NotImplementedOp.E:
                pass
            # print "... passed"

            # print "case 2"
            # print f(0, 'a', 'b')
            assert f(0, 'a', 'b') == 'b'
            # print "... passed"

Example 10: more_complex_test

# Required import: import theano [as alias]
# Or: from theano import ifelse [as alias]
def more_complex_test():
    notimpl = NotImplementedOp()
    ifelseifelseif = IfElseIfElseIf()

    x1 = T.scalar('x1')
    x2 = T.scalar('x2')
    c1 = T.scalar('c1')
    c2 = T.scalar('c2')
    t1 = ifelse(c1, x1, notimpl(x2))
    t1.name = 't1'
    t2 = t1 * 10
    t2.name = 't2'
    t3 = ifelse(c2, t2, x1 + t1)
    t3.name = 't3'
    t4 = ifelseifelseif(T.eq(x1, x2), x1, T.eq(x1, 5), x2, c2, t3, t3 + 0.5)
    t4.name = 't4'

    f = function([c1, c2, x1, x2], t4, mode=Mode(linker='vm',
                                                 optimizer='fast_run'))

    if theano.config.vm.lazy is False:
        try:
            f(1, 0, numpy.array(10, dtype=x1.dtype), 0)
            assert False
        except NotImplementedOp.E:
            pass
    else:
        print(f(1, 0, numpy.array(10, dtype=x1.dtype), 0))
        assert f(1, 0, numpy.array(10, dtype=x1.dtype), 0) == 20.5
    print('... passed')

Example 11: test_callback_with_ifelse

# Required import: import theano [as alias]
# Or: from theano import ifelse [as alias]
def test_callback_with_ifelse(self):
    a, b, c = tensor.scalars('abc')
    f = function([a, b, c], ifelse(a, 2 * b, 2 * c),
                 mode=Mode(
                     optimizer=None,
                     linker=vm.VM_Linker(callback=self.callback)))

    f(1, 2, 3)

    assert self.n_callbacks['IfElse'] == 2

Example 12: test_no_leak_many_call_lazy

# Required import: import theano [as alias]
# Or: from theano import ifelse [as alias]
def test_no_leak_many_call_lazy():
    # Verify no memory leaks when calling a function a lot of times.
    # This isn't really a unit test: you have to run it and watch top to
    # see if there's a leak.

    def build_graph(x, depth=5):
        z = x
        for d in range(depth):
            z = ifelse(z.mean() > 0.5, -z, z)
        return z

    def time_linker(name, linker):
        steps_a = 10
        x = tensor.dvector()
        a = build_graph(x, steps_a)
        f_a = function([x], a,
                       mode=Mode(optimizer=None,
                                 linker=linker()))
        inp = numpy.random.rand(1000000)
        for i in xrange(100):
            f_a(inp)
            if 0:  # this doesn't seem to work, prints 0 for everything
                import resource
                pre = resource.getrusage(resource.RUSAGE_SELF)
                post = resource.getrusage(resource.RUSAGE_SELF)
                print(pre.ru_ixrss, post.ru_ixrss)
                print(pre.ru_idrss, post.ru_idrss)
                print(pre.ru_maxrss, post.ru_maxrss)

    print(1)
    time_linker('vmLinker_C',
                lambda: vm.VM_Linker(allow_gc=False, use_cloop=True))
    print(2)
    time_linker('vmLinker',
                lambda: vm.VM_Linker(allow_gc=False, use_cloop=False))

Example 13: test_not_lazy_if_inplace

# Required import: import theano [as alias]
# Or: from theano import ifelse [as alias]
def test_not_lazy_if_inplace(self):
    # Tests that if the outputs are scalars and the graph is big,
    # we disable the inplace opt to speed up optimization.
    x = tensor.vector('x', dtype=self.dtype)
    y = tensor.vector('y', dtype=self.dtype)
    c = tensor.iscalar('c')
    mode = theano.compile.get_mode(self.mode).excluding(
        # Disable many opts to keep the graph big enough to disable
        # the inplace opt.
        'fusion', 'local_add_canonizer', 'inplace',
        'constant_folding', 'constant_folding')
    y2 = reduce(lambda x, y: x + y, [y] + list(range(200)))
    f = theano.function([c, x, y], ifelse(c, x, y2), mode=mode)
    # The ifelse should not be in-place.
    ifnode = [n for n in f.maker.fgraph.toposort()
              if isinstance(n.op, IfElse)]
    assert len(ifnode) == 1
    assert not ifnode[0].op.as_view
    rng = numpy.random.RandomState(utt.fetch_seed())

    xlen = rng.randint(200)
    ylen = rng.randint(200)
    vx = numpy.asarray(rng.uniform(size=(xlen,)), self.dtype)
    vy = numpy.asarray(rng.uniform(size=(ylen,)), self.dtype)

    assert numpy.allclose(vx, f(1, vx, vy))
    assert numpy.allclose(vy + sum(range(200)), f(0, vx, vy))

Example 14: test_mixed_dtype

# Required import: import theano [as alias]
# Or: from theano import ifelse [as alias]
def test_mixed_dtype(self):
    x1 = tensor.vector('x1', dtype='int32')
    x2 = tensor.vector('x2', dtype=self.dtype)
    y1 = tensor.vector('y1', dtype='int32')
    y2 = tensor.vector('y2', dtype=self.dtype)
    c = tensor.iscalar('c')
    f = theano.function([c, x1, x2, y1, y2],
                        ifelse(c, (x1, x2), (y1, y2)), mode=self.mode)
    self.assertFunctionContains1(f, self.get_ifelse(2))
    rng = numpy.random.RandomState(utt.fetch_seed())

    xlen = rng.randint(200)
    ylen = rng.randint(200)
    vx1 = numpy.asarray(rng.uniform(size=(xlen,)) * 3, 'int32')
    vx2 = numpy.asarray(rng.uniform(size=(xlen,)), self.dtype)
    vy1 = numpy.asarray(rng.uniform(size=(ylen,)) * 3, 'int32')
    vy2 = numpy.asarray(rng.uniform(size=(ylen,)), self.dtype)

    o1, o2 = f(1, vx1, vx2, vy1, vy2)
    assert numpy.allclose(vx1, o1)
    assert numpy.allclose(vx2, o2)
    o1, o2 = f(0, vx1, vx2, vy1, vy2)
    assert numpy.allclose(vy1, o1)
    assert numpy.allclose(vy2, o2)

Example 15: test_lazy_if_on_generics

# Required import: import theano [as alias]
# Or: from theano import ifelse [as alias]
def test_lazy_if_on_generics(self):
    x = theano.generic()
    y = theano.generic()
    c = tensor.iscalar('c')
    f = theano.function([c, x, y], ifelse(c, x, y))

    vx = ['testX']
    vy = ['testY']

    assert f(1, vx, vy) == vx
    assert f(0, vx, vy) == vy