This article collects typical usage examples of theano.Mode in Python. If you are unsure how theano.Mode is used, or what it is for, the curated code examples below may help. You can also explore further usage examples from the theano module, where Mode is exposed.
The following lists 15 code examples of theano.Mode, sorted by popularity by default.
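Before the numbered examples, here is a minimal sketch of the call pattern most of them share: build a Mode from a linker and an optimizer and pass it to theano.function. The linker and optimizer names used here ('py' and 'fast_compile') are ordinary built-in choices picked purely for illustration; they are not taken from any particular example below.

import numpy
import theano
import theano.tensor as T

# Construct a Mode with an explicit linker and optimizer, then compile with it.
x = T.dvector('x')
mode = theano.Mode(linker='py', optimizer='fast_compile')
f = theano.function([x], x * 2, mode=mode)

print(f(numpy.array([1.0, 2.0, 3.0])))  # expected: [2. 4. 6.]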
Example 1: test_borrow_output
# Required module: import theano [as alias]
# Or: from theano import Mode [as alias]
def test_borrow_output(self):
    a = T.dmatrix()
    f = function([a], Out(a, borrow=False))
    o = N.ones((3, 3))
    assert o is not f(o)  # function no longer permits aliasing outputs to inputs

    f = function([a], Out(a * 4, borrow=False))
    o = N.ones((3, 3))
    four = f(o)
    assert numpy.all(four == 4)
    f(o + .1)  # should not clobber the memory used to store four
    assert numpy.all(four == 4)

    f = function([a], Out(a * 4, borrow=True), mode=theano.Mode('c|py_nogc', 'fast_run'))
    o = N.ones((3, 3))
    four = f(o)
    assert numpy.all(four == 4)
    f(o + .1)  # should clobber the memory used to store four
    if theano.config.cxx:
        assert not numpy.all(four == 4)
    else:
        # The Elemwise.perform method doesn't reuse memory,
        # as some numpy versions don't support that correctly.
        assert numpy.all(four == 4)
Example 2: test_no_output_from_implace
# Required module: import theano [as alias]
# Or: from theano import Mode [as alias]
def test_no_output_from_implace():
    x = T.matrix()
    y = T.matrix()
    a = T.dot(x, y)
    b = T.tanh(a)

    # Ensure that the elemwise op that produces the output is inplace when
    # using a mode that does not include the optimization
    fct_no_opt = theano.function([x, y], b, mode="FAST_RUN")
    op = fct_no_opt.maker.fgraph.outputs[0].owner.op
    assert (hasattr(op, 'destroy_map') and 0 in op.destroy_map)

    # Ensure that the elemwise op that produces the output is not inplace when
    # using a mode that includes the optimization
    opt = AddFeatureOptimizer(NoOutputFromInplace())
    mode_opt = Mode(linker="cvm", optimizer="fast_run").register((opt, 49.9))

    fct_opt = theano.function([x, y], b, mode=mode_opt)
    op = fct_opt.maker.fgraph.outputs[0].owner.op
    assert (not hasattr(op, 'destroy_map') or 0 not in op.destroy_map)
Example 3: test_c_thunks
# Required module: import theano [as alias]
# Or: from theano import Mode [as alias]
def test_c_thunks():
    a = tensor.scalars('a')
    b, c = tensor.vectors('bc')
    cases = [False]
    if theano.config.cxx:
        cases.append(True)
    for c_thunks in cases:
        f = function([a, b, c], ifelse(a, a * b, b * c),
                     mode=Mode(
                         optimizer=None,
                         linker=vm.VM_Linker(c_thunks=c_thunks,
                                             use_cloop=False)))
        f(1, [2], [3, 2])
        from nose.tools import assert_raises
        assert_raises(ValueError, f, 0, [2], [3, 4])
        assert any([hasattr(t, 'cthunk') for t in f.fn.thunks]) == c_thunks
Example 4: test_no_leak_many_graphs
# Required module: import theano [as alias]
# Or: from theano import Mode [as alias]
def test_no_leak_many_graphs():
    # Verify no memory leaks when creating and deleting a lot of functions

    # This isn't really a unit test, you have to run it and look at top to
    # see if there's a leak
    for i in xrange(10000):
        x = tensor.vector()
        z = x
        for d in range(10):
            z = tensor.sin(-z + 1)
        f = function([x], z, mode=Mode(optimizer=None, linker='cvm'))

        if not i % 100:
            print(gc.collect())
        sys.stdout.flush()

        gc.collect()
        if 1:
            f([2.0])
            f([3.0])
            f([4.0])
            f([5.0])
Example 5: test_vm_gc
# Required module: import theano [as alias]
# Or: from theano import Mode [as alias]
def test_vm_gc():
    """This already caused a bug in the trunk of Theano.

    The bug was introduced in the trunk on July 5th, 2012 and fixed on
    July 30th.
    """
    x = theano.tensor.vector()
    p = RunOnce()(x)
    mode = theano.Mode(linker=theano.gof.vm.VM_Linker(lazy=True))
    f = theano.function([theano.In(x, mutable=True)], [p + 1, p + 2],
                        mode=mode)
    f([1, 2, 3])

    p = RunOnce()(x)
    pp = p + p
    f = theano.function([x], [pp + pp],
                        mode=mode)
    f([1, 2, 3])
Example 6: dnn_version
# Required module: import theano [as alias]
# Or: from theano import Mode [as alias]
def dnn_version():
    """Return the current cuDNN version we compile with.

    This returns a tuple with the header version and the library
    version we link with. For older cuDNN versions without version
    information, we return -1.
    """
    if not dnn_available():
        raise Exception(
            "We can't determine the cudnn version as it is not available",
            dnn_available.msg)

    if dnn_version.v is None:
        f = theano.function([], DnnVersion()(),
                            theano.Mode(optimizer=None),
                            profile=False)
        dnn_version.v = f()
    return dnn_version.v
Example 7: test_deepcopy
# Required module: import theano [as alias]
# Or: from theano import Mode [as alias]
def test_deepcopy():
    a = cuda.fmatrix()
    a_v = cuda.CudaNdarray(numpy.zeros((3, 4), dtype='float32'))

    # We force the c code to check that we generate c code
    mode = theano.Mode("c", mode_with_gpu.optimizer)
    f = theano.function([a], a, mode=mode)
    theano.printing.debugprint(f)
    out = f(a_v)
    assert out is not a_v
    assert numpy.allclose(numpy.asarray(a_v), numpy.asarray(out))

    # We force the python linker as the default code should work for this op
    mode = theano.Mode("py", mode_with_gpu.optimizer)
    f = theano.function([a], a, mode=mode)
    theano.printing.debugprint(f)
    out = f(a_v)
    assert out is not a_v
    assert numpy.allclose(numpy.asarray(a_v), numpy.asarray(out))
Example 8: test_borrow_output
# Required module: import theano [as alias]
# Or: from theano import Mode [as alias]
def test_borrow_output(self):
    a = T.dmatrix()
    f = function([a], Out(a, borrow=False))
    o = N.ones((3, 3))
    assert o is not f(o)  # function no longer permits aliasing outputs to inputs

    f = function([a], Out(a * 4, borrow=False))
    o = N.ones((3, 3))
    four = f(o)
    assert numpy.all(four == 4)
    f(o + .1)  # should not clobber the memory used to store four
    assert numpy.all(four == 4)

    f = function([a], Out(a * 4, borrow=True), mode=theano.Mode('c|py_nogc', 'fast_run'))
    o = N.ones((3, 3))
    four = f(o)
    assert numpy.all(four == 4)
    f(o + .1)  # should clobber the memory used to store four
    if theano.config.cxx:
        assert not numpy.all(four == 4)
    else:
        # The Elemwise.perform method doesn't reuse memory,
        # as some numpy versions don't support that correctly.
        assert numpy.all(four == 4)
Example 9: __init__
# Required module: import theano [as alias]
# Or: from theano import Mode [as alias]
def __init__(self, network, config=None, batch_size=20):
    """
    Create an SGD trainer.
    :type network:
    :type config: deepy.conf.TrainerConfig
    :return:
    """
    super(DelayedBatchSGDTrainer, self).__init__(network, config)

    self.learning_rate = self.config.learning_rate
    self.batch_size = batch_size

    logging.info('compiling %s learning function', self.__class__.__name__)

    network_updates = list(network.updates) + list(network._learning_updates)
    learning_updates = list(self.learning_updates())
    update_list = network_updates + learning_updates
    logging.info("network updates: %s" % " ".join(map(str, [x[0] for x in network_updates])))
    logging.info("learning updates: %s" % " ".join(map(str, [x[0] for x in learning_updates])))

    self.learning_func = theano.function(
        network.inputs,
        self.training_variables,
        updates=update_list, allow_input_downcast=True,
        mode=theano.Mode(linker=THEANO_LINKER))
Example 10: speed
# Required module: import theano [as alias]
# Or: from theano import Mode [as alias]
def speed(self):
    n_calls = 20000
    print("n_calls", n_calls)
    for border_mode in ['valid', 'full']:
        print()
        print(border_mode)
        for openmp in [False, True]:
            print("OpenMP", openmp)
            image_shapes = [(1, 5, 6, 6),
                            (10, 5, 6, 6),
                            # (10, 10, 16, 16),
                            # (10, 10, 32, 32)
                            ]
            print("image_shape", image_shapes)
            for image_shape in image_shapes:
                filter_shapes = [(1, 5, 4, 4), (2, 5, 4, 4), (5, 5, 4, 4)]
                print("filter_shapes", filter_shapes)
                for filter_shape in filter_shapes:
                    input = theano.shared(numpy.random.random(image_shape))
                    filters = theano.shared(numpy.random.random(filter_shape))
                    output = self.conv2d(input, filters,
                                         image_shape, filter_shape,
                                         border_mode,
                                         unroll_patch=True,
                                         openmp=openmp)
                    mode = theano.Mode(linker=theano.gof.vm.VM_Linker(
                        allow_gc=False,
                        use_cloop=True))
                    theano_conv = theano.function([], output, mode=mode)
                    t1 = time.time()
                    theano_conv.fn(n_calls=n_calls)
                    t2 = time.time()
                    print(t2 - t1, end=' ')
                print()
Example 11: test_including
# Required module: import theano [as alias]
# Or: from theano import Mode [as alias]
def test_including():
    mode = theano.Mode(optimizer='merge')
    mode.including('fast_compile')
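A side note on this example: Mode.including returns a new Mode rather than modifying the mode in place, so in ordinary use its result is assigned before being passed to theano.function. A minimal sketch under that assumption (the variable names are illustrative):

import theano
import theano.tensor as T

x = T.dscalar('x')
mode = theano.Mode(optimizer='merge')
mode = mode.including('fast_compile')  # keep the returned Mode; the original is unchanged
f = theano.function([x], x + 1, mode=mode)
print(f(1.0))  # expected: 2.0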
Example 12: test_ifelse
# Required module: import theano [as alias]
# Or: from theano import Mode [as alias]
def test_ifelse():
    a = T.scalar()
    b = generic()
    c = generic()

    notimpl = NotImplementedOp()

    lazys = [True]
    # We need lazy to end up being True for this test.
    if theano.config.vm.lazy in [True, None]:
        lazys = [True, None]

    cloops = [True, False]
    if theano.config.cxx == "":
        cloops = [False]

    for cloop in cloops:
        for lazy in lazys:
            linker = theano.gof.vm.VM_Linker(use_cloop=cloop, lazy=lazy)
            f = function([a, b, c], ifelse(a, notimpl(b), c),
                         mode=Mode(linker=linker, optimizer='fast_run'))

            try:
                # print "case 1"
                f(1, 'a', 'b')
                assert False
            except NotImplementedOp.E:
                pass
            # print "... passed"

            # print "case 2"
            # print f(0, 'a', 'b')
            assert f(0, 'a', 'b') == 'b'
            # print "... passed"
Example 13: more_complex_test
# Required module: import theano [as alias]
# Or: from theano import Mode [as alias]
def more_complex_test():
    notimpl = NotImplementedOp()
    ifelseifelseif = IfElseIfElseIf()

    x1 = T.scalar('x1')
    x2 = T.scalar('x2')
    c1 = T.scalar('c1')
    c2 = T.scalar('c2')
    t1 = ifelse(c1, x1, notimpl(x2))
    t1.name = 't1'
    t2 = t1 * 10
    t2.name = 't2'
    t3 = ifelse(c2, t2, x1 + t1)
    t3.name = 't3'
    t4 = ifelseifelseif(T.eq(x1, x2), x1, T.eq(x1, 5), x2, c2, t3, t3 + 0.5)
    t4.name = 't4'

    f = function([c1, c2, x1, x2], t4, mode=Mode(linker='vm',
                                                 optimizer='fast_run'))
    if theano.config.vm.lazy is False:
        try:
            f(1, 0, numpy.array(10, dtype=x1.dtype), 0)
            assert False
        except NotImplementedOp.E:
            pass
    else:
        print(f(1, 0, numpy.array(10, dtype=x1.dtype), 0))
        assert f(1, 0, numpy.array(10, dtype=x1.dtype), 0) == 20.5
    print('... passed')
Example 14: test_callback
# Required module: import theano [as alias]
# Or: from theano import Mode [as alias]
def test_callback(self):
    a, b, c = tensor.scalars('abc')
    f = function([a, b, c], (a + b) + c,
                 mode=Mode(
                     optimizer=None,
                     linker=vm.VM_Linker(callback=self.callback)))

    f(1, 2, 3)
    assert sum(self.n_callbacks.values()) == len(f.maker.fgraph.toposort())
    f(1, 2, 3)
    assert (sum(self.n_callbacks.values()) ==
            len(f.maker.fgraph.toposort()) * 2)
Example 15: test_callback_with_ifelse
# Required module: import theano [as alias]
# Or: from theano import Mode [as alias]
def test_callback_with_ifelse(self):
    a, b, c = tensor.scalars('abc')
    f = function([a, b, c], ifelse(a, 2 * b, 2 * c),
                 mode=Mode(
                     optimizer=None,
                     linker=vm.VM_Linker(callback=self.callback)))

    f(1, 2, 3)
    assert self.n_callbacks['IfElse'] == 2