This article collects typical usage examples of theano.compile in Python. If you are unsure what theano.compile does or how to use it in practice, the hand-picked code examples below should help, and they may also give you a better picture of the theano module it belongs to.
Below are 15 code examples of theano.compile, listed by popularity by default.
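Before the examples, here is a minimal sketch (not taken from the examples below; the variable names are illustrative) of what compiling a function through theano.compile looks like:

import theano
import theano.tensor as T

x = T.dscalar('x')
# theano.function is the public entry point of the theano.compile machinery;
# passing the default mode explicitly is equivalent to omitting it.
f = theano.function([x], x ** 2, mode=theano.compile.mode.get_default_mode())
print(f(3.0))  # -> 9.0
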
Example 1: __init__

# Required module import: import theano [as alias]
# Or: from theano import compile [as alias]
def __init__(self, name, shared=tensor._shared,
             sub=tensor.Subtensor,
             inc_sub=tensor.IncSubtensor,
             adv_sub1=tensor.AdvancedSubtensor1,
             adv_incsub1=tensor.AdvancedIncSubtensor1,
             mode=None,
             dtype=theano.config.floatX,
             type=tensor.TensorType,
             ignore_topo=DeepCopyOp):
    self.shared = shared
    self.sub = sub
    self.inc_sub = inc_sub
    self.adv_sub1 = adv_sub1
    self.adv_incsub1 = adv_incsub1
    if mode is None:
        mode = theano.compile.mode.get_default_mode()
    self.mode = mode
    self.dtype = dtype
    self.type = type
    self.ignore_topo = ignore_topo
    self.fast_compile = theano.config.mode == 'FAST_COMPILE'
    self.ops = (sub, inc_sub, adv_sub1, adv_incsub1)
    return super(T_subtensor, self).__init__(name)

Example 2: dnn_version

# Required module import: import theano [as alias]
# Or: from theano import compile [as alias]
def dnn_version():
    """Return the current cuDNN version we compile with.

    This returns a tuple with the header version and the library
    version we link with. For older cuDNN versions without version
    information, we return -1.
    """
    if not dnn_available():
        raise Exception(
            "We can't determine the cudnn version as it is not available",
            dnn_available.msg)

    if dnn_version.v is None:
        f = theano.function([], DnnVersion()(),
                            theano.Mode(optimizer=None),
                            profile=False)
        dnn_version.v = f()
    return dnn_version.v

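A hedged usage sketch for the function above, assuming the legacy theano.sandbox.cuda backend where dnn_available and dnn_version are defined:

from theano.sandbox.cuda import dnn  # assumption: the old CUDA sandbox backend is installed

if dnn.dnn_available():
    # Returns (header_version, library_version), or -1 for very old cuDNN releases.
    print(dnn.dnn_version())
else:
    # dnn_available.msg stores the reason cuDNN could not be used, as the raise above shows.
    print(dnn.dnn_available.msg)
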
Example 3: handle_shared_float32

# Required module import: import theano [as alias]
# Or: from theano import compile [as alias]
def handle_shared_float32(tf):
    """
    Set the default shared type for float32 tensors to CudaNdarrayType.

    This function is intended to be called from use(gpu_index), not directly.
    """
    if tf:
        theano.compile.shared_constructor(float32_shared_constructor)
    else:
        theano.compile.shared_constructor(float32_shared_constructor, True)
        assert (float32_shared_constructor not in
                theano.compile.shared.constructors)

# We can't test the driver during import here as this causes a circular
# import dependency. So we also test it in the file theano/__init__.py

Example 4: test_1d_set_adv_selection

# Required module import: import theano [as alias]
# Or: from theano import compile [as alias]
def test_1d_set_adv_selection(self):
    a = set_subtensor(self.v[self.adv1q], self.v[self.adv1q])

    assert a.type == self.v.type

    # TODO: compile a function and verify that the subtensor is removed
    # completely, because the whole expression is redundant.
    f = theano.function([self.v, self.adv1q], a, allow_input_downcast=True)
    aval = f([.4, .9, .1], [1, 2])
    assert numpy.allclose(aval, [.4, 0.9, 0.1])

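The same set_subtensor pattern works outside the test class; a small standalone sketch (names are illustrative):

import numpy
import theano
import theano.tensor as T

v = T.vector('v')
idx = T.lvector('idx')
# Build a new vector equal to v with the selected positions overwritten by 0.
out = T.set_subtensor(v[idx], 0)
f = theano.function([v, idx], out, allow_input_downcast=True)
print(f([.4, .9, .1], [1, 2]))  # -> [0.4, 0.0, 0.0]
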
Example 5: compile_gpu_func

# Required module import: import theano [as alias]
# Or: from theano import compile [as alias]
def compile_gpu_func(nan_is_error, inf_is_error, big_is_error):
    """Compile utility functions used by contains_nan and contains_inf."""
    global f_gpumin, f_gpumax, f_gpuabsmax
    if not cuda.cuda_available:
        return
    guard_input = cuda.fvector('nan_guard')
    cuda_compile_failed = False
    if (nan_is_error or inf_is_error) and f_gpumin is None:
        try:
            f_gpumin = theano.function(
                [guard_input], T.min(guard_input),
                mode='FAST_RUN'
            )
        except RuntimeError:
            # This can happen if cuda is available, but the
            # device is in exclusive mode and used by another
            # process.
            cuda_compile_failed = True
    if inf_is_error and not cuda_compile_failed and f_gpumax is None:
        try:
            f_gpumax = theano.function(
                [guard_input], T.max(guard_input),
                mode='FAST_RUN'
            )
        except RuntimeError:
            # This can happen if cuda is available, but the
            # device is in exclusive mode and used by another
            # process.
            cuda_compile_failed = True
    if big_is_error and not cuda_compile_failed and f_gpuabsmax is None:
        try:
            f_gpuabsmax = theano.function(
                [guard_input], T.max(T.abs_(guard_input)),
                mode='FAST_RUN'
            )
        except RuntimeError:
            # This can happen if cuda is available, but the
            # device is in exclusive mode and used by another
            # process.
            cuda_compile_failed = True

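compile_gpu_func is the internal helper behind Theano's NaN guarding; from user code the same checks are usually enabled through NanGuardMode. A minimal sketch, assuming theano.compile.nanguardmode is available:

import numpy
import theano
import theano.tensor as T
from theano.compile.nanguardmode import NanGuardMode

x = T.vector('x')
# Any NaN, Inf, or unusually large value flowing through the graph raises an error at run time.
f = theano.function([x], x * 2,
                    mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True))
f(numpy.ones(3, dtype=theano.config.floatX))  # finite inputs pass through normally
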
Example 6: test_constant_output

# Required module import: import theano [as alias]
# Or: from theano import compile [as alias]
def test_constant_output(self):
    # Test that if the output is a constant, we respect the Theano memory interface.
    f = theano.function([], theano.tensor.constant([4]))
    # print f.maker.fgraph.toposort()
    out = f()
    assert (out == 4).all()
    out[0] = 3
    out2 = f()
    # If the following 2 asserts fail, it means Theano broke its memory contract.
    assert out2 is not out
    assert (out2 == 4).all()

    # Test that if the output is a constant and borrow=True, we respect the
    # Theano memory interface.
    f = theano.function([], Out(theano.tensor.constant([4]), borrow=True))
    # print f.maker.fgraph.toposort()
    out = f()
    assert (out == 4).all()
    out[0] = 3
    out2 = f()
    if isinstance(theano.compile.mode.get_default_mode(),
                  theano.compile.DebugMode):
        # In DebugMode, we don't implement optimization based on borrow on the output.
        assert (out2 == 4).all()
    else:
        assert out2 is out
        assert (out2 == 3).all()

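The borrow flag exercised above comes from the theano.compile.Out wrapper; a short sketch of opting into borrowed outputs (the returned buffer may be overwritten by the next call):

import theano
import theano.tensor as T
from theano.compile import Out

x = T.vector('x')
# borrow=True lets the compiled function return an internal buffer instead of a fresh copy.
f = theano.function([x], Out(x * 2, borrow=True))
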
Example 7: test_empty_givens_updates

# Required module import: import theano [as alias]
# Or: from theano import compile [as alias]
def test_empty_givens_updates():
    """
    Regression test for bug fixed in 8625e03.
    """
    # Empty givens / updates dictionaries were not properly detected before,
    # triggering useless crashes at compile time.
    x = T.scalar()
    y = x * 2
    function([theano.In(x)], y, givens={})
    function([theano.In(x)], y, updates={})

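For contrast with the empty dictionaries above, a small sketch of a non-empty updates argument, its most common use (the accumulator is illustrative):

import theano
import theano.tensor as T

state = theano.shared(0, name='state')
inc = T.iscalar('inc')
# updates=[(shared_var, new_expr)] writes new_expr back into shared_var after each call.
accumulate = theano.function([inc], state, updates=[(state, state + inc)])
accumulate(2)
accumulate(3)
print(state.get_value())  # -> 5
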
Example 8: setUp

# Required module import: import theano [as alias]
# Or: from theano import compile [as alias]
def setUp(self):
    self.old_ts = theano.tensor.TensorType.filter_checks_isfinite
    self.old_dm = theano.compile.mode.predefined_modes[
        'DEBUG_MODE'].check_isfinite

Example 9: tearDown

# Required module import: import theano [as alias]
# Or: from theano import compile [as alias]
def tearDown(self):
    theano.tensor.TensorType.filter_checks_isfinite = self.old_ts
    theano.compile.mode.predefined_modes[
        'DEBUG_MODE'].check_isfinite = self.old_dm

Example 10: test_check_isfinite

# Required module import: import theano [as alias]
# Or: from theano import compile [as alias]
def test_check_isfinite(self):
    x = theano.tensor.vector()
    f = theano.function([x], (x + 2) * 5, mode='DEBUG_MODE')
    g = theano.function([x], theano.tensor.log(x), mode='DEBUG_MODE')

    # this should work
    f(numpy.log([3, 4, 5]).astype(config.floatX))

    # if TensorType.filter_checks_isfinite were true, these would raise
    # ValueError; if not, DebugMode checks internally and raises
    # InvalidValueError
    # passing an invalid value as an input should trigger the error
    self.assertRaises(debugmode.InvalidValueError, f,
                      numpy.log([3, -4, 5]).astype(config.floatX))
    self.assertRaises(debugmode.InvalidValueError, f,
                      (numpy.asarray([0, 1.0, 0]) / 0).astype(config.floatX))
    self.assertRaises(debugmode.InvalidValueError, f,
                      (numpy.asarray([1.0, 1.0, 1.0]) / 0).astype(config.floatX))

    # generating an invalid value internally should trigger
    # InvalidValueError
    self.assertRaises(debugmode.InvalidValueError, g,
                      numpy.asarray([3, -4, 5], dtype=config.floatX))

    # this should disable the exception
    theano.tensor.TensorType.filter_checks_isfinite = False
    theano.compile.mode.predefined_modes[
        'DEBUG_MODE'].check_isfinite = False

    # insert several Inf
    f(numpy.asarray(numpy.asarray([1.0, 1.0, 1.0]) / 0,
                    dtype=config.floatX))

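A condensed sketch of the same toggle outside the test harness, using only attributes that already appear above:

import numpy
import theano
import theano.tensor as T

x = T.vector('x')
g = theano.function([x], T.log(x), mode='DEBUG_MODE')

debug_mode = theano.compile.mode.predefined_modes['DEBUG_MODE']
old_flag = debug_mode.check_isfinite
debug_mode.check_isfinite = False  # silence InvalidValueError for non-finite values
try:
    g(numpy.asarray([3, -4, 5], dtype=theano.config.floatX))  # produces NaN without raising
finally:
    debug_mode.check_isfinite = old_flag
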
Example 11: test_duplicate_inputs

# Required module import: import theano [as alias]
# Or: from theano import compile [as alias]
def test_duplicate_inputs(self):
    x = theano.tensor.lscalar('x')
    self.assertRaises(theano.compile.UnusedInputError,
                      theano.function, [x, x, x], x)

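UnusedInputError is the exception theano.function raises whenever an input never reaches the output; a short sketch of relaxing that check with the on_unused_input argument:

import theano
import theano.tensor as T

x = T.lscalar('x')
y = T.lscalar('y')
# y does not feed the output; 'ignore' (or 'warn') compiles anyway instead of raising UnusedInputError.
f = theano.function([x, y], x * 2, on_unused_input='ignore')
print(f(3, 0))  # -> 6
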
Example 12: test_reallocation

# Required module import: import theano [as alias]
# Or: from theano import compile [as alias]
def test_reallocation():
    x = tensor.scalar('x')
    y = tensor.scalar('y')
    z = tensor.tanh(3 * x + y) + tensor.cosh(x + 5 * y)
    # The functionality is currently implemented only for the non-lazy, non-C VM.
    for l in [vm.VM_Linker(allow_gc=False, lazy=False, use_cloop=False),
              vm.VM_Linker(allow_gc=True, lazy=False, use_cloop=False)]:
        m = theano.compile.get_mode(theano.Mode(linker=l))
        m = m.excluding('fusion', 'inplace')

        f = theano.function([x, y], z, name="test_reduce_memory",
                            mode=m)
        output = f(1, 2)
        assert output
        storage_map = f.fn.storage_map

        def check_storage(storage_map):
            from theano.tensor.var import TensorConstant
            for i in storage_map:
                if not isinstance(i, TensorConstant):
                    keys_copy = list(storage_map.keys())[:]
                    keys_copy.remove(i)
                    for o in keys_copy:
                        if (storage_map[i][0] and
                                storage_map[i][0] is storage_map[o][0]):
                            return [True, storage_map[o][0]]
            return [False, None]

        assert check_storage(storage_map)[0]
        assert len(set(id(v) for v in
                       itervalues(storage_map))) < len(storage_map)

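The mode-building calls above generalize beyond explicit VM_Linker instances; a brief sketch of the same pattern starting from a named mode:

import theano
import theano.tensor as T

x = T.scalar('x')
y = T.scalar('y')
# Start from a predefined mode, then drop selected graph optimizations by tag.
mode = theano.compile.get_mode('FAST_RUN').excluding('fusion', 'inplace')
f = theano.function([x, y], T.tanh(3 * x + y), mode=mode)
print(f(1, 2))  # -> tanh(5) ~= 0.9999
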
Example 13: set_cuda_disabled

# Required module import: import theano [as alias]
# Or: from theano import compile [as alias]
def set_cuda_disabled():
    """
    Function used to disable cuda.

    A warning is displayed, so that the user is aware that cuda-based code is
    not going to work.
    Note that there is no point calling this function from outside of
    `cuda.__init__`, since it has no effect once the module is loaded.
    """
    global cuda_available, cuda_warning_is_displayed
    cuda_available = False

# cuda_ndarray compile and import

Example 14: test_filter_float

# Required module import: import theano [as alias]
# Or: from theano import compile [as alias]
def test_filter_float():
    theano.compile.shared_constructor(gpuarray_shared_constructor)
    try:
        s = theano.shared(numpy.array(0.0, dtype='float32'),
                          target=test_ctx_name)
        theano.function([], updates=[(s, 0.0)])
    finally:
        del theano.compile.sharedvalue.shared.constructors[-1]

Example 15: setUp

# Required module import: import theano [as alias]
# Or: from theano import compile [as alias]
def setUp(self):
    self.old_ts = theano.tensor.TensorType.filter_checks_isfinite
    self.old_dm = theano.compile.mode.predefined_modes[
        'DEBUG_MODE'].check_isfinite