This article collects typical usage examples of theano.config in Python. If you have been wondering what theano.config is for, how it is used in practice, or what real-world code that relies on it looks like, the curated examples below should help. You can also read further about the theano module that provides it.
The following shows 15 code examples of theano.config, sorted by popularity by default.
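Before the examples, here is a minimal sketch of my own (not taken from the snippets below) showing how theano.config is typically consulted in user code, assuming a standard Theano installation: configuration values such as floatX and device are read like ordinary module attributes.

import numpy
import theano
import theano.tensor as T

# theano.config exposes global settings; floatX is the default float dtype.
x = T.matrix('x')                                    # dtype defaults to config.floatX
w = numpy.ones((3, 2), dtype=theano.config.floatX)   # keep NumPy data consistent with it
f = theano.function([x], T.dot(x, w))
print(theano.config.floatX, theano.config.device)    # e.g. 'float32', 'cpu'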
Example 1: grad
# Required import: import theano [as alias]
# Or: from theano import config [as alias]
def grad(self, inp, grads):
    x, = inp
    gz, = grads
    gz = as_tensor_variable(gz)
    grad_order = ['x'] * len(x.type.broadcastable)
    for i, v in enumerate(self.new_order):
        if v != 'x':
            grad_order[v] = i
    # Do not make the DimShuffle inplace as an optimization at the
    # canonicalization optimization phase will remove the inplace.
    # The inplace will be reintroduced automatically later in the graph.
    if 'int' in inp[0].dtype:
        return [inp[0].zeros_like(dtype=theano.config.floatX)]
    else:
        return [DimShuffle(gz.type.broadcastable, grad_order)(
            Elemwise(scalar.identity)(gz))]
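For context, this grad method is what gets invoked when differentiating through a dimshuffle; the short sketch below is my own illustration (not part of the snippet) and only uses the public Theano API.

import theano
import theano.tensor as T

x = T.matrix('x')
y = x.dimshuffle(1, 0, 'x')           # transpose and append a broadcastable axis
g = theano.grad(y.sum(), x)           # gradient flows back through DimShuffle.grad
f = theano.function([x], g)
print(f([[1.0, 2.0], [3.0, 4.0]]))    # all ones, same shape as x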
Example 2: summary
# Required import: import theano [as alias]
# Or: from theano import config [as alias]
def summary(self, file=sys.stderr, n_ops_to_print=20,
            n_apply_to_print=20):
    self.summary_function(file)
    self.summary_globals(file)
    local_time = sum(self.apply_time.values())
    if local_time > 0:
        self.summary_class(file, n_ops_to_print)
        self.summary_ops(file, n_ops_to_print)
        self.summary_nodes(file, n_apply_to_print)
    elif self.fct_callcount > 0:
        print(" No execution time accumulated "
              "(hint: try config profiling.time_thunks=1)", file=file)
    if config.profiling.debugprint:
        fcts = set([n.fgraph for n in self.apply_time.keys()])
        theano.printing.debugprint(fcts, print_type=True)
    if self.variable_shape or self.variable_strides:
        self.summary_memory(file, n_apply_to_print)
    if self.optimizer_profile:
        print("Optimizer Profile", file=file)
        print("-----------------", file=file)
        self.optimizer_profile[0].print_profile(file,
                                                self.optimizer_profile[1])
    self.print_tips(file)
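A profile object with this summary() method is usually obtained by compiling a function with profiling enabled; the hedged sketch below assumes the standard profile=True interface of theano.function.

import theano
import theano.tensor as T

x = T.vector('x')
# profile=True attaches a ProfileStats instance to the compiled function.
f = theano.function([x], (x ** 2).sum(), profile=True)
f([1.0, 2.0, 3.0])
f.profile.summary()   # prints the timing and memory tables to stderr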
Example 3: parse_config_string
# Required import: import theano [as alias]
# Or: from theano import config [as alias]
def parse_config_string(config_string, issue_warnings=True):
    """
    Parses a config string (comma-separated key=value components) into a dict.
    """
    config_dict = {}
    my_splitter = shlex.shlex(config_string, posix=True)
    my_splitter.whitespace = ','
    my_splitter.whitespace_split = True
    for kv_pair in my_splitter:
        kv_pair = kv_pair.strip()
        if not kv_pair:
            continue
        kv_tuple = kv_pair.split('=', 1)
        if len(kv_tuple) == 1:
            if issue_warnings:
                TheanoConfigWarning.warn(
                    ("Config key '%s' has no value, ignoring it"
                     % kv_tuple[0]),
                    stacklevel=1)
        else:
            k, v = kv_tuple
            # subsequent values for k will override earlier ones
            config_dict[k] = v
    return config_dict
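Usage is straightforward: the function turns a THEANO_FLAGS-style string into a dict, as in the small example below (the flags string itself is just an illustration).

flags = "mode=FAST_RUN,device=cpu,floatX=float32"
print(parse_config_string(flags))
# {'mode': 'FAST_RUN', 'device': 'cpu', 'floatX': 'float32'}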
Example 4: test_op_struct
# Required import: import theano [as alias]
# Or: from theano import config [as alias]
def test_op_struct(self):
    if not theano.config.cxx:
        raise SkipTest("G++ not available, so we need to skip this test.")
    sop = StructOp()
    c = sop(theano.tensor.constant(0))
    mode = None
    if theano.config.mode == 'FAST_COMPILE':
        mode = 'FAST_RUN'
    f = theano.function([], c, mode=mode)
    rval = f()
    assert rval == 0
    rval = f()
    assert rval == 1
    c2 = sop(theano.tensor.constant(1))
    f2 = theano.function([], [c, c2], mode=mode)
    rval = f2()
    assert rval == [0, 0]
Example 5: get_params
# Required import: import theano [as alias]
# Or: from theano import config [as alias]
def get_params(cost, criterion=lambda x: hasattr(x, 'param') and x.param == True):
    """
    Default criterion:
        lambda x: hasattr(x, 'param') and x.param == True
    This will return every parameter for cost from the computation graph.
    To exclude a parameter, just set 'param' to False:
        >>> h0 = lib.param('h0',
        ...                numpy.zeros((3, 2*512), dtype=theano.config.floatX))
        >>> print h0.param  # Default: True
        >>> h0.param = False
    In this case one can still get the list of all params (False or True) by:
        >>> lib.get_params(cost, lambda x: hasattr(x, 'param'))
    :returns:
        A list of params
    """
    return search(cost, criterion)
Example 6: is_1pexp
# Required import: import theano [as alias]
# Or: from theano import config [as alias]
def is_1pexp(t):
    """
    Returns
    -------
    object
        If 't' is of the form (1+exp(x)), return (False, x).
        Else return None.
    """
    if t.owner and t.owner.op == tensor.add:
        scalars, scalar_inputs, nonconsts = \
            opt.scalarconsts_rest(t.owner.inputs)
        # scalar_inputs are potentially dimshuffled and fill'd scalars
        if len(nonconsts) == 1:
            maybe_exp = nonconsts[0]
            if maybe_exp.owner and maybe_exp.owner.op == tensor.exp:
                # Verify that the constant terms sum to 1.
                if scalars:
                    scal_sum = scalars[0]
                    for s in scalars[1:]:
                        scal_sum = scal_sum + s
                    if numpy.allclose(scal_sum, 1):
                        return False, maybe_exp.owner.inputs[0]
                # Before 7987b51 there used to be a bug where *any* constant
                # was considered as if it was equal to 1, and thus this
                # function would incorrectly identify it as (1 + exp(x)).
                if config.warn.identify_1pexp_bug:
                    warnings.warn(
                        'Although your current code is fine, please note that '
                        'Theano versions prior to 0.5 (more specifically, '
                        'prior to commit 7987b51 on 2011-12-18) may have '
                        'yielded an incorrect result. To remove this warning, '
                        'either set the `warn.identify_1pexp_bug` config '
                        'option to False, or `warn.ignore_bug_before` to at '
                        'least \'0.4.1\'.')
    return None
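As a quick illustration of what is_1pexp matches (my own example, assuming the tensor module referenced above is theano.tensor): a graph built literally as 1 + exp(x) is recognized, while exp(x) alone is not.

import theano.tensor as T

x = T.vector('x')
print(is_1pexp(1 + T.exp(x)))   # expected: (False, x)
print(is_1pexp(T.exp(x)))       # expected: None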
Example 7: __init__
# Required import: import theano [as alias]
# Or: from theano import config [as alias]
def __init__(self, atexit_print=True, flag_time_thunks=None, **kwargs):
    if (hasattr(theano, 'sandbox') and
            hasattr(theano.sandbox, 'cuda') and
            theano.sandbox.cuda.cuda_enabled):
        if os.environ.get('CUDA_LAUNCH_BLOCKING', '0') != '1':
            raise Exception(
                "You are running the Theano profiler with CUDA enabled."
                " Theano GPU ops execution is asynchronous by default."
                " So by default, the profile is useless."
                " You must set the environment variable"
                " CUDA_LAUNCH_BLOCKING to 1 to tell the CUDA driver to"
                " synchronize the execution to get a meaningful profile.")
    self.apply_callcount = {}
    self.output_size = {}
    self.apply_time = {}
    self.apply_cimpl = {}
    self.variable_shape = {}
    self.variable_strides = {}
    if flag_time_thunks is None:
        self.flag_time_thunks = config.profiling.time_thunks
    else:
        self.flag_time_thunks = flag_time_thunks
    self.__dict__.update(kwargs)
    if atexit_print:
        global _atexit_print_list
        _atexit_print_list.append(self)
        global _atexit_registered
        if not _atexit_registered:
            atexit.register(_atexit_print_fn)
            _atexit_registered = True
    self.ignore_first_call = theano.config.profiling.ignore_first_call
Example 8: fetch_val_for_key
# Required import: import theano [as alias]
# Or: from theano import config [as alias]
def fetch_val_for_key(key, delete_key=False):
    """Return the overriding config value for a key.
    A successful search returns a string value.
    An unsuccessful search raises a KeyError.
    The (decreasing) priority order is:
    - THEANO_FLAGS
    - ~/.theanorc
    """
    # first try to find it in the FLAGS
    try:
        if delete_key:
            return THEANO_FLAGS_DICT.pop(key)
        return THEANO_FLAGS_DICT[key]
    except KeyError:
        pass
    # next try to find it in the config file
    # config file keys can be of form option, or section.option
    key_tokens = key.rsplit('.', 1)
    if len(key_tokens) > 2:
        raise KeyError(key)
    if len(key_tokens) == 2:
        section, option = key_tokens
    else:
        section, option = 'global', key
    try:
        try:
            return theano_cfg.get(section, option)
        except ConfigParser.InterpolationError:
            return theano_raw_cfg.get(section, option)
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        raise KeyError(key)
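A hedged usage sketch (the flag values are hypothetical): with THEANO_FLAGS set in the environment, the flags dict wins over ~/.theanorc, and keys may use the section.option form.

# Hypothetical: run with THEANO_FLAGS="floatX=float64" in the environment.
try:
    print(fetch_val_for_key('floatX'))        # 'float64', taken from THEANO_FLAGS
    print(fetch_val_for_key('gcc.cxxflags'))  # section.option form, read from ~/.theanorc
except KeyError:
    pass  # the key is set neither in THEANO_FLAGS nor in the config file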
Example 9: get_config_md5
# Required import: import theano [as alias]
# Or: from theano import config [as alias]
def get_config_md5():
    """
    Return a string md5 of the current config options. It should be such that
    we can safely assume that two different config setups will lead to two
    different strings.
    We only take into account config options for which `in_c_key` is True.
    """
    all_opts = sorted([c for c in _config_var_list if c.in_c_key],
                      key=lambda cv: cv.fullname)
    return theano.gof.utils.hash_from_code('\n'.join(
        ['%s = %s' % (cv.fullname, cv.__get__(True, None)) for cv in all_opts]))
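Calling it is trivial; the point of the digest is that any change to a config option with in_c_key=True (for example floatX) yields a different string, so compiled C modules stay tied to the configuration that produced them. A hypothetical call:

print(get_config_md5())   # a hex md5 digest; the value depends on your current config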
Example 10: __set__
# Required import: import theano [as alias]
# Or: from theano import config [as alias]
def __set__(self, cls, val):
    if not self.allow_override and hasattr(self, 'val'):
        raise Exception(
            "Can't change the value of this config parameter "
            "after initialization!")
    # print "SETTING PARAM", self.fullname,(cls), val
    if self.filter:
        self.val = self.filter(val)
    else:
        self.val = val
Example 11: get_module_cache
# Required import: import theano [as alias]
# Or: from theano import config [as alias]
def get_module_cache(init_args=None):
    """
    Parameters
    ----------
    init_args
        If not None, the (k, v) pairs in this dictionary will be forwarded to
        the ModuleCache constructor as keyword arguments.
    """
    return cmodule.get_module_cache(config.compiledir, init_args=init_args)
Example 12: get_persistent_module_cache
# Required import: import theano [as alias]
# Or: from theano import config [as alias]
def get_persistent_module_cache():
    global _persistent_module_cache
    if _persistent_module_cache is None:
        _persistent_module_cache = CallCache(os.path.join(config.compiledir,
                                                          'persistent_cache'))
    return _persistent_module_cache
Example 13: get_c_extract
# Required import: import theano [as alias]
# Or: from theano import config [as alias]
def get_c_extract(r, name, sub):
    """
    Wrapper around c_extract that initializes py_name from storage.
    """
    # `c_extract` is called when getting the value of an apply node's
    # input from the compute map, before being used by its clients.
    # If one of the clients has `check_input=True`, we need to perform
    # checks on the variable.
    # However that code is not used by C code of the apply node creating
    # this variable, so there is no need to check `r.owner.op.check_input`.
    if any([getattr(c.op, 'check_input', config.check_input)
            for (c, _) in r.clients
            if not isinstance(c, string_types)]):
        # check_broadcast is just a hack to easily remove just the
        # broadcast check on the old GPU back-end. This check isn't
        # done in the new GPU back-end or on the CPU.
        if any([getattr(c.op, 'check_broadcast', True)
                for (c, _) in r.clients
                if not isinstance(c, string_types)]):
            c_extract = r.type.c_extract(name, sub, True)
        else:
            try:
                c_extract = r.type.c_extract(
                    name, sub, True,
                    check_broadcast=False)
            except TypeError as e:
                c_extract = r.type.c_extract(name, sub, True)
    else:
        c_extract = r.type.c_extract(name, sub, False)
    pre = """
    py_%(name)s = PyList_GET_ITEM(storage_%(name)s, 0);
    {Py_XINCREF(py_%(name)s);}
    """ % locals()
    return pre + c_extract
Example 14: get_c_extract_out
# Required import: import theano [as alias]
# Or: from theano import config [as alias]
def get_c_extract_out(r, name, sub):
    """
    Wrapper around c_extract_out that initializes py_name from storage.
    """
    # `c_extract_out` is used to extract an output variable from
    # the compute map, to be used as pre-allocated memory for `r`
    # before its value gets computed.
    # If the node producing `r` has `check_inputs=True`, it may
    # also perform type checks on the initial value of the output,
    # so we need to pass `check_input=True` to `c_extract_out`.
    # However, that code is not used by potential clients of `r`,
    # so we do not need to check them.
    check_input = getattr(r.owner.op, 'check_input', config.check_input)
    # check_broadcast is just a hack to easily remove just the
    # broadcast check on the old GPU back-end. This check isn't
    # done in the new GPU back-end or on the CPU.
    if getattr(r.owner.op, 'check_broadcast', True):
        c_extract = r.type.c_extract_out(name, sub, check_input)
    else:
        try:
            c_extract = r.type.c_extract_out(name, sub, check_input,
                                             check_broadcast=False)
        except TypeError as e:
            c_extract = r.type.c_extract_out(name, sub, check_input)
    pre = """
    py_%(name)s = PyList_GET_ITEM(storage_%(name)s, 0);
    {Py_XINCREF(py_%(name)s);}
    """ % locals()
    return pre + c_extract
Example 15: compile_cmodule
# Required import: import theano [as alias]
# Or: from theano import config [as alias]
def compile_cmodule(self, location=None):
    """
    This compiles the source code for this linker and returns a
    loaded module.
    """
    if location is None:
        location = cmodule.dlimport_workdir(config.compiledir)
    mod = self.get_dynamic_module()
    c_compiler = self.c_compiler()
    libs = self.libraries()
    preargs = self.compile_args()
    # We want to compute the code without the lock
    src_code = mod.code()
    get_lock()
    try:
        _logger.debug("LOCATION %s", str(location))
        module = c_compiler.compile_str(
            module_name=mod.code_hash,
            src_code=src_code,
            location=location,
            include_dirs=self.header_dirs(),
            lib_dirs=self.lib_dirs(),
            libs=libs,
            preargs=preargs)
    except Exception as e:
        e.args += (str(self.fgraph),)
        raise
    finally:
        release_lock()
    return module