本文整理汇总了Python中theano.compile.mode.get_default_mode函数的典型用法代码示例。如果您正苦于以下问题:Python get_default_mode函数的具体用法?Python get_default_mode怎么用?Python get_default_mode使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了get_default_mode函数的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: function
def function(self, name=None):
    """Compile and return a theano function computing this representation.

    Parameters
    ----------
    name : str, optional
        Name attached to the compiled theano function.
    """
    sym_input = tensor.matrix()
    compile_kwargs = {'name': name}
    if self.cpu_only:
        # Keep the graph off the GPU by removing gpu optimizations from the mode.
        compile_kwargs['mode'] = get_default_mode().excluding("gpu")
    return theano.function([sym_input], self(sym_input), **compile_kwargs)
示例2: get_mode
def get_mode(gpu):
    """Return a compilation mode for this test, optionally targeting the GPU.

    Parameters
    ----------
    gpu : bool
        When true, enable the GPU-related optimization tags.
    """
    # Copy so the globally shared default mode is never mutated.
    m = copy.copy(get_default_mode())
    if gpu:
        m = m.including('gpu', 'gpu_local_optimizations', 'local_cut_gpu_host_gpu', 'local_gpu_multinomial')
    # A pure-Python linker cannot run the compiled ops this test needs;
    # swap in the predefined c|py linker instead.
    if isinstance(m.linker, theano.gof.PerformLinker):
        m.linker = predefined_linkers['c|py']
    return m
示例3: get_mode
def get_mode(gpu):
    """Return a compilation mode, upgraded from FAST_COMPILE and optionally
    extended with the GPU optimization tags.

    Parameters
    ----------
    gpu : bool
        When true, include the GPU-related optimizations in the mode.
    """
    mode = get_default_mode()
    # FAST_COMPILE skips the optimizations this test relies on; use FAST_RUN.
    if theano.config.mode == 'FAST_COMPILE':
        mode = theano.compile.get_mode('FAST_RUN')
    if not gpu:
        return mode
    return mode.including('gpu', 'gpu_local_optimizations',
                          'local_cut_gpu_host_gpu',
                          'local_gpu_multinomial')
示例4: get_mode
def get_mode(gpu):
    """Return a compilation mode for this test, optionally targeting the GPU.

    Parameters
    ----------
    gpu : bool
        When true, enable the GPU-related optimization tags.
    """
    base = get_default_mode()
    # Never mutate the shared default mode; operate on a private copy.
    result = copy.copy(base)
    if gpu:
        result = result.including(
            "gpu",
            "gpu_local_optimizations",
            "local_cut_gpu_host_gpu",
            "local_gpu_multinomial",
        )
    # The pure-Python linker cannot execute the compiled ops; use c|py.
    if isinstance(result.linker, theano.gof.PerformLinker):
        result.linker = predefined_linkers["c|py"]
    # Prefer C thunks when the linker supports toggling them.
    if hasattr(result.linker, "c_thunks"):
        result.linker.c_thunks = True
    return result
示例5: setUp
def setUp(self):
    """Prepare NaN/Inf test values, symbolic variables, and a usable mode."""
    raw_vals = [0, 1, numpy.nan, numpy.inf, -numpy.inf,
                [numpy.nan, numpy.inf, -numpy.inf, 0, 1, -1]]
    self.test_vals = [numpy.array(v, dtype=config.floatX) for v in raw_vals]
    self.scalar = tensor.scalar()
    self.vector = tensor.vector()
    mode = get_default_mode()
    if isinstance(mode, theano.compile.debugmode.DebugMode):
        # Disable the check preventing usage of NaN / Inf values,
        # working on a copy so the shared mode stays untouched.
        mode = copy(mode)
        mode.check_isfinite = False
    self.mode = mode
示例6: function
def function(self, name=None):
    """Return a compiled theano function computing this representation.

    Parameters
    ----------
    name : string, optional
        name of the function
    """
    sym_in = tensor.matrix()
    if not self.cpu_only:
        return theano.function([sym_in], self(sym_in), name=name)
    # CPU-only: strip the gpu optimizations from the default mode.
    cpu_mode = get_default_mode().excluding('gpu')
    return theano.function([sym_in], self(sym_in), name=name, mode=cpu_mode)
示例7: test_naacl_model
def test_naacl_model(iters_per_unsup=3, iters_per_sup=3,
                     optimizer=None, realistic=False):
    """Integration test for the NAACL model (Python 2, deprecated
    theano.compile.module API).

    Builds the model, runs a few pretraining (unsupervised) and
    finetuning (supervised) updates, and compares the resulting costs
    against hard-coded reference values.

    Parameters
    ----------
    iters_per_unsup : int
        Pretraining updates per outer iteration; reference costs are
        only asserted when this equals 3.
    iters_per_sup : int
        Finetuning reference cost is only asserted when this equals 10.
    optimizer : optional
        When given, compile with a c|py Mode using this optimizer
        instead of the default mode.
    realistic : bool
        Select create_realistic() over create() to build the model.

    NOTE(review): this block's indentation was lost; the nesting below
    was reconstructed from the control-flow keywords — confirm against
    the upstream test before relying on it.
    """
    #print "BUILDING MODEL"
    import time
    t = time.time()
    if optimizer:
        mode = theano.Mode(linker='c|py', optimizer=optimizer)
    else:
        mode = get_default_mode()
        # DebugMode is very slow; cut the iteration counts down.
        if mode.__class__.__name__ == 'DebugMode':
            iters_per_unsup = 1
            iters_per_sup = 1
    if realistic:
        m = create_realistic(compile_mode=mode)
    else:
        m = create(compile_mode=mode)
    #print 'BUILD took %.3fs'%(time.time() - t)
    prog_str = []
    idx_of_node = {}
    # Walk the pretraining graph in topological order, remembering each
    # node's position so the (disabled) debug print can show producers.
    for i, node in enumerate(m.pretraining_update.maker.fgraph.toposort()):
        idx_of_node[node] = i
        if False and i > -1:  # debugging aid, deliberately disabled
            print ' ', i, node, [(ii, idx_of_node.get(ii.owner, 'IN')) for ii in node.inputs]
        prog_str.append(str(node))
    #print input_pretraining_gradients[4].owner.inputs
    #print input_pretraining_gradients[4].owner.inputs[1].owner.inputs
    #sys.exit()
    #print "PROGRAM LEN %i HASH %i"% (len(m.pretraining_update.maker.fgraph.apply_nodes), reduce(lambda a, b: hash(a) ^ hash(b),prog_str))
    # Deterministic inputs: three seeded random batches plus fixed targets.
    rng = N.random.RandomState(unittest_tools.fetch_seed(23904))
    inputs = [rng.rand(10, m.input_size) for i in 1, 2, 3]
    targets = N.asarray([0, 3, 4, 2, 3, 4, 4, 2, 1, 0])
    #print inputs
    #print 'UNSUPERVISED PHASE'
    t = time.time()
    for i in xrange(3):
        for j in xrange(iters_per_unsup):
            try:
                known_fail = False
                m.pretraining_update(*inputs)
            except ValueError:
                known_fail = True
            except TypeError:
                known_fail = True
            if known_fail:
                # The deprecated compile.module raises instead of warning
                # on badly-typed updates; report it as a known failure.
                raise KnownFailureTest("Deprecated compile.module fails to "
                        "give a sensible warning when updates to a variable "
                        "have the wrong type")
        s0, s1 = [str(j) for j in m.pretraining_update(*inputs)]
        #print 'huh?', i, iters_per_unsup, iters_per_unsup * (i+1), s0, s1
    # Reference costs recorded for the default iteration count only.
    if iters_per_unsup == 3:
        assert s0.startswith('0.927793')  # '0.403044')
        assert s1.startswith('0.068035')  # '0.074898')
    #print 'UNSUPERVISED took %.3fs'%(time.time() - t)
    #print 'FINETUNING GRAPH'
    #print 'SUPERVISED PHASE COSTS (%s)'%optimizer
    t = time.time()
    for i in xrange(3):
        for j in xrange(iters_per_unsup):
            m.finetuning_update(*(inputs + [targets]))
        s0 = str(m.finetuning_update(*(inputs + [targets])))
        #print iters_per_sup * (i+1), s0
    if iters_per_sup == 10:
        s0f = float(s0)
        assert 19.7042 < s0f and s0f < 19.7043