This article collects typical usage examples of theano.printing in Python. If you are wondering what theano.printing does, how to use it, or want to see it in real code, the hand-picked examples below may help. You can also explore further usage examples from the enclosing theano module.
Fourteen code examples of theano.printing are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps recommend better Python code examples.
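Most of the examples below lean on three entry points: theano.printing.debugprint (a text dump of a symbolic graph or compiled function), theano.printing.pydotprint (a graph drawing via pydot), and theano.printing.Print (an identity op that prints values at run time). As a quick orientation, here is a minimal, self-contained sketch of the first and last of these; the variable names are illustrative only and not taken from the examples:

import theano
import theano.tensor as T

x = T.dvector('x')
y = x ** 2 + 1

# Dump the symbolic graph as text (a compiled function works too).
theano.printing.debugprint(y)

# Print op: behaves like the identity but prints its input when evaluated.
y_printed = theano.printing.Print('y value:')(y)
f = theano.function([x], y_printed)
f([1.0, 2.0, 3.0])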
Example 1: test_inplace0
# Required import: import theano [as alias]
# Or: from theano import printing [as alias]
def test_inplace0():
    # should fail to insert gemm_inplace because gemm_inplace would
    # create cycles
    X, Y, Z, a, b = T.matrix('X'), T.matrix('Y'), T.matrix('Z'), T.scalar(
        'a'), T.scalar('b')
    R, S, c = T.matrix('R'), T.matrix('S'), T.scalar('c')

    f = inplace_func([Z, b, R, S],
                     [Z * (Z + b * T.dot(R, S).T)], mode='FAST_RUN')
    if (gemm_inplace in [n.op for n in f.maker.fgraph.apply_nodes]):
        print(pp(f.maker.fgraph.outputs[0]))
        raise Failure('gemm_inplace in graph')
    assert gemm_no_inplace in [n.op for n in f.maker.fgraph.apply_nodes]

    # gemm_inplace should be inserted here, to work in-place on Z*c
    f = inplace_func([X, Y, Z, a, b, R, S, c],
                     [Z * (c * Z + a * T.dot(X, Y) + b * T.dot(R, S).T)],
                     mode='FAST_RUN')
    if (not gemm_inplace in [n.op for n in f.maker.fgraph.apply_nodes]):
        theano.printing.debugprint(f)
        raise Failure('no gemm_inplace in graph')
Example 2: test_pydotprint_long_name
# Required import: import theano [as alias]
# Or: from theano import printing [as alias]
def test_pydotprint_long_name():
    """This is a REALLY PARTIAL TEST.

    It prints a graph where there are variable and apply nodes whose long
    names are different, but not the shortened names.
    We should not merge those nodes in the dot graph.
    """
    # Skip test if pydot is not available.
    if not theano.printing.pydot_imported:
        raise SkipTest('pydot not available')

    x = tensor.dvector()
    mode = theano.compile.mode.get_default_mode().excluding("fusion")
    f = theano.function([x], [x * 2, x + x], mode=mode)
    f([1, 2, 3, 4])

    theano.printing.pydotprint(f, max_label_size=5,
                               print_output_file=False)
    theano.printing.pydotprint([x * 2, x + x],
                               max_label_size=5,
                               print_output_file=False)
Example 3: test_printing_scan
# Required import: import theano [as alias]
# Or: from theano import printing [as alias]
def test_printing_scan():
    # Skip test if pydot is not available.
    if not theano.printing.pydot_imported:
        raise SkipTest('pydot not available')

    def f_pow2(x_tm1):
        return 2 * x_tm1

    state = theano.tensor.scalar('state')
    n_steps = theano.tensor.iscalar('nsteps')
    output, updates = theano.scan(f_pow2,
                                  [],
                                  state,
                                  [],
                                  n_steps=n_steps,
                                  truncate_gradient=-1,
                                  go_backwards=False)
    f = theano.function([state, n_steps],
                        output,
                        updates=updates,
                        allow_input_downcast=True)
    theano.printing.pydotprint(output, scan_graphs=True)
    theano.printing.pydotprint(f, scan_graphs=True)
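As a usage note for the example above: pydotprint can also write the drawing to a file of your choosing through its outfile and format arguments. A small, hedged sketch (the file name is made up, and it assumes pydot is available, as the test already checks):

# Hypothetical: write the scan graph of `f` from the test above to a chosen PNG.
theano.printing.pydotprint(f, outfile='scan_graph.png', format='png',
                           scan_graphs=True, print_output_file=False)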
Example 4: test_upcasting_scalar_nogemm
# Required import: import theano [as alias]
# Or: from theano import printing [as alias]
def test_upcasting_scalar_nogemm():
    # Test that the optimization does not crash when the scale has an
    # incorrect dtype, and forces upcasting of the result
    v = T.fmatrix('v')
    w = T.fmatrix('w')
    t = T.fmatrix('t')
    alpha = T.dscalar('a')

    rval = T.dot(w, v) * alpha + t

    f = theano.function([w, v, t, alpha], rval)
    t = f.maker.fgraph.toposort()
    assert numpy.sum([isinstance(n.op, Gemm) for n in t]) == 0
    # theano.printing.debugprint(f, print_type=True)

    v = T.fmatrix('v')
    w = T.fmatrix('w')
    t = T.fmatrix('t')
    alpha = T.cscalar('a')

    on_opt_error = config.on_opt_error
    try:
        config.on_opt_error = 'raise'
        rval = T.dot(w, v) * alpha + t
        f = theano.function([w, v, t, alpha], rval)
    finally:
        config.on_opt_error = on_opt_error

    t = f.maker.fgraph.toposort()
    assert numpy.sum([isinstance(n.op, Gemm) for n in t]) == 0
    # theano.printing.debugprint(f, print_type=True)
Example 5: test_inplace1
# Required import: import theano [as alias]
# Or: from theano import printing [as alias]
def test_inplace1():
    X, Y, Z, a, b = XYZab()
    # with > 2 terms in the overall addition
    f = inplace_func([X, Y, Z],
                     [Z + Z + T.dot(X, Y)], mode='FAST_RUN')
    # theano.printing.debugprint(f)
    # it doesn't work inplace because we didn't mark Z as mutable input
    assert [n.op for n in f.maker.fgraph.apply_nodes] == [gemm_no_inplace]
Example 6: test_upcasting_scalar_nogemv
# Required import: import theano [as alias]
# Or: from theano import printing [as alias]
def test_upcasting_scalar_nogemv(self):
    # Test that the optimization does not crash when the scale has
    # an incorrect dtype, and forces upcasting of the result
    # We put this test in this class to test it on the gpu too.
    vs = self.get_data()
    alpha_v, beta_v, a_v, x_v, y_v = vs
    alpha_v = alpha_v.astype("float64")
    a_v = a_v.astype("float32")
    x_v = x_v.astype("float32")
    y_v = y_v.astype("float32")

    alpha = T.dscalar('alpha')
    a = self.shared(a_v)
    x = self.shared(x_v)
    y = self.shared(y_v)

    rval = T.dot(a, x) * alpha + y

    f = theano.function([alpha], rval, mode=self.mode)
    # this function is currently optimized so that the gemv is
    # done inplace on a temporarily allocated buffer, which is
    # then scaled by alpha and added to y with a fused elemwise.
    n_gemvs = 0
    # theano.printing.debugprint(f, print_type=True)
    for node in f.maker.fgraph.toposort():
        if node.op == self.gemv_inplace:
            n_gemvs += 1
            assert node.outputs[0].dtype == 'float32'
    assert n_gemvs == 1, n_gemvs
    self.assertFunctionContains1(f, self.gemv_inplace)
    f(alpha_v)
Example 7: apply
# Required import: import theano [as alias]
# Or: from theano import printing [as alias]
def apply(self, fgraph):
    import theano.printing
    print("PrintCurrentFunctionGraph:", self.header)
    theano.printing.debugprint(fgraph.outputs)
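For context, an apply(self, fgraph) method like this one normally lives on a theano.gof.Optimizer subclass, so it can be dropped into an optimization pipeline purely to print the graph at that stage. A minimal sketch of such a wrapper; the class name and the header attribute are assumed here for illustration, not taken from this page:

import theano.printing
from theano.gof import Optimizer

class PrintCurrentFunctionGraph(Optimizer):
    """Do-nothing optimizer that only prints the graph it is handed (sketch)."""

    def __init__(self, header):
        self.header = header

    def apply(self, fgraph):
        # Print a label followed by a text dump of the current graph.
        print("PrintCurrentFunctionGraph:", self.header)
        theano.printing.debugprint(fgraph.outputs)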
Example 8: test_pydotprint_cond_highlight
# Required import: import theano [as alias]
# Or: from theano import printing [as alias]
def test_pydotprint_cond_highlight():
    """This is a REALLY PARTIAL TEST.

    It was written to help debug stuff.
    """
    # Skip test if pydot is not available.
    if not theano.printing.pydot_imported:
        raise SkipTest('pydot not available')

    x = tensor.dvector()
    f = theano.function([x], x * 2)
    f([1, 2, 3, 4])

    s = StringIO()
    new_handler = logging.StreamHandler(s)
    new_handler.setLevel(logging.DEBUG)
    orig_handler = theano.logging_default_handler

    theano.theano_logger.removeHandler(orig_handler)
    theano.theano_logger.addHandler(new_handler)
    try:
        theano.printing.pydotprint(f, cond_highlight=True,
                                   print_output_file=False)
    finally:
        theano.theano_logger.addHandler(orig_handler)
        theano.theano_logger.removeHandler(new_handler)

    assert (s.getvalue() == 'pydotprint: cond_highlight is set but there'
            ' is no IfElse node in the graph\n')
Example 9: test_pydotprint_return_image
# Required import: import theano [as alias]
# Or: from theano import printing [as alias]
def test_pydotprint_return_image():
    # Skip test if pydot is not available.
    if not theano.printing.pydot_imported:
        raise SkipTest('pydot not available')

    x = tensor.dvector()
    ret = theano.printing.pydotprint(x * 2, return_image=True)
    assert isinstance(ret, (str, bytes))
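With return_image=True, pydotprint hands back the rendered image data (typically str on Python 2 and bytes on Python 3, hence the assert above). A hedged follow-up sketch showing how that data might be persisted; the file name is invented:

# Hypothetical follow-up: save the returned image data from the test above to disk.
data = ret if isinstance(ret, bytes) else ret.encode('latin-1')
with open('x_times_2.png', 'wb') as fh:
    fh.write(data)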
Example 10: test_pydotprint_variables
# Required import: import theano [as alias]
# Or: from theano import printing [as alias]
def test_pydotprint_variables():
    """This is a REALLY PARTIAL TEST.

    It was written to help debug stuff.
    It makes sure the code runs.
    """
    # Skip test if pydot is not available.
    if not theano.printing.pydot_imported:
        raise SkipTest('pydot not available')

    x = tensor.dvector()

    s = StringIO()
    new_handler = logging.StreamHandler(s)
    new_handler.setLevel(logging.DEBUG)
    orig_handler = theano.logging_default_handler

    theano.theano_logger.removeHandler(orig_handler)
    theano.theano_logger.addHandler(new_handler)
    try:
        theano.printing.pydotprint(x * 2)
        if not theano.printing.pd.__name__ == "pydot_ng":
            theano.printing.pydotprint_variables(x * 2)
    finally:
        theano.theano_logger.addHandler(orig_handler)
        theano.theano_logger.removeHandler(new_handler)
Example 11: printing
# Required import: import theano [as alias]
# Or: from theano import printing [as alias]
def printing(x, string=''):
    """Prints the value of a tensor variable.

    :param x: Tensor variable
    :param string: Prefix to print
    :return: The same tensor variable as x
    """
    return theano.printing.Print(string)(x)
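A short, hypothetical usage of this helper (the variable names are invented): because Print is an identity op, the wrapped tensor can be used wherever the original would be, and its value is printed whenever it is computed.

import theano
import theano.tensor as T

a = T.vector('a')
b = printing(a * 2, 'a * 2 =')   # same symbolic value as a * 2, but printed at run time
f = theano.function([a], b + 1)
f([1.0, 2.0])                    # prints "a * 2 = ..." and returns a * 2 + 1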
Example 12: dbg_hook
# Required import: import theano [as alias]
# Or: from theano import printing [as alias]
def dbg_hook(hook, x):
    if not isinstance(x, TT.TensorVariable):
        x.out = theano.printing.Print(global_fn=hook)(x.out)
        return x
    else:
        return theano.printing.Print(global_fn=hook)(x)
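A hedged sketch of how dbg_hook might be called. The hook function here is invented; its signature follows theano.printing.Print's global_fn convention, which passes the Print op and the runtime value:

import theano.tensor as TT

def my_hook(op, value):
    # Called every time the wrapped variable is evaluated.
    print('dbg_hook fired, value shape:', getattr(value, 'shape', None))

x = TT.matrix('x')
x = dbg_hook(my_hook, x)   # x now reports through my_hook whenever it is computed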
Example 13: just_gemm
# Required import: import theano [as alias]
# Or: from theano import printing [as alias]
def just_gemm(i, o, ishapes=[(4, 3), (3, 5), (4, 5), (), ()],
              max_graphlen=0, expected_nb_gemm=1):
    try:
        f = inplace_func(
            [In(ii, mutable=True, allow_downcast=True) for ii in i],
            o,
            mode='FAST_RUN',
            on_unused_input='ignore')
        nb_gemm = 0
        for node in f.maker.fgraph.apply_nodes:
            if isinstance(node.op, T.Dot):
                raise Failure('dot not changed to gemm_inplace in graph')
            if node.op == _dot22:
                raise Failure('_dot22 not changed to gemm_inplace in graph')
            if node.op == gemm_inplace:
                nb_gemm += 1
        assert nb_gemm == expected_nb_gemm, (nb_gemm, expected_nb_gemm)

        g = inplace_func(i, o, mode=compile.Mode(linker='py', optimizer=None),
                         allow_input_downcast=True, on_unused_input='ignore')
        for node in g.maker.fgraph.apply_nodes:
            if node.op == gemm_inplace:
                raise Exception('gemm_inplace in original graph')

        graphlen = len(f.maker.fgraph.toposort())
        if max_graphlen and (graphlen <= max_graphlen):
            # theano.printing.debugprint(f)
            assert False, 'graphlen=%i>%i' % (graphlen, max_graphlen)

        rng = numpy.random.RandomState(unittest_tools.fetch_seed(234))
        r0 = f(*[numpy.asarray(rng.randn(*sh), config.floatX)
                 for sh in ishapes])
        rng = numpy.random.RandomState(unittest_tools.fetch_seed(234))
        r1 = g(*[numpy.asarray(rng.randn(*sh), config.floatX)
                 for sh in ishapes])
        max_abs_err = numpy.max(numpy.abs(r0[0] - r1[0]))
        eps = 1.0e-8
        if config.floatX == 'float32':
            eps = 1.0e-6
        if max_abs_err > eps:
            raise Failure('GEMM is computing the wrong output. max_rel_err =',
                          max_abs_err)
    except Failure:
        for node in f.maker.fgraph.toposort():
            print('GRAPH', node)
        raise
Example 14: test_scan_debugprint1
# Required import: import theano [as alias]
# Or: from theano import printing [as alias]
def test_scan_debugprint1():
    k = tensor.iscalar("k")
    A = tensor.dvector("A")

    # Symbolic description of the result
    result, updates = theano.scan(fn=lambda prior_result, A: prior_result * A,
                                  outputs_info=tensor.ones_like(A),
                                  non_sequences=A,
                                  n_steps=k)

    final_result = result[-1]

    output_str = theano.printing.debugprint(final_result, file='str')
    lines = []
    for line in output_str.split('\n'):
        lines += [line]

    expected_output = """Subtensor{int64} [id A] ''
     |Subtensor{int64::} [id B] ''
     | |for{cpu,scan_fn} [id C] ''
     | | |k [id D]
     | | |IncSubtensor{Set;:int64:} [id E] ''
     | | | |AllocEmpty{dtype='float64'} [id F] ''
     | | | | |Elemwise{add,no_inplace} [id G] ''
     | | | | | |k [id D]
     | | | | | |Subtensor{int64} [id H] ''
     | | | | |   |Shape [id I] ''
     | | | | |   | |Rebroadcast{0} [id J] ''
     | | | | |   |   |DimShuffle{x,0} [id K] ''
     | | | | |   |     |Elemwise{second,no_inplace} [id L] ''
     | | | | |   |       |A [id M]
     | | | | |   |       |DimShuffle{x} [id N] ''
     | | | | |   |         |TensorConstant{1.0} [id O]
     | | | | |   |Constant{0} [id P]
     | | | | |Subtensor{int64} [id Q] ''
     | | | |   |Shape [id R] ''
     | | | |   | |Rebroadcast{0} [id J] ''
     | | | |   |Constant{1} [id S]
     | | | |Rebroadcast{0} [id J] ''
     | | | |ScalarFromTensor [id T] ''
     | | |   |Subtensor{int64} [id H] ''
     | | |A [id M]
     | |Constant{1} [id U]
     |Constant{-1} [id V]

    Inner graphs of the scan ops:

    for{cpu,scan_fn} [id C] ''
     >Elemwise{mul,no_inplace} [id W] ''
     > |<TensorType(float64, vector)> [id X] -> [id E]
     > |A_copy [id Y] -> [id M]"""

    for truth, out in zip(expected_output.split("\n"), lines):
        assert truth.strip() == out.strip()