This page collects typical usage examples of the Python method theano.tensor.fmatrix. If you are asking what tensor.fmatrix does, how to use it, or where to find examples of it, the curated code samples below may help. You can also read further about its containing module, theano.tensor.
Below are 15 code examples of the tensor.fmatrix method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
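Before the excerpts, a quick orientation: tensor.fmatrix declares a symbolic two-dimensional float32 variable, which you then compile into a callable with theano.function. The following is a minimal sketch, not taken from the examples below; the variable names are purely illustrative.

import numpy
import theano
import theano.tensor as T

# Declare a symbolic float32 matrix and build a small graph from it.
x = T.fmatrix('x')
y = (x ** 2).sum()

# Compile the graph and evaluate it on concrete float32 data.
f = theano.function([x], y)
print(f(numpy.ones((2, 3), dtype='float32')))  # prints 6.0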
Example 1: test_blocksparse_inplace_gemv_opt
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fmatrix [as alias]
def test_blocksparse_inplace_gemv_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()
    o = sparse_block_dot(W, h, iIdx, b, oIdx)
    f = theano.function([W, h, iIdx, b, oIdx], o)
    assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')
    if theano.config.mode == "FAST_COMPILE":
        assert not f.maker.fgraph.toposort()[-1].op.inplace
    else:
        assert f.maker.fgraph.toposort()[-1].op.inplace
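A note on the branch at the end: in FAST_COMPILE mode Theano skips most graph optimizations, including the in-place substitutions, which is why the test expects the out-of-place op there and the in-place variant under every other mode. The same pattern appears in Example 2.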
Example 2: test_blocksparse_inplace_outer_opt
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fmatrix [as alias]
def test_blocksparse_inplace_outer_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()
    o = sparse_block_dot(W, h, iIdx, b, oIdx)
    theano.printing.debugprint(tensor.grad(o.sum(), wrt=W))
    f = theano.function([W, h, iIdx, b, oIdx],
                        [o, tensor.grad(o.sum(), wrt=W)])
    assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')
    if theano.config.mode == "FAST_COMPILE":
        assert not f.maker.fgraph.toposort()[-1].op.inplace
    else:
        assert f.maker.fgraph.toposort()[-1].op.inplace
Example 3: test_sparseblockdot
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fmatrix [as alias]
def test_sparseblockdot(self):
    """
    Compares the numpy version of sparseblockgemv to sparse_block_dot.
    """
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.imatrix()
    oIdx = tensor.imatrix()
    o = sparse_block_dot(W, h, iIdx, b, oIdx)
    f = theano.function([W, h, iIdx, b, oIdx], o, mode=self.mode)
    W_val, h_val, iIdx_val, b_val, oIdx_val = \
        BlockSparse_Gemv_and_Outer.gemv_data()
    th_out = f(W_val, h_val, iIdx_val, b_val, oIdx_val)
    ref_out = BlockSparse_Gemv_and_Outer.gemv_numpy(
        b_val.take(oIdx_val, axis=0), W_val, h_val, iIdx_val, oIdx_val)
    utt.assert_allclose(ref_out, th_out)
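The reference helper BlockSparse_Gemv_and_Outer.gemv_numpy lives in the surrounding test module and is not shown here. As a rough guide to what is being checked, here is a NumPy sketch of the documented semantics of sparse_block_dot; the function name and the shape conventions in the comments are illustrative assumptions, not the test suite's actual helper.

import numpy

def sparse_block_dot_numpy(W, h, iIdx, b, oIdx):
    # Assumed shapes: W (iBlocks, oBlocks, iSize, oSize), h (batch, iWin, iSize),
    # iIdx (batch, iWin), b (oBlocks, oSize), oIdx (batch, oWin).
    batch, oWin = oIdx.shape
    out = numpy.empty((batch, oWin, W.shape[3]), dtype=h.dtype)
    for n in range(batch):
        for j in range(oWin):
            # Start from the bias of the selected output block ...
            acc = b[oIdx[n, j]].astype(h.dtype)
            # ... and accumulate dot products over the selected input blocks.
            for i in range(iIdx.shape[1]):
                acc = acc + numpy.dot(h[n, i], W[iIdx[n, i], oIdx[n, j]])
            out[n, j] = acc
    return out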
Example 4: test_sparseblockgemv
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fmatrix [as alias]
def test_sparseblockgemv(self):
    """
    Compares the numpy and theano versions of sparseblockgemv.
    """
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.imatrix()
    oIdx = tensor.imatrix()
    o = self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)
    f = theano.function([W, h, iIdx, b, oIdx], o, mode=self.mode)
    W_val, h_val, iIdx_val, b_val, oIdx_val = \
        BlockSparse_Gemv_and_Outer.gemv_data()
    th_out = f(W_val, h_val, iIdx_val, b_val, oIdx_val)
    ref_out = BlockSparse_Gemv_and_Outer.gemv_numpy(
        b_val.take(oIdx_val, axis=0), W_val, h_val, iIdx_val, oIdx_val)
    utt.assert_allclose(ref_out, th_out)
Example 5: test_1msigmoid
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fmatrix [as alias]
def test_1msigmoid(self):
    if not register_local_1msigmoid:
        return
    m = self.get_mode()
    x = T.fmatrix()
    # tests exp_over_1_plus_exp
    f = theano.function([x], 1 - T.exp(x) / (1 + T.exp(x)), mode=m)
    assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')
    assert [node.op for node in f.maker.fgraph.toposort()] == [
        tensor.neg, sigmoid_inplace]
    # tests inv_1_plus_exp
    f = theano.function([x], 1 - T.fill(x, 1.0) / (1 + T.exp(-x)), mode=m)
    assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')
    assert [node.op for node in f.maker.fgraph.toposort()] == [tensor.neg,
                                                               sigmoid_inplace]
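Both expressions in this test are algebraically sigmoid(-x): 1 - exp(x)/(1 + exp(x)) = 1/(1 + exp(x)), and 1 - 1/(1 + exp(-x)) = exp(-x)/(1 + exp(-x)), which is why the optimized graph is expected to contain exactly a negation followed by a sigmoid. A quick NumPy check of the identities:

import numpy

def sigmoid(v):
    return 1.0 / (1.0 + numpy.exp(-v))

x = numpy.linspace(-5.0, 5.0, 11)
assert numpy.allclose(1 - numpy.exp(x) / (1 + numpy.exp(x)), sigmoid(-x))
assert numpy.allclose(1 - 1.0 / (1 + numpy.exp(-x)), sigmoid(-x))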
Example 6: test_dot22scalar_cast
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fmatrix [as alias]
def test_dot22scalar_cast():
    """
    Test that in `dot22_to_dot22scalar` we properly cast integers to floats.
    """
    # Note that this test was failing before d5ff6904.
    A = T.dmatrix()
    for scalar_int_type in T.int_dtypes:
        y = T.scalar(dtype=scalar_int_type)
        f = theano.function([A, y], T.dot(A, A) * y, mode=mode_blas_opt)
        assert _dot22scalar in [x.op for x in f.maker.fgraph.toposort()]
    A = T.fmatrix()
    for scalar_int_type in T.int_dtypes:
        y = T.scalar(dtype=scalar_int_type)
        f = theano.function([A, y], T.dot(A, A) * y, mode=mode_blas_opt)
        if scalar_int_type in ['int32', 'int64']:
            assert _dot22 in [x.op for x in f.maker.fgraph.toposort()]
        else:
            assert _dot22scalar in [x.op for x in f.maker.fgraph.toposort()]
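The int32/int64 branch follows from Theano's NumPy-style dtype promotion: a float32 matrix multiplied by an int32 or int64 scalar upcasts to float64, so the scalar cannot be folded into a float32 _dot22scalar, while smaller integer types promote to float32 and can be. Assuming theano.scalar.upcast is available (it implements this promotion rule), the distinction can be checked directly:

import theano.scalar

# Small integers fit into float32; int32/int64 force an upcast to float64.
assert theano.scalar.upcast('float32', 'int16') == 'float32'
assert theano.scalar.upcast('float32', 'int32') == 'float64'
assert theano.scalar.upcast('float32', 'int64') == 'float64'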
Example 7: setUp
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fmatrix [as alias]
def setUp(self):
    self.iv = T.tensor(dtype='int32', broadcastable=(False,))
    self.fv = T.tensor(dtype='float32', broadcastable=(False,))
    self.fv1 = T.tensor(dtype='float32', broadcastable=(True,))
    self.dv = T.tensor(dtype='float64', broadcastable=(False,))
    self.dv1 = T.tensor(dtype='float64', broadcastable=(True,))
    self.cv = T.tensor(dtype='complex64', broadcastable=(False,))
    self.zv = T.tensor(dtype='complex128', broadcastable=(False,))
    self.fv_2 = T.tensor(dtype='float32', broadcastable=(False,))
    self.fv1_2 = T.tensor(dtype='float32', broadcastable=(True,))
    self.dv_2 = T.tensor(dtype='float64', broadcastable=(False,))
    self.dv1_2 = T.tensor(dtype='float64', broadcastable=(True,))
    self.cv_2 = T.tensor(dtype='complex64', broadcastable=(False,))
    self.zv_2 = T.tensor(dtype='complex128', broadcastable=(False,))
    self.fm = T.fmatrix()
    self.dm = T.dmatrix()
    self.cm = T.cmatrix()
    self.zm = T.zmatrix()
    self.fa = T.fscalar()
    self.da = T.dscalar()
    self.ca = T.cscalar()
    self.za = T.zscalar()
Example 8: test_local_remove_all_assert
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fmatrix [as alias]
def test_local_remove_all_assert():
    x = theano.tensor.fmatrix()
    a = theano.tensor.opt.assert_op(x, theano.tensor.eq(x, 0).any())
    # By default `unsafe` should not be there
    f = theano.function([x], a, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    a_op = [n for n in topo if isinstance(n.op, theano.tensor.opt.Assert)]
    assert len(a_op) == 1
    # Put `unsafe`
    f = theano.function([x], a, mode=mode_with_gpu.including('unsafe'))
    topo = f.maker.fgraph.toposort()
    a_op = [n for n in topo if isinstance(n.op, theano.tensor.opt.Assert)]
    assert len(a_op) == 0
    # Remove `unsafe`
    f = theano.function([x], a, mode=mode_with_gpu.excluding('unsafe'))
    topo = f.maker.fgraph.toposort()
    a_op = [n for n in topo if isinstance(n.op, theano.tensor.opt.Assert)]
    assert len(a_op) == 1
Example 9: test_pdbbreakpoint_op
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fmatrix [as alias]
def test_pdbbreakpoint_op():
    """ Test that PdbBreakpoint ops don't block gpu optimization"""
    b = tensor.fmatrix()
    # Create a function composed of a breakpoint followed by
    # some computation
    condition = tensor.gt(b.sum(), 0)
    b_monitored = PdbBreakpoint(name='TestBreakpoint')(condition, b)
    output = b_monitored ** 2
    f = theano.function([b], output, mode=mode_with_gpu)
    # Ensure that, in the compiled function, the computation following the
    # breakpoint has been moved to the gpu.
    topo = f.maker.fgraph.toposort()
    assert isinstance(topo[-2].op, cuda.GpuElemwise)
    assert topo[-1].op == cuda.host_from_gpu
Example 10: test_local_gpu_elemwise_careduce
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fmatrix [as alias]
def test_local_gpu_elemwise_careduce():
    x = theano.tensor.fmatrix()
    o = (x * x).sum()
    f = theano.function([x], o, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 3
    assert topo[1].op.pre_scalar_op == theano.scalar.sqr
    data = numpy.random.rand(3, 4).astype('float32')
    utt.assert_allclose(f(data), (data * data).sum())

    o = (x * x).sum(axis=1)
    f = theano.function([x], o, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 3
    assert topo[1].op.pre_scalar_op == theano.scalar.sqr
    utt.assert_allclose(f(data), (data * data).sum(axis=1))
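The length-3 graph presumably consists of a GpuFromHost transfer, a single fused GPU reduction, and a HostFromGpu transfer back: the pre_scalar_op attribute indicates that the elementwise square has been folded into the reduction itself rather than computed as a separate elemwise node.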
Example 11: test_elemwise_fusion
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fmatrix [as alias]
def test_elemwise_fusion():
    """ Test that GpuElemwise fusion works correctly """
    shape = (3, 4)
    a = cuda.shared_constructor(theano._asarray(numpy.random.rand(*shape),
                                                dtype='float32'), 'a')
    b = tensor.fmatrix()
    c = tensor.fmatrix()
    f = pfunc([b, c], [a + b + c], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    for i, node in enumerate(topo):
        print(i, node, file=sys.stdout)
    assert len(topo) == 4
    assert isinstance(topo[2].op.scalar_op, theano.scalar.basic.Composite)
    # let debugmode catch errors
    f(theano._asarray(numpy.random.rand(*shape), dtype='float32'),
      theano._asarray(numpy.random.rand(*shape), dtype='float32'))
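The four nodes are consistent with two GpuFromHost transfers (for b and c), one fused GpuElemwise whose scalar_op is a Composite computing the whole a + b + c chain, and a final HostFromGpu; the point of the test is that the two additions collapse into a single GPU kernel.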
Example 12: test_incsubtensor_mixed
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fmatrix [as alias]
def test_incsubtensor_mixed():
    # This catches a bug that occurred when incrementing
    # a float32 tensor by a float64 tensor.
    # The result is defined to be float32, so it is OK
    # to downcast the float64 increment in order to
    # transfer it to the GPU.
    # The bug was that the optimization called GpuFromHost
    # without casting first, causing the optimization to
    # fail.
    X = tensor.fmatrix()
    Y = tensor.dmatrix()
    Z = tensor.inc_subtensor(X[0:1, 0:1], Y)
    f = theano.function([X, Y], Z, mode=mode_with_gpu)
    packed, = f.maker.fgraph.inputs[1].clients
    client, idx = packed
    print(client)
    assert isinstance(client.op, tensor.Elemwise)
    assert isinstance(client.op.scalar_op, theano.scalar.Cast)
    packed, = client.outputs[0].clients
    client, idx = packed
    assert isinstance(client.op, cuda.GpuFromHost)
Example 13: test_elemwise0
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fmatrix [as alias]
def test_elemwise0():
    a = tcn.shared_constructor(theano._asarray(numpy.random.rand(4, 4),
                                               dtype='float32'), 'a')
    b = tensor.fmatrix()
    f = pfunc([b], [], updates=[(a, a + b)], mode=mode_with_gpu)
    # check that we work inplace.
    assert (list(f.maker.fgraph.toposort()[1].op.destroy_map.items())
            == [(0, [0])])
    a0 = a.get_value() * 1.0
    f(numpy.ones((4, 4), dtype='float32'))
    assert numpy.all(a0 + 1.0 == a.get_value())
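The destroy_map assertion is the in-place check: a destroy_map of {0: [0]} on the update node means its output 0 overwrites its input 0, i.e. the addition into the shared variable a reuses a's storage instead of allocating a new buffer.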
Example 14: test_elemwise1
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fmatrix [as alias]
def test_elemwise1():
    """ Several kinds of elemwise expressions with no broadcasting,
    non power-of-two shape """
    shape = (3, 4)
    a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),
                                               dtype='float32') + 0.5, 'a')
    b = tensor.fmatrix()
    # let debugmode catch any mistakes
    f = pfunc([b], [], updates=[(a, b ** a)], mode=mode_with_gpu)
    f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)
    # let debugmode catch any mistakes
    f = pfunc([b], [], updates=[(a, tensor.exp(b ** a))], mode=mode_with_gpu)
    f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)
    # let debugmode catch any mistakes
    f = pfunc([b], [], updates=[(a, a + b * tensor.exp(b ** a))],
              mode=mode_with_gpu)
    f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)
Example 15: test_elemwise_comparaison_cast
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import fmatrix [as alias]
def test_elemwise_comparaison_cast():
    """
    Test that an elemwise comparison followed by a cast to float32 is
    pushed to the gpu.
    """
    a = tensor.fmatrix()
    b = tensor.fmatrix()
    av = theano._asarray(numpy.random.rand(4, 4), dtype='float32')
    bv = numpy.ones((4, 4), dtype='float32')
    for g, ans in [(tensor.lt, av < bv), (tensor.gt, av > bv),
                   (tensor.le, av <= bv), (tensor.ge, av >= bv)]:
        f = pfunc([a, b], tensor.cast(g(a, b), 'float32'), mode=mode_with_gpu)
        out = f(av, bv)
        assert numpy.all(out == ans)
        assert any([isinstance(node.op, cuda.GpuElemwise)
                    for node in f.maker.fgraph.toposort()])