This article collects typical usage examples of the Python method theano.tensor.lmatrix. If you are wondering what exactly tensor.lmatrix does, how to use it, or what it looks like in practice, the curated code samples below may help. You can also browse further usage examples from the theano.tensor module in which the method lives.
Fifteen code examples of tensor.lmatrix are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
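Before the examples, a note on what tensor.lmatrix actually builds: it returns a symbolic 2-D tensor variable of dtype int64 ("l" for long), typically used for batches of integer indices. A minimal sketch (the variable name idx is only illustrative):

import numpy
import theano
import theano.tensor as tensor

# lmatrix: a symbolic 2-D int64 tensor, e.g. a batch of index rows
idx = tensor.lmatrix('idx')
total = idx.sum()                        # build a symbolic expression
f = theano.function([idx], total)        # compile it
print(f(numpy.array([[1, 2], [3, 4]], dtype='int64')))  # prints 10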
Example 1: test_blocksparse_inplace_gemv_opt
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import lmatrix [as alias]
def test_blocksparse_inplace_gemv_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()

    o = sparse_block_dot(W, h, iIdx, b, oIdx)

    f = theano.function([W, h, iIdx, b, oIdx], o)
    assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')

    if theano.config.mode == "FAST_COMPILE":
        assert not f.maker.fgraph.toposort()[-1].op.inplace
    else:
        assert f.maker.fgraph.toposort()[-1].op.inplace
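Example 1 verifies the optimized graph rather than numeric output. The same inspection technique works for any compiled function; a small sketch using only core Theano:

import theano
import theano.tensor as tensor

x = tensor.lmatrix('x')
f = theano.function([x], x + x)
# toposort() lists the Apply nodes of the optimized graph in execution
# order; the last node produces the output, and its op carries flags
# such as inplace when the op defines them.
for node in f.maker.fgraph.toposort():
    print(node.op)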
Example 2: test_blocksparse_inplace_outer_opt
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import lmatrix [as alias]
def test_blocksparse_inplace_outer_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()

    o = sparse_block_dot(W, h, iIdx, b, oIdx)

    theano.printing.debugprint(tensor.grad(o.sum(), wrt=W))

    f = theano.function([W, h, iIdx, b, oIdx],
                        [o, tensor.grad(o.sum(), wrt=W)])
    assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')

    if theano.config.mode == "FAST_COMPILE":
        assert not f.maker.fgraph.toposort()[-1].op.inplace
    else:
        assert f.maker.fgraph.toposort()[-1].op.inplace
Example 3: test_blocksparse_inplace_outer_opt
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import lmatrix [as alias]
def test_blocksparse_inplace_outer_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()

    o = sparse_block_dot(W, h, iIdx, b, oIdx)

    theano.printing.debugprint(tensor.grad(o.sum(), wrt=W))

    f = theano.function([W, h, iIdx, b, oIdx],
                        [o, tensor.grad(o.sum(), wrt=W)])

    if theano.config.mode == "FAST_COMPILE":
        assert not f.maker.fgraph.toposort()[-1].op.inplace
    else:
        assert f.maker.fgraph.toposort()[-1].op.inplace
Example 4: test_correct_solution
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import lmatrix [as alias]
def test_correct_solution(self):
    x = tensor.lmatrix()
    y = tensor.lmatrix()
    z = tensor.lscalar()
    # m[0] below is the least-squares solution; like numpy.linalg.lstsq,
    # the op also returns residuals, rank, and singular values.
    b = theano.tensor.nlinalg.lstsq()(x, y, z)
    f = function([x, y, z], b)
    TestMatrix1 = numpy.asarray([[2, 1], [3, 4]])
    TestMatrix2 = numpy.asarray([[17, 20], [43, 50]])
    TestScalar = numpy.asarray(1)
    m = f(TestMatrix1, TestMatrix2, TestScalar)
    self.assertTrue(numpy.allclose(TestMatrix2, numpy.dot(TestMatrix1, m[0])))
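Since the test matrix is invertible, the least-squares solution here is exact. A numpy-only restatement of the same check (using numpy.linalg.lstsq directly, with rcond=None as accepted by recent numpy, rather than the Theano op):

import numpy

A = numpy.array([[2, 1], [3, 4]])
B = numpy.array([[17, 20], [43, 50]])
sol = numpy.linalg.lstsq(A, B, rcond=None)[0]   # [[5. 6.] [7. 8.]]
assert numpy.allclose(numpy.dot(A, sol), B)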
Example 5: multMatVect
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import lmatrix [as alias]
def multMatVect(v, A, m1, B, m2):
    # TODO: need description for parameters and return value
    """
    Multiply the first half of v by A with a modulo of m1 and the second half
    by B with a modulo of m2.

    Notes
    -----
    The parameters of dot_modulo are passed implicitly because passing them
    explicitly takes more time than running the function's C-code.

    """
    if multMatVect.dot_modulo is None:
        A_sym = tensor.lmatrix('A')
        s_sym = tensor.ivector('s')
        m_sym = tensor.iscalar('m')
        A2_sym = tensor.lmatrix('A2')
        s2_sym = tensor.ivector('s2')
        m2_sym = tensor.iscalar('m2')
        o = DotModulo()(A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym)
        multMatVect.dot_modulo = function(
            [A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym], o, profile=False)

    # This way of calling the Theano function is done to bypass Theano overhead.
    f = multMatVect.dot_modulo
    f.input_storage[0].storage[0] = A
    f.input_storage[1].storage[0] = v[:3]
    f.input_storage[2].storage[0] = m1
    f.input_storage[3].storage[0] = B
    f.input_storage[4].storage[0] = v[3:]
    f.input_storage[5].storage[0] = m2
    f.fn()
    r = f.output_storage[0].storage[0]

    return r
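The input_storage/fn() calling convention above skips the argument checking and conversion of a normal f(...) call, which is what the "bypass Theano overhead" comment refers to. A self-contained sketch of the same pattern on a toy expression (names illustrative):

import numpy
import theano
import theano.tensor as tensor

x = tensor.lmatrix('x')
f = theano.function([x], x * 2)

# Write the input directly into its container, run the compiled thunk,
# then read the output container - no Python-level argument handling.
f.input_storage[0].storage[0] = numpy.arange(4, dtype='int64').reshape(2, 2)
f.fn()
print(f.output_storage[0].storage[0])    # [[0 2] [4 6]]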
Example 6: test_blocksparse_gpu_gemv_opt
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import lmatrix [as alias]
def test_blocksparse_gpu_gemv_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()

    o = sparse_block_dot(W, h, iIdx, b, oIdx)

    f = theano.function([W, h, iIdx, b, oIdx], o, mode=mode_with_gpu)

    assert sum(1 for n in f.maker.fgraph.apply_nodes
               if isinstance(n.op, GpuSparseBlockGemv)) == 1
Example 7: test_multMatVect
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import lmatrix [as alias]
def test_multMatVect():
    A1 = tensor.lmatrix('A1')
    s1 = tensor.ivector('s1')
    m1 = tensor.iscalar('m1')
    A2 = tensor.lmatrix('A2')
    s2 = tensor.ivector('s2')
    m2 = tensor.iscalar('m2')

    g0 = rng_mrg.DotModulo()(A1, s1, m1, A2, s2, m2)
    f0 = theano.function([A1, s1, m1, A2, s2, m2], g0)

    i32max = numpy.iinfo(numpy.int32).max

    A1 = numpy.random.randint(0, i32max, (3, 3)).astype('int64')
    s1 = numpy.random.randint(0, i32max, 3).astype('int32')
    m1 = numpy.asarray(numpy.random.randint(i32max), dtype="int32")
    A2 = numpy.random.randint(0, i32max, (3, 3)).astype('int64')
    s2 = numpy.random.randint(0, i32max, 3).astype('int32')
    m2 = numpy.asarray(numpy.random.randint(i32max), dtype="int32")

    f0.input_storage[0].storage[0] = A1
    f0.input_storage[1].storage[0] = s1
    f0.input_storage[2].storage[0] = m1
    f0.input_storage[3].storage[0] = A2
    f0.input_storage[4].storage[0] = s2
    f0.input_storage[5].storage[0] = m2

    r_a1 = rng_mrg.matVecModM(A1, s1, m1)
    r_a2 = rng_mrg.matVecModM(A2, s2, m2)
    f0.fn()
    r_b = f0.output_storage[0].value

    assert numpy.allclose(r_a1, r_b[:3])
    assert numpy.allclose(r_a2, r_b[3:])
Example 8: __init__
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import lmatrix [as alias]
def __init__(self, dim, **kwargs):
    super(LookupBottom, self).__init__(**kwargs)
    self.dim = dim

    self.mask = tensor.matrix('inputs_mask')
    self.batch_inputs = {
        'inputs': tensor.lmatrix('inputs')}
    self.single_inputs = {
        'inputs': tensor.lvector('inputs')}

    self.children = [LookupTable(self.input_num_chars['inputs'], self.dim)]
Example 9: test_blocksparse_inplace_gemv_opt
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import lmatrix [as alias]
def test_blocksparse_inplace_gemv_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()

    o = sparse_block_dot(W, h, iIdx, b, oIdx)

    f = theano.function([W, h, iIdx, b, oIdx], o)

    if theano.config.mode == "FAST_COMPILE":
        assert not f.maker.fgraph.toposort()[-1].op.inplace
    else:
        assert f.maker.fgraph.toposort()[-1].op.inplace
Example 10: multMatVect
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import lmatrix [as alias]
def multMatVect(v, A, m1, B, m2):
    """
    Multiply the first half of v by A with a modulo of m1 and the second half
    by B with a modulo of m2.

    Notes
    -----
    The parameters of dot_modulo are passed implicitly because passing them
    explicitly takes more time than running the function's C-code.

    """
    if multMatVect.dot_modulo is None:
        A_sym = tensor.lmatrix('A')
        s_sym = tensor.ivector('s')
        m_sym = tensor.iscalar('m')
        A2_sym = tensor.lmatrix('A2')
        s2_sym = tensor.ivector('s2')
        m2_sym = tensor.iscalar('m2')
        o = DotModulo()(A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym)
        multMatVect.dot_modulo = function(
            [A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym], o, profile=False)

    # This way of calling the Theano function is done to bypass Theano overhead.
    f = multMatVect.dot_modulo
    f.input_storage[0].storage[0] = A
    f.input_storage[1].storage[0] = v[:3]
    f.input_storage[2].storage[0] = m1
    f.input_storage[3].storage[0] = B
    f.input_storage[4].storage[0] = v[3:]
    f.input_storage[5].storage[0] = m2
    f.fn()
    r = f.output_storage[0].storage[0]

    return r
Example 11: test_blocksparse_gpu_outer_opt
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import lmatrix [as alias]
def test_blocksparse_gpu_outer_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()

    o = sparse_block_dot(W, h, iIdx, b, oIdx)

    f = theano.function([W, h, iIdx, b, oIdx],
                        [o, tensor.grad(o.sum(), wrt=W)],
                        mode=mode_with_gpu)

    assert isinstance(f.maker.fgraph.toposort()[-2].op, GpuSparseBlockOuter)
Example 12: test_lookup_table
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import lmatrix [as alias]
def test_lookup_table():
    lt = LookupTable(5, 3)
    lt.allocate()

    lt.W.set_value(numpy.arange(15).reshape(5, 3).astype(theano.config.floatX))

    x = tensor.lmatrix("x")
    y = lt.apply(x)
    f = theano.function([x], [y])

    x_val = [[1, 2], [0, 3]]
    desired = numpy.array([[[3, 4, 5], [6, 7, 8]], [[0, 1, 2], [9, 10, 11]]],
                          dtype=theano.config.floatX)
    assert_equal(f(x_val)[0], desired)

    # Test get_dim
    assert_equal(lt.get_dim(lt.apply.inputs[0]), 0)
    assert_equal(lt.get_dim(lt.apply.outputs[0]), lt.dim)
    assert_raises(ValueError, lt.get_dim, 'random_name')

    # Test feedforward interface
    assert lt.input_dim == 0
    assert lt.output_dim == 3
    lt.output_dim = 4
    assert lt.output_dim == 4

    def assign_input_dim():
        lt.input_dim = 11
    assert_raises(ValueError, assign_input_dim)
    lt.input_dim = 0
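The lookup in Example 12 is just integer advanced indexing on the weight matrix, which is why the input must be an integer type such as lmatrix. A brick-free sketch of the first assertion, mirroring roughly what LookupTable does internally (assuming only Theano and numpy, no blocks):

import numpy
import theano
import theano.tensor as tensor

W = theano.shared(numpy.arange(15).reshape(5, 3).astype(theano.config.floatX))
x = tensor.lmatrix('x')
# Index rows of W by the flattened indices, then restore the batch shape.
y = W[x.flatten()].reshape((x.shape[0], x.shape[1], 3))
f = theano.function([x], y)
print(f([[1, 2], [0, 3]]))   # rows 1,2 and 0,3 of W, shape (2, 2, 3)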
Example 13: test_with_extra_dims_cross_entropy_3d
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import lmatrix [as alias]
def test_with_extra_dims_cross_entropy_3d():
    x = tensor.tensor3('x')
    y = tensor.lmatrix('y')
    brick = SoftmaxWithExtraDims()
    f = theano.function(
        [y, x], [brick.categorical_cross_entropy(y, x, extra_ndim=1)])
    assert_allclose(
        f([[0, 1], [2, 3]],
          [[[1, 2, 1, 2], [1, 2, 3, 4]],
           [[4, 3, 2, 1], [2, 2, 2, 2]]])[0],
        numpy.array([[2.0064, 2.44019],
                     [2.44019, 1.3863]]),
        rtol=1e-5)
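The expected values can be checked by hand: for a softmax followed by categorical cross-entropy, the cost of a row x with integer target t is logsumexp(x) - x[t]. Verifying the first entry above:

import numpy

x = numpy.array([1., 2., 1., 2.])
t = 0
cost = numpy.log(numpy.exp(x).sum()) - x[t]
print(cost)   # ~2.0064, the first entry of the expected array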
Example 14: make_theano_batch
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import lmatrix [as alias]
def make_theano_batch(self, name=None, dtype=None, batch_size=None):
    if batch_size == 1:
        rval = tensor.lrow(name=name)
    else:
        rval = tensor.lmatrix(name=name)

    if theano.config.compute_test_value != 'off':
        if batch_size == 1:
            n = 1
        else:
            # TODO: try to extract constant scalar value from batch_size
            n = 4
        rval.tag.test_value = self.get_origin_batch(batch_size=n,
                                                    dtype=dtype)
    return rval
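Example 14 picks lrow for a single example and lmatrix for a batch, and attaches a test value when Theano's compute_test_value mode is on. That mode eagerly evaluates every expression on the attached test values, catching shape errors at graph-construction time. A minimal illustration, independent of the class this method belongs to:

import numpy
import theano
import theano.tensor as tensor

theano.config.compute_test_value = 'warn'
x = tensor.lmatrix('x')
x.tag.test_value = numpy.zeros((4, 7), dtype='int64')
y = x.sum(axis=1)                 # evaluated on the test value right here
print(y.tag.test_value.shape)     # (4,)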
Example 15: Xtest_blocksparse_grad_merge
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import lmatrix [as alias]
def Xtest_blocksparse_grad_merge(self):
    b = tensor.fmatrix()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()

    W_val, h_val, iIdx_val, b_val, oIdx_val = self.gemv_data()
    W = float32_shared_constructor(W_val)

    o = gpu_sparse_block_gemv(b.take(oIdx, axis=0), W, h, iIdx, oIdx)
    gW = theano.grad(o.sum(), W)

    lr = numpy.asarray(0.05, dtype='float32')

    upd = W - lr * gW

    f1 = theano.function([h, iIdx, b, oIdx], updates=[(W, upd)],
                         mode=mode_with_gpu)

    # Make sure the lr update was merged.
    assert isinstance(f1.maker.fgraph.outputs[0].owner.op,
                      GpuSparseBlockOuter)

    # Exclude the merge optimizations.
    mode = mode_with_gpu.excluding('local_merge_blocksparse_alpha')
    mode = mode.excluding('local_merge_blocksparse_output')

    f2 = theano.function([h, iIdx, b, oIdx], updates=[(W, upd)], mode=mode)

    # Make sure the lr update is not merged.
    assert not isinstance(f2.maker.fgraph.outputs[0].owner.op,
                          GpuSparseBlockOuter)

    f2(h_val, iIdx_val, b_val, oIdx_val)
    W_ref = W.get_value()

    # Reset the shared variable and run the merged version.
    W.set_value(W_val)
    f1(h_val, iIdx_val, b_val, oIdx_val)
    W_opt = W.get_value()

    utt.assert_allclose(W_ref, W_opt)