This article collects typical usage examples of the theano.tensor.matrices method in Python. If you are wondering what tensor.matrices does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also look further into usage examples for theano.tensor, the module in which the method is defined.
The following shows 15 code examples of the tensor.matrices method, sorted by popularity by default.
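For orientation, here is a minimal sketch of basic tensor.matrices usage; the variable names and the toy addition function are illustrative, not taken from the examples below:

import numpy
import theano
import theano.tensor as T

# T.matrices creates several symbolic 2-D (matrix) variables at once,
# one per character in the name string (here 'a' and 'b').
a, b = T.matrices('ab')
f = theano.function([a, b], a + b)

av = numpy.ones((2, 2), dtype=theano.config.floatX)
bv = numpy.ones((2, 2), dtype=theano.config.floatX) * 2
print(f(av, bv))  # prints a (2, 2) array of 3.0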
Example 1: test_straightforward
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import matrices [as alias]
def test_straightforward(self):
    x, y, z = T.matrices('xyz')
    e = x + y * z
    op = OpFromGraph([x, y, z], [e])
    # op(x, y, z) = 1 + 3*5 = array of 16; op(y, z, x) = 3 + 5*1 = array of 8
    f = op(x, y, z) - op(y, z, x)
    fn = function([x, y, z], f)
    xv = numpy.ones((2, 2), dtype=config.floatX)
    yv = numpy.ones((2, 2), dtype=config.floatX) * 3
    zv = numpy.ones((2, 2), dtype=config.floatX) * 5
    fn(xv, yv, zv)
    assert numpy.all(8.0 == fn(xv, yv, zv))
    assert numpy.all(8.0 == fn(xv, yv, zv))
Example 2: test_shared_grad
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import matrices [as alias]
def test_shared_grad(self):
    x, y, z = T.matrices('xyz')
    s = shared(numpy.random.rand(2, 2).astype(config.floatX))
    e = x + y * z + s
    op = OpFromGraph([x, y, z], [e])
    f = op(x, y, z)
    f = f - T.grad(T.sum(f), y)
    fn = function([x, y, z], f)
    xv = numpy.ones((2, 2), dtype=config.floatX)
    yv = numpy.ones((2, 2), dtype=config.floatX) * 3
    zv = numpy.ones((2, 2), dtype=config.floatX) * 5
    assert numpy.allclose(11.0 + s.get_value(), fn(xv, yv, zv))

    # gradient with respect to the shared variable
    f = op(x, y, z)
    f = f - T.grad(T.sum(f), s)
    fn = function([x, y, z], f)
    assert numpy.allclose(15.0 + s.get_value(), fn(xv, yv, zv))
Example 3: test_straightforward
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import matrices [as alias]
def test_straightforward(self):
    x, y, z = T.matrices('xyz')
    e = x + y * z
    op = OpFromGraph([x, y, z], [e])
    # op(x, y, z) = 1 + 3*5 = array of 16; op(y, z, x) = 3 + 5*1 = array of 8
    f = op(x, y, z) - op(y, z, x)
    fn = function([x, y, z], f)
    xv = numpy.ones((2, 2), dtype=config.floatX)
    yv = numpy.ones((2, 2), dtype=config.floatX) * 3
    zv = numpy.ones((2, 2), dtype=config.floatX) * 5
    fn(xv, yv, zv)
    assert numpy.all(8.0 == fn(xv, yv, zv))
    assert numpy.all(8.0 == fn(xv, yv, zv))
Example 4: __getstate__
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import matrices [as alias]
def __getstate__(self):
    """
    Used by pickle. Returns a dictionary to pickle in place of
    self.__dict__.

    If self.matrices_save_path is set, this saves the matrices P_ and
    inv_P_ separately in matrices_save_path as a .npz archive, which uses
    much less space and memory than letting pickle handle them.
    """
    result = copy.copy(self.__dict__)  # shallow copy
    if self.matrices_save_path is not None:
        matrices = {'P_': self.P_}
        if self.inv_P_ is not None:
            matrices['inv_P_'] = self.inv_P_

        np.savez(self.matrices_save_path, **matrices)

        # Remove the matrices from the dictionary that gets pickled.
        for key, matrix in matrices.items():
            del result[key]

    return result
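For completeness, a matching __setstate__ would reload the arrays that __getstate__ wrote out. The sketch below is an assumption about how that counterpart might look; it is not part of the original example and it presumes matrices_save_path still points at the saved .npz archive:

def __setstate__(self, state):
    # Restore the regular (pickled) attributes first.
    self.__dict__.update(state)
    # If the matrices were written to a separate .npz archive by
    # __getstate__, load them back (hypothetical counterpart).
    if getattr(self, 'matrices_save_path', None) is not None:
        archive = np.load(self.matrices_save_path)
        self.P_ = archive['P_']
        self.inv_P_ = archive['inv_P_'] if 'inv_P_' in archive else None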
Example 5: test_allclose
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import matrices [as alias]
def test_allclose(self):
    m = theano.config.mode
    m = theano.compile.get_mode(m)
    m.check_isfinite = False
    x, y = tensor.matrices('xy')
    # regular softmax and cross-entropy
    sm = tensor.nnet.softmax(x)
    cm = tensor.nnet.categorical_crossentropy(sm, y)
    # numerically stable log-softmax with cross-entropy
    logsm = tensor.nnet.logsoftmax(x)
    sm2 = tensor.exp(logsm)  # only used to show equivalence with sm
    cm2 = -tensor.sum(y * logsm, axis=1)
    grad = tensor.grad(cm2.mean(), x)

    # create some large inputs for the softmax ...
    a = numpy.exp(10 * numpy.random.rand(5, 10).astype(theano.config.floatX))
    # ... and some one-hot coded labels
    b = numpy.eye(5, 10).astype(theano.config.floatX)

    # show the equivalence of softmax and the exponentiated, numerically
    # stable log-softmax
    f1 = theano.function([x], [sm, sm2])
    sm_, sm2_ = f1(a)
    utt.assert_allclose(sm_, sm2_)

    # show that the two versions give the same cross-entropy cost,
    # which indicates that the forward pass does provide some numerical
    # stability
    f2 = theano.function([x, y], [cm, cm2], mode=m)
    cm_, cm2_ = f2(a, b)
    utt.assert_allclose(cm_, cm2_)

    # finally, show that the log-softmax gradient does not blow up
    # (it contains no NaNs), unlike the gradient of the plain softmax
    f3 = theano.function([x, y], [grad])
    grad_ = f3(a, b)
    assert numpy.all(numpy.isnan(grad_) == False)
Example 6: test_local_softmax_optimization
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import matrices [as alias]
def test_local_softmax_optimization(self):
    """Test the LogSoftmax substitution.

    Check that log(softmax(x)) is substituted with LogSoftmax(x). Note
    that only the forward pass is checked (i.e., the gradient is not
    checked).
    """
    x, y = tensor.matrices('xy')
    sm = tensor.nnet.softmax(x)
    logsm = tensor.log(sm)
    f = theano.function([x], logsm)
    self.assertTrue(hasattr(f.maker.fgraph.outputs[0].tag, 'trace'))
    assert isinstance(f.maker.fgraph.outputs[0].owner.op,
                      theano.tensor.nnet.nnet.LogSoftmax)
Example 7: test_dot22
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import matrices [as alias]
def test_dot22(self):
    x, y = T.matrices('xy')
    self._compile_and_check(
        [x, y], [T.blas._dot22(x, y)],
        [numpy.random.random((2, 3)).astype(config.floatX),
         numpy.random.random((3, 4)).astype(config.floatX)],
        T.blas.Dot22)
Example 8: test_gemm
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import matrices [as alias]
def test_gemm(self):
    x, y, z = T.matrices('xyz')
    a = T.scalar('a')
    b = T.scalar('b')
    self._compile_and_check(
        [x, y, a, z, b], [T.blas.gemm(z, a, x, y, b)],
        [numpy.random.random((2, 3)).astype(config.floatX),
         numpy.random.random((3, 4)).astype(config.floatX),
         numpy.asarray(0.5, dtype=config.floatX),
         numpy.random.random((2, 4)).astype(config.floatX),
         numpy.asarray(0.5, dtype=config.floatX)],
        T.blas.Gemm)
Example 9: test_size_changes
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import matrices [as alias]
def test_size_changes(self):
    x, y, z = T.matrices('xyz')
    e = T.dot(x, y)
    op = OpFromGraph([x, y], [e])
    f = op(x, op(y, z))
    fn = function([x, y, z], f)
    xv = numpy.ones((2, 3), dtype=config.floatX)
    yv = numpy.ones((3, 4), dtype=config.floatX) * 3
    zv = numpy.ones((4, 5), dtype=config.floatX) * 5
    res = fn(xv, yv, zv)
    assert res.shape == (2, 5)
    assert numpy.all(180.0 == res)
    res = fn(xv, yv, zv)
    assert res.shape == (2, 5)
    assert numpy.all(180.0 == res)
Example 10: test_grad
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import matrices [as alias]
def test_grad(self):
    x, y, z = T.matrices('xyz')
    e = x + y * z
    op = OpFromGraph([x, y, z], [e])
    f = op(x, y, z)
    f = f - T.grad(T.sum(f), y)
    fn = function([x, y, z], f)
    xv = numpy.ones((2, 2), dtype=config.floatX)
    yv = numpy.ones((2, 2), dtype=config.floatX) * 3
    zv = numpy.ones((2, 2), dtype=config.floatX) * 5
    assert numpy.all(11.0 == fn(xv, yv, zv))
Example 11: test_grad_grad
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import matrices [as alias]
def test_grad_grad(self):
    x, y, z = T.matrices('xyz')
    e = x + y * z
    op = OpFromGraph([x, y, z], [e])
    f = op(x, y, z)
    f = f - T.grad(T.sum(f), y)
    f = f - T.grad(T.sum(f), y)
    fn = function([x, y, z], f)
    xv = numpy.ones((2, 2), dtype=config.floatX)
    yv = numpy.ones((2, 2), dtype=config.floatX) * 3
    zv = numpy.ones((2, 2), dtype=config.floatX) * 5
    assert numpy.allclose(6.0, fn(xv, yv, zv))
Example 12: test_connection_pattern
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import matrices [as alias]
def test_connection_pattern(self):
    # Basic case
    x, y, z = T.matrices('xyz')
    out1 = x * y
    out2 = y * z
    op1 = OpFromGraph([x, y, z], [out1, out2])
    results = op1.connection_pattern(None)
    expect_result = [[True, False],
                     [True, True],
                     [False, True]]
    assert results == expect_result

    # Graph with ops that don't have a 'full' connection pattern
    # and with ops that have multiple outputs
    m, n, p, q = T.matrices('mnpq')
    o1, o2 = op1(m, n, p)
    out1, out2 = op1(o1, q, o2)
    op2 = OpFromGraph([m, n, p, q], [out1, out2])
    results = op2.connection_pattern(None)
    expect_result = [[True, False],
                     [True, True],
                     [False, True],
                     [True, True]]
    assert results == expect_result

    # Inner graph where some computation doesn't rely on explicit inputs
    srng = RandomStreams(seed=234)
    rv_u = srng.uniform((2, 2))
    x, y = T.matrices('xy')
    out1 = x + rv_u
    out2 = y + 3
    out3 = 3 + rv_u
    op3 = OpFromGraph([x, y], [out1, out2, out3])
    results = op3.connection_pattern(None)
    expect_result = [[True, False, False],
                     [False, True, False],
                     [True, False, True]]
    assert results == expect_result
Example 13: test_allclose
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import matrices [as alias]
def test_allclose(self):
    m = theano.config.mode
    m = theano.compile.get_mode(m)
    m.check_isfinite = False
    x, y = tensor.matrices('xy')
    # regular softmax and cross-entropy
    sm = tensor.nnet.softmax(x)
    cm = tensor.nnet.categorical_crossentropy(sm, y)
    # numerically stable log-softmax with cross-entropy
    logsm = tensor.nnet.logsoftmax(x)
    sm2 = tensor.exp(logsm)  # only used to show equivalence with sm
    cm2 = -tensor.sum(y * logsm, axis=1)
    grad = tensor.grad(cm2.mean(), x)

    # create some large inputs for the softmax ...
    a = numpy.exp(10 * numpy.random.rand(5, 10).astype(theano.config.floatX))
    # ... and some one-hot coded labels
    b = numpy.eye(5, 10).astype(theano.config.floatX)

    # show the equivalence of softmax and the exponentiated, numerically
    # stable log-softmax
    f1 = theano.function([x], [sm, sm2])
    sm_, sm2_ = f1(a)
    utt.assert_allclose(sm_, sm2_)

    # show that the two versions give the same cross-entropy cost,
    # which indicates that the forward pass does provide some numerical
    # stability
    f2 = theano.function([x, y], [cm, cm2], mode=m)
    cm_, cm2_ = f2(a, b)
    utt.assert_allclose(cm_, cm2_)

    # finally, show that the log-softmax gradient does not blow up
    # (it contains no NaNs), unlike the gradient of the plain softmax
    f3 = theano.function([x, y], [grad])
    grad_ = f3(a, b)
    assert numpy.all(numpy.isnan(grad_) == False)
Example 14: test_local_softmax_optimization
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import matrices [as alias]
def test_local_softmax_optimization(self):
    """Test the LogSoftmax substitution.

    Check that log(softmax(x)) is substituted with LogSoftmax(x). Note
    that only the forward pass is checked (i.e., the gradient is not
    checked).
    """
    x, y = tensor.matrices('xy')
    sm = tensor.nnet.softmax(x)
    logsm = tensor.log(sm)
    f = theano.function([x], logsm)
    assert isinstance(f.maker.fgraph.outputs[0].owner.op,
                      theano.tensor.nnet.nnet.LogSoftmax)
Example 15: test_size_changes
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import matrices [as alias]
def test_size_changes(self):
    x, y, z = T.matrices('xyz')
    e = T.dot(x, y)
    op = OpFromGraph([x, y], [e])
    f = op(x, op(y, z))
    fn = function([x, y, z], f)
    xv = numpy.ones((2, 3), dtype=config.floatX)
    yv = numpy.ones((3, 4), dtype=config.floatX) * 3
    zv = numpy.ones((4, 5), dtype=config.floatX) * 5
    res = fn(xv, yv, zv)
    assert res.shape == (2, 5)
    assert numpy.all(180.0 == res)
    res = fn(xv, yv, zv)
    assert res.shape == (2, 5)
    assert numpy.all(180.0 == res)