This article collects typical usage examples of the Python function theano.tensor.vectors. If you are wondering what vectors does, how to call it, or what it looks like in real code, the curated examples below should help.
The following presents 13 code examples of the vectors function, drawn from real projects and ordered roughly by popularity.
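Before the examples, a minimal sketch of the function itself (the values here are illustrative): theano.tensor.vectors creates several symbolic 1-D variables in one call, accepting either one name per variable, a single string (one character per variable), or a single integer count.

import theano
import theano.tensor as T

# One name per variable...
x, y, z = T.vectors('x', 'y', 'z')
# ...or a single count (the variables are then unnamed).
u, v = T.vectors(2)
# A single string also works: T.vectors('xy') yields vectors named 'x' and 'y'.

f = theano.function([x, y, z], x + y * z)
print(f([1., 2.], [3., 4.], [5., 6.]))  # [16. 26.]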
Example 1: test_connection_pattern_override
def test_connection_pattern_override(self, cls_ofg):
    x, y = T.vectors('xy')

    def f1(x, y):
        del x
        # Pretend we know how to backpropagate through x anyway,
        # and that we do not care about the gradient w.r.t. y.
        return y + T.round(y)

    def f1_back(inputs, output_gradients):
        return [output_gradients[0],
                theano.gradient.disconnected_type()]

    op = cls_ofg(inputs=[x, y],
                 outputs=[f1(x, y)],
                 grad_overrides=f1_back,
                 connection_pattern=[[True], [False]],  # This is new
                 on_unused_input='ignore')  # This is new

    c = op(x, y)
    g1 = theano.grad(c.sum(), x)
    out = g1.eval({x: np.ones((5,), dtype=np.float32),
                   y: np.ones((5,), dtype=np.float32)})
    assert np.allclose(out, [1.] * 5)
Example 2: test_input_dimensions_overflow
def test_input_dimensions_overflow(self):
    # Elemwise.perform used to compute the product of the input shapes
    # to check whether any of them was zero; that product overflowed
    # in this case.
    a, b, c, d, e, f = tensor.vectors("abcdef")
    s = a + b + c + d + e + f
    g = theano.function([a, b, c, d, e, f], s,
                        mode=theano.compile.Mode(linker="py"))
    g(*[numpy.zeros(2 ** 11, config.floatX) for _ in range(6)])
Example 3: test_single_var
def test_single_var(self):
    # Test `is_same_graph` with some trivial graphs (one Variable).
    x, y, z = tensor.vectors('x', 'y', 'z')
    self.check([
        (x, x, (({}, True),)),
        (x, y, (({}, False), ({y: x}, True))),
        (x, tensor.neg(x), (({}, False),)),
        (x, tensor.neg(y), (({}, False),)),
    ])
Example 4: test_full_graph
def test_full_graph(self):
    # Test `is_same_graph` with more complex graphs.
    x, y, z = tensor.vectors('x', 'y', 'z')
    t = x * y
    self.check([
        (x * 2, x * 2, (({}, True),)),
        (x * 2, y * 2, (({}, False), ({y: x}, True))),
        (x * 2, y * 2, (({}, False), ({x: y}, True))),
        (x * 2, y * 3, (({}, False), ({y: x}, False))),
        (t * 2, z * 2, (({}, False), ({t: z}, True))),
        (t * 2, z * 2, (({}, False), ({z: t}, True))),
        (x * (y * z), (x * y) * z, (({}, False),)),
    ])
Example 5: test_nested
def test_nested(self, cls_ofg):
    x, y = T.vectors('xy')
    u, v = x + y, x - y
    # Forward transform and its (scaled) inverse as OpFromGraph ops.
    op_ft = cls_ofg([x, y], [u, v])
    op_ift = cls_ofg([x, y], [u / 2, v / 2])

    xx, yy = T.vector('xx'), T.vector('yy')
    xx2, yy2 = op_ift(*op_ft(xx, yy))
    fn = function([xx, yy], [xx2, yy2])

    xv = np.random.rand(16).astype(config.floatX)
    yv = np.random.rand(16).astype(config.floatX)
    xv2, yv2 = fn(xv, yv)
    # Applying the inverse after the forward round-trips the inputs.
    assert np.allclose(xv, xv2)
    assert np.allclose(yv, yv2)
Example 6: test_rop_override
def test_rop_override(self, cls_ofg):
    x, y = T.vectors('xy')

    def ro(inps, epts):
        x, y = inps
        u, v = epts
        # The true R-op of x * y would be u * y + x * v; the override
        # scales the terms by 2 and 1.5 so the test can detect it is used.
        return [u * y * 2. + x * v * 1.5]

    u, v = T.vectors('uv')
    op_mul_rop = cls_ofg([x, y, u, v], ro([x, y], [u, v]))
    op_mul = cls_ofg([x, y], [x * y], rop_overrides=ro)
    op_mul2 = cls_ofg([x, y], [x * y], rop_overrides=op_mul_rop)

    # single override case
    xx, yy = T.vector('xx'), T.vector('yy')
    du, dv = T.vector('du'), T.vector('dv')
    for op in [op_mul, op_mul2]:
        zz = op(xx, yy)  # use the loop variable so both overrides are exercised
        dw = T.Rop(zz, [xx, yy], [du, dv])
        fn = function([xx, yy, du, dv], dw)
        vals = np.random.rand(4, 32).astype(config.floatX)
        dwval = fn(*vals)
        assert np.allclose(
            dwval, vals[0] * vals[3] * 1.5 + vals[1] * vals[2] * 2.)
Example 7: test_merge_only
def test_merge_only(self):
    # Test `is_same_graph` when `equal_computations` cannot be used.
    x, y, z = tensor.vectors('x', 'y', 'z')
    t = x * y
    self.check([
        (x, t, (({}, False), ({t: x}, True))),
        (t * 2, x * 2, (({}, False), ({t: x}, True))),
        (x * x, x * y, (({}, False), ({y: x}, True))),
        (x * x + z, x * y + t, (({}, False),
                                ({y: x}, False),
                                ({y: x, t: z}, True))),
    ],
        debug=False)
Example 8: test_c_thunks
def test_c_thunks():
    from nose.tools import assert_raises
    a = tensor.scalars('a')
    b, c = tensor.vectors('bc')
    cases = [False]
    if theano.config.cxx:
        cases.append(True)
    for c_thunks in cases:
        f = function([a, b, c], ifelse(a, a * b, b * c),
                     mode=Mode(optimizer=None,
                               linker=vm.VM_Linker(c_thunks=c_thunks,
                                                   use_cloop=False)))
        f(1, [2], [3, 2])
        assert_raises(ValueError, f, 0, [2], [3, 4])
        # Only the C linker attaches cthunks to the compiled thunks.
        assert any([hasattr(t, 'cthunk') for t in f.fn.thunks]) == c_thunks
Example 9: compile
def compile(self, X, n_negative_samples=None):
    if n_negative_samples is None:
        n_negative_samples = 1000
    pos_samples = X.loc[:, self.column_ranges.keys()].values.astype(floatX)
    pos_data, neg_data = T.matrices('SigData', 'BckData')
    pos_w, neg_w, parameters = T.vectors('SigW', 'BckW', 'parameters')
    neg_samples, neg_weight = self.generate_negative_samples(
        n_negative_samples=n_negative_samples,
        strategy=self.sampling_strategy)
    givens = {pos_data: pos_samples, neg_data: neg_samples, neg_w: neg_weight}
    pdf = self.prepare_pdf()
    pdfs, summands = pdf(pos_data, neg_data, neg_weights=neg_w, weights=parameters)
    # Negative weighted log-likelihood of the positive (signal) samples.
    result = -T.mean(pos_w * T.log(pdfs))
    self.Tfunction = theano.function([parameters, pos_w], result, givens=givens)
    self.Tderivative = theano.function([parameters, pos_w],
                                       T.grad(result, parameters),
                                       givens=givens)
    self.X = X
Example 10: test_grad_override
def test_grad_override(self, cls_ofg):
    x, y = T.vectors('xy')

    def go(inps, gs):
        x, y = inps
        g, = gs
        # The true gradients of x * y would be (g * y, g * x); the
        # override scales them by 2 and 1.5 so the test can detect it.
        return [g * y * 2, g * x * 1.5]

    dedz = T.vector('dedz')
    op_mul_grad = cls_ofg([x, y, dedz], go([x, y], [dedz]))
    op_mul = cls_ofg([x, y], [x * y], grad_overrides=go)
    op_mul2 = cls_ofg([x, y], [x * y], grad_overrides=op_mul_grad)

    # single override case (function or OfG instance)
    xx, yy = T.vector('xx'), T.vector('yy')
    for op in [op_mul, op_mul2]:
        zz = T.sum(op(xx, yy))
        dx, dy = T.grad(zz, [xx, yy])
        fn = function([xx, yy], [dx, dy])
        xv = np.random.rand(16).astype(config.floatX)
        yv = np.random.rand(16).astype(config.floatX)
        dxv, dyv = fn(xv, yv)
        assert np.allclose(yv * 2, dxv)
        assert np.allclose(xv * 1.5, dyv)

    # list override case
    def go1(inps, gs):
        x, w, b = inps
        g = gs[0]
        return g * w * 2

    def go2(inps, gs):
        x, w, b = inps
        g = gs[0]
        return g * x * 1.5

    w, b = T.vectors('wb')
    # The gradient of the third input is left at the default (no override).
    op_linear = cls_ofg([x, w, b], [x * w + b],
                        grad_overrides=[go1, go2, 'default'])
    xx, ww, bb = T.vector('xx'), T.vector('ww'), T.vector('bb')
    zz = T.sum(op_linear(xx, ww, bb))
    dx, dw, db = T.grad(zz, [xx, ww, bb])
    fn = function([xx, ww, bb], [dx, dw, db])
    xv = np.random.rand(16).astype(config.floatX)
    wv = np.random.rand(16).astype(config.floatX)
    bv = np.random.rand(16).astype(config.floatX)
    dxv, dwv, dbv = fn(xv, wv, bv)
    assert np.allclose(wv * 2, dxv)
    assert np.allclose(xv * 1.5, dwv)
    assert np.allclose(np.ones(16, dtype=config.floatX), dbv)

    # NullType and DisconnectedType overrides
    op_linear2 = cls_ofg(
        [x, w, b], [x * w + b],
        grad_overrides=[go1, NullType()(), DisconnectedType()()])
    zz2 = T.sum(op_linear2(xx, ww, bb))
    dx2, dw2, db2 = T.grad(
        zz2, [xx, ww, bb],
        return_disconnected='Disconnected',
        disconnected_inputs='ignore',
        null_gradients='return')
    assert isinstance(dx2.type, T.TensorType)
    assert dx2.ndim == 1
    assert isinstance(dw2.type, NullType)
    assert isinstance(db2.type, DisconnectedType)
Example 11: ifelse
from theano import tensor as T
from theano.ifelse import ifelse
import theano, time
import numpy as np

if __name__ == "__main__":
    a, b = T.scalars('a', 'b')
    x, y = T.vectors('x', 'y')
    z_lazy = ifelse(T.eq(a, b),  # condition
                    T.mean(x),   # then branch
                    T.mean(y))   # else branch
    var_1 = np.array([1, 2])
    var_2 = np.array([3, 4])
    condition_1 = 1
    condition_2 = 1
    iffunction = theano.function([a, b, x, y], [z_lazy])
    result = iffunction(condition_1, condition_2, var_1, var_2)
    print(result)
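The snippet above imports time but never uses it; presumably the original went on to compare the lazy ifelse against the eager T.switch, which always evaluates both branches. A minimal sketch of that comparison, with illustrative sizes and iteration counts:

import time
import numpy as np
import theano
import theano.tensor as T
from theano.ifelse import ifelse

a, b = T.scalars('a', 'b')
x, y = T.vectors('x', 'y')

z_switch = T.switch(T.lt(a, b), T.mean(x), T.mean(y))  # eager: both branches run
z_lazy = ifelse(T.lt(a, b), T.mean(x), T.mean(y))      # lazy: only the taken branch runs

f_switch = theano.function([a, b, x, y], z_switch)
f_lazy = theano.function([a, b, x, y], z_lazy)

big = np.ones(1000000, dtype=theano.config.floatX)
t0 = time.time()
for _ in range(100):
    f_switch(0, 1, big, big)
t1 = time.time()
for _ in range(100):
    f_lazy(0, 1, big, big)
t2 = time.time()
print('switch: %.3fs  ifelse: %.3fs' % (t1 - t0, t2 - t1))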
Example 12: any
#
# if any([x.op.__class__.__name__ in ['Gemv', 'CGemv', 'Gemm', 'CGemm'] for x in
#         train.maker.fgraph.toposort()]):
#     print('Used the cpu')
# elif any([x.op.__class__.__name__ in ['GpuGemm', 'GpuGemv'] for x in
#           train.maker.fgraph.toposort()]):
#     print('Used the gpu')
# else:
#     print('ERROR, not able to tell if theano used the cpu or the gpu')
#     print(train.maker.fgraph.toposort())
#
# for i in range(training_step):
#     pred, err = train(D[0], D[1])
#
# print("target values for D")
# print(D[1])
#
# print("prediction on D")
# print(predict(D[0]))

# x = T.dvector('x')
# f = theano.function(inputs=[x], outputs=10*x, mode='DebugMode')
# f([5])
# f([0])
# f([7])

from theano import ProfileMode
profmode = ProfileMode(optimizer='fast_run', linker=theano.gof.OpWiseCLinker())
v1, v2 = T.vectors(2)
o = v1 + v2
f = theano.function([v1, v2], [o], mode=profmode)
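ProfileMode was deprecated in later Theano releases. A hedged sketch of the newer equivalent, assuming a version that supports the profile flag on theano.function:

# Newer Theano: request profiling directly on the compiled function.
f = theano.function([v1, v2], [o], profile=True)
f([1., 2.], [3., 4.])
f.profile.summary()  # print the per-Op timing breakdown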
Example 13: test_functions
def test_functions(self):
    Case = namedtuple("Case", "func input_data answer")
    testcases = [
        Case(
            func=cg.fletcher_reeves,
            input_data=(
                np.array([1.35, 0.3]),
                np.array([0.11, -0.5]),
                np.array([0, 0]),
            ),
            answer=0.137,
        ),
        Case(
            func=cg.polak_ribiere,
            input_data=(
                np.array([1., -0.5]),
                np.array([1.2, -0.45]),
                np.array([0, 0]),
            ),
            answer=0.174,
        ),
        Case(
            func=cg.hentenes_stiefel,
            input_data=(
                np.array([1., -0.5]),
                np.array([1.2, -0.45]),
                np.array([0.2, 0.05]),
            ),
            answer=5.118,
        ),
        Case(
            func=cg.conjugate_descent,
            input_data=(
                np.array([1., -0.5]),
                np.array([1.2, -0.45]),
                np.array([0.2, 0.05]),
            ),
            answer=-7.323,
        ),
        Case(
            func=cg.liu_storey,
            input_data=(
                np.array([1., -0.5]),
                np.array([1.2, -0.45]),
                np.array([0.2, 0.05]),
            ),
            answer=1.243,
        ),
        Case(
            func=cg.dai_yuan,
            input_data=(
                np.array([1., -0.5]),
                np.array([1.2, -0.45]),
                np.array([0.2, 0.05]),
            ),
            answer=38.647,
        ),
    ]

    for testcase in testcases:
        input_data = asfloat(np.array(testcase.input_data))
        variables = T.vectors(3)
        # Some of the functions ignore one of their inputs. Compiling a
        # Theano function with an unused input raises an error, so we add
        # a term that touches the last variable without changing the result.
        hack = asfloat(0) * variables[-1][0]
        output_func = theano.function(
            variables,
            testcase.func(*variables) + hack
        )
        result = output_func(*input_data)
        self.assertAlmostEqual(result, testcase.answer, places=3)
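As a sanity check on the first case: the Fletcher-Reeves coefficient is beta = ||g_new||^2 / ||g_old||^2, and recomputing it in plain numpy reproduces the expected answer (assuming the first input is the previous gradient and the second the current one):

import numpy as np

g_old = np.array([1.35, 0.3])   # first input of the fletcher_reeves case
g_new = np.array([0.11, -0.5])  # second input
beta = np.dot(g_new, g_new) / np.dot(g_old, g_old)
print(round(beta, 3))  # 0.137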