This article collects typical usage examples of the Python method theano.tensor.col. If you are wondering what tensor.col does, how to call it, or what its usage looks like in practice, the curated code examples here may help. You can also explore further usage examples of the module it belongs to, theano.tensor.
The following shows 5 code examples of tensor.col, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback will help the system recommend better Python code examples.
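Before the examples, here is a minimal sketch (assuming a standard Theano installation) of what tensor.col declares: a 2-D symbolic variable whose second dimension is broadcastable, i.e. must have length 1 at runtime. The variable name and test data below are illustrative, not from the examples that follow.

import numpy as np
import theano
import theano.tensor as tt

x = tt.col('x')                          # 2-D variable; second dimension is broadcastable
print(x.ndim, x.broadcastable)           # 2 (False, True)

f = theano.function([x], (2 * x).sum())
data = np.ones((3, 1), dtype=theano.config.floatX)
print(f(data))                           # any (n, 1) array is accepted; (n, m > 1) is rejected

In other words, tt.col() behaves like tt.matrix() except that Theano knows the second dimension is always 1, which is what allows the broadcasting patterns used in the examples below.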
Example 1: test_wrong_broadcast
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import col [as alias]
def test_wrong_broadcast(self):
    a = tt.col()
    increment = tt.vector()

    # These symbolic graphs are legitimate as long as increment has exactly
    # one element, so they should fail at runtime, not at compile time.
    rng = numpy.random.RandomState(utt.fetch_seed())

    def rng_randX(*shape):
        return rng.rand(*shape).astype(theano.config.floatX)

    for op in (tt.set_subtensor, tt.inc_subtensor):
        for base in (a[:], a[0]):
            out = op(base, increment)
            f = theano.function([a, increment], out)
            # This one should work
            f(rng_randX(3, 1), rng_randX(1))
            # These ones should not
            self.assertRaises(ValueError,
                              f, rng_randX(3, 1), rng_randX(2))
            self.assertRaises(ValueError,
                              f, rng_randX(3, 1), rng_randX(3))
            self.assertRaises(ValueError,
                              f, rng_randX(3, 1), rng_randX(0))
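The same compile-time versus run-time distinction can be seen in a self-contained sketch; the imports and values below are assumptions for illustration, not part of the original test:

import numpy as np
import theano
import theano.tensor as tt

a = tt.col()
inc = tt.vector()
f = theano.function([a, inc], tt.inc_subtensor(a[:], inc))   # compiles fine
x = np.zeros((3, 1), dtype=theano.config.floatX)
print(f(x, np.ones(1, dtype=theano.config.floatX)))          # OK: a one-element increment broadcasts
try:
    f(x, np.ones(2, dtype=theano.config.floatX))             # the shape mismatch is only caught here
except ValueError as err:
    print('runtime error:', err)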
Example 2: test_ndim_mismatch
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import col [as alias]
def test_ndim_mismatch(self):
    rng = numpy.random.RandomState(utt.fetch_seed())
    data = rng.rand(5).astype(self.dtype)
    x = self.shared(data)              # 1-D shared variable
    y = tensor.col('y', self.dtype)    # 2-D column variable
    cond = theano.tensor.iscalar('cond')
    # The branches have different numbers of dimensions, so ifelse must reject them.
    self.assertRaises(TypeError, ifelse, cond, x, y)
    self.assertRaises(TypeError, ifelse, cond, y, x)
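If the two branches are actually meant to be compatible, one hedged fix (a sketch under standard Theano assumptions, not part of the original test) is to lift the 1-D value to a column with dimshuffle so both branches share the same 2-D type before calling ifelse:

import theano.tensor as tensor
from theano.ifelse import ifelse

cond = tensor.iscalar('cond')
x = tensor.vector('x')                        # 1-D
y = tensor.col('y')                           # 2-D, second dimension broadcastable
out = ifelse(cond, x.dimshuffle(0, 'x'), y)   # both branches now have matching 2-D types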
Example 3: neural_tensor_network
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import col [as alias]
def neural_tensor_network():
    # tensor params
    subj = T.col('e_1')
    targets = T.matrix('e_2')
    W = T.tensor3('W')

    # neural net params
    u = T.col('u')
    V = T.matrix('V')
    b = T.col('b')

    # tensor
    h = subj.T.dot(W).dot(targets)

    # neural net
    d = subj.shape[0]
    V_subj = V[:, :d].dot(subj)
    V_targ = V[:, d:].dot(targets)

    activations = T.tanh(h + V_subj + V_targ + b)
    score = u.T.dot(activations).reshape((-1, 1))

    margins = score[0] - score[1:]
    cost = T.min(T.concatenate((T.ones_like(margins), margins), axis=1),
                 axis=1).mean()

    gsubj, gtargets, gW, gu, gV, gb = T.grad(cost, [subj, targets, W, u, V, b])

    print('Compiling NTN score')
    score = theano.function([subj, W, targets, u, V, b], score, name='NTN Score',
                            mode='FAST_RUN')
    print('Compiling NTN fprop')
    fprop = theano.function([subj, W, targets, u, V, b], cost, name='NTN fprop',
                            mode='FAST_RUN')
    print('Compiling NTN bprop')
    bprop = theano.function([subj, W, targets, u, V, b],
                            outputs=[gsubj, gW, gtargets, gu, gV, gb],
                            name='NTN bprop', mode='FAST_RUN')
    return {'score': score, 'fprop': fprop, 'bprop': bprop}
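A hedged usage sketch follows; the dimensions d (embedding size), k (tensor slices) and n (number of candidate targets), and the random data, are assumptions for illustration rather than values from the original code:

import numpy as np
import theano

ntn = neural_tensor_network()
floatX = theano.config.floatX
d, k, n = 5, 4, 3                                       # assumed sizes
subj    = np.random.rand(d, 1).astype(floatX)           # T.col: one subject embedding
targets = np.random.rand(d, n).astype(floatX)           # one candidate target per column
W       = np.random.rand(k, d, d).astype(floatX)        # k bilinear slices
u       = np.random.rand(k, 1).astype(floatX)
V       = np.random.rand(k, 2 * d).astype(floatX)
b       = np.random.rand(k, 1).astype(floatX)
print(ntn['score'](subj, W, targets, u, V, b).shape)    # (n, 1): one score per target column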
Example 4: test_gemm_canonicalize
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import col [as alias]
def test_gemm_canonicalize():
    X, Y, Z = T.matrix('X'), T.matrix('Y'), T.matrix('Z')
    a, b = T.scalar('a'), T.scalar('b')
    R, S, U = T.matrix('R'), T.matrix('S'), T.matrix('U')
    c, d = T.scalar('c'), T.scalar('d')
    u = T.row('u')
    v = T.vector('v')
    w = T.col('w')

    can = []
    _gemm_canonicalize(X + Y + Z, 1.0, can, 0)
    assert can == [(1.0, X), (1.0, Y), (1.0, Z)]

    can = []
    _gemm_canonicalize(X + Y + u, 1.0, can, 0)
    assert can == [(1.0, X), (1.0, Y), (1.0, u)], can

    can = []
    _gemm_canonicalize(X + Y + v, 1.0, can, 0)
    # [(1.0, X), (1.0, Y), (1.0, InplaceDimShuffle{x,0}(v))]
    assert can[:2] == [(1.0, X), (1.0, Y)]
    assert isinstance(can[2], tuple)
    assert len(can[2]) == 2
    assert can[2][0] == 1.0
    assert can[2][1].owner
    assert isinstance(can[2][1].owner.op, T.DimShuffle)
    assert can[2][1].owner.inputs == [v]

    can = []
    _gemm_canonicalize(X + Y + w, 1.0, can, 0)
    assert can == [(1.0, X), (1.0, Y), (1.0, w)], can

    can = []
    _gemm_canonicalize(a * X + Y - b * Z * c, 1.0, can, 0)
    assert can[0] == (a, X)
    assert can[1] == (1.0, Y)
    assert can[2][0].owner.op == T.mul
    assert can[2][0].owner.inputs[0].owner.op == T.neg
    assert can[2][0].owner.inputs[0].owner.inputs[0] == c
    assert can[2][0].owner.inputs[1] == b

    can = []
    _gemm_canonicalize((-d) * X - (a * X + Y - b * Z * c), 1.0, can, 0)
    # print(can)
    assert can[0][0].owner.op == T.neg
    assert can[0][0].owner.inputs[0] == d
    assert can[0][1] == X
    assert can[1][0].owner.op == T.neg
    assert can[1][0].owner.inputs[0] == a
    assert can[2] == (-1.0, Y)
    assert can[3][0].owner.op == T.mul
    assert can[3][0].owner.inputs == [c, b]
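A hedged standalone sketch of the behaviour this test checks, assuming _gemm_canonicalize can be imported from theano.tensor.blas as the test module does: a col is already 2-D and is passed through unchanged, while a plain vector is wrapped in a DimShuffle by the canonicalizer.

import theano.tensor as T
from theano.tensor.blas import _gemm_canonicalize

X = T.matrix('X')
w = T.col('w')              # 2-D already: kept as-is
v = T.vector('v')           # 1-D: wrapped in a DimShuffle

can = []
_gemm_canonicalize(X + w, 1.0, can, 0)
print(can)                  # expected: [(1.0, X), (1.0, w)]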
Example 5: transE_model
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import col [as alias]
def transE_model():
    '''
    Note X_s is a column and X_t is a matrix so that broadcasting occurs
    across the columns of X_t (this allows batching X_t with negatives,
    for example).
    '''
    # construct theano expression graph
    X_s = T.col('X_s')
    W = T.matrix('W')
    X_t = T.matrix('X_t')

    rels = W[:, :, None].transpose(1, 0, 2)
    # Computes x_{r_1} + x_{r_2} + ... + x_{r_n} - X_t
    results, updates = theano.scan(fn=lambda rel, v: rel + v,
                                   outputs_info=-X_t, sequences=[rels])

    # score is always a column vector
    score = T.sum((X_s + results[-1]) ** 2, axis=0).reshape((-1, 1))
    margins = 1. + score[0] - score[1:]
    # zero out negative entries
    pos_parts = margins * (margins > 0)
    # we are using an online Maximizer, so the objective is negated
    cost = -pos_parts.mean()

    gX_s, gW, gX_t = T.grad(cost, [X_s, W, X_t])

    print('Compiling TransE score')
    # return negative score since this is a ranking
    score = theano.function([X_s, W, X_t], -score, name='transE Score',
                            mode='FAST_RUN')
    score.trust_input = True
    print('Compiling TransE fprop')
    fprop = theano.function([X_s, W, X_t], cost, name='transE fprop',
                            mode='FAST_RUN')
    fprop.trust_input = True
    print('Compiling TransE bprop')
    bprop = theano.function([X_s, W, X_t],
                            outputs=[gX_s, gW, gX_t],
                            name='transE bprop', mode='FAST_RUN')
    bprop.trust_input = True
    return {'score': score, 'fprop': fprop, 'bprop': bprop}
# Note: the model assumes only corrupted target entities are used, with the
# first target entity being the true one.
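As with the NTN example, a hedged usage sketch follows; the dimension values (d entities' embedding size, r relations in the path, n candidate targets) and the random data are illustrative assumptions, not part of the original code:

import numpy as np
import theano

transe = transE_model()
floatX = theano.config.floatX
d, r, n = 5, 2, 4                                  # assumed sizes
X_s = np.random.rand(d, 1).astype(floatX)          # source entity (T.col)
W   = np.random.rand(d, r).astype(floatX)          # one relation embedding per column
X_t = np.random.rand(d, n).astype(floatX)          # first column: true target; rest: corrupted
print(transe['score'](X_s, W, X_t).shape)          # (n, 1): negated score per target column
print(transe['fprop'](X_s, W, X_t))                # scalar cost

Because the compiled functions set trust_input = True, inputs must already be numpy arrays of the exact dtype and dimensionality expected; no conversion or checking is performed at call time.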