This article collects typical usage examples of the Python function theano.tensor.vector. If you have been wondering what theano.tensor.vector does, how to call it, or what it looks like in real code, the curated examples below should help.
In total, 15 code examples of the vector function are shown, ordered by default from most to least popular.
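Before the individual examples, here is a minimal, self-contained sketch (not drawn from any of the snippets below) of the pattern they all share: declare symbolic 1-D variables with T.vector, build an expression on them, and compile it with theano.function. The snippets below generally assume the same import conventions (theano, theano.tensor as T, and numpy).

import numpy as np
import theano
import theano.tensor as T

# T.vector creates a symbolic 1-D variable of dtype floatX; no data is attached yet.
x = T.vector('x')
w = T.vector('w')

# Build a scalar expression: the logistic function of the dot product of x and w.
s = 1 / (1 + T.exp(-T.dot(x, w)))

# Compile the symbolic graph into a callable that accepts concrete NumPy arrays.
logistic = theano.function([x, w], s)

xv = np.asarray([1.0, 2.0], dtype=theano.config.floatX)
wv = np.asarray([0.5, -0.25], dtype=theano.config.floatX)
print(logistic(xv, wv))  # the dot product is 0, so this prints 0.5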
Example 1: calculate
def calculate(w1, w2, data, display):
    x = T.vector('x')
    w = T.vector('w')
    s = 1 / (1 + T.exp(-T.dot(x, w)))
    logistic = theano.function([x, w], s)
    if display:
        print("With: w1 = %f and w2 = %f" % (w1, w2))
    sum_error = 0
    sum_error_square = 0
    if isinstance(data, str) or not len(data):
        # `data` is a file name (or empty): load the samples into a fresh list.
        filename = data if isinstance(data, str) and len(data) else 'Data.txt'
        data = []
        with open('dataFiles/' + filename) as fp:
            reader = csv.reader(fp, delimiter=',')
            for line in reader:
                data.append([int(line[0]), float(line[1]), float(line[2])])
    if display:
        print('y\t\tf(x)\t\tE\t\tE^2')
    for i in range(0, len(data)):
        x1 = data[i][1]
        x2 = data[i][2]
        f = logistic([x1, x2], [w1, w2])
        e = data[i][0] - f
        e2 = e ** 2
        sum_error += e
        sum_error_square += e2
        if display:
            print('%f\t%f\t%f\t%f' % (data[i][0], f, e, e2))
    if display:
        print("\nSum:\t\t\t\t%f\t%f" % (sum_error, sum_error_square))
    return sum_error_square
Example 2: test_softmax_optimizations_w_bias2
def test_softmax_optimizations_w_bias2(self):
    x = tensor.matrix('x')
    b = tensor.vector('b')
    c = tensor.vector('c')
    one_of_n = tensor.lvector('one_of_n')
    op = crossentropy_categorical_1hot

    env = gof.Env(
        [x, b, c, one_of_n],
        [op(softmax(T.add(x, b, c)), one_of_n)])

    assert env.outputs[0].owner.op == op

    print 'BEFORE'
    for node in env.toposort():
        print node.op
    print '----'

    theano.compile.mode.optdb.query(
        theano.compile.mode.OPT_FAST_RUN).optimize(env)

    print 'AFTER'
    for node in env.toposort():
        print node.op
    print '===='

    assert len(env.toposort()) == 3
    assert str(env.outputs[0].owner.op) == 'OutputGuard'
    assert env.outputs[0].owner.inputs[0].owner.op == crossentropy_softmax_argmax_1hot_with_bias
Example 3: test_grad_lazy_if
def test_grad_lazy_if(self):
    # Tests that we can compute the gradients through lazy if
    x = tensor.vector('x', dtype=self.dtype)
    y = tensor.vector('y', dtype=self.dtype)
    c = tensor.iscalar('c')
    z = ifelse(c, x, y)
    gx, gy = tensor.grad(z.sum(), [x, y])

    f = theano.function([c, x, y], [self.cast_output(gx),
                                    self.cast_output(gy)],
                        mode=self.mode)
    # Only 2 of the 3 ifelse nodes are moved to the GPU;
    # the one that stays on the CPU is for the shape.
    self.assertFunctionContains(f, self.get_ifelse(1), min=2, max=3)

    rng = numpy.random.RandomState(utt.fetch_seed())
    xlen = rng.randint(200)
    ylen = rng.randint(200)
    vx = numpy.asarray(rng.uniform(size=(xlen,)), self.dtype)
    vy = numpy.asarray(rng.uniform(size=(ylen,)), self.dtype)

    gx0, gy0 = f(1, vx, vy)
    assert numpy.allclose(gx0.shape, vx.shape)
    assert numpy.allclose(gy0.shape, vy.shape)
    assert numpy.all(numpy.asarray(gx0) == 1.)
    assert numpy.all(numpy.asarray(gy0) == 0.)

    gx0, gy0 = f(0, vx, vy)
    assert numpy.allclose(gx0.shape, vx.shape)
    assert numpy.allclose(gy0.shape, vy.shape)
    assert numpy.all(numpy.asarray(gx0) == 0.)
    assert numpy.all(numpy.asarray(gy0) == 1.)
Example 4: test_wrong_rcond_dimension
def test_wrong_rcond_dimension(self):
    x = tensor.vector()
    y = tensor.vector()
    z = tensor.vector()
    b = theano.tensor.nlinalg.lstsq()(x, y, z)
    f = function([x, y, z], b)
    self.assertRaises(np.linalg.LinAlgError, f, [2, 1], [2, 1], [2, 1])
Example 5: _compile_func
def _compile_func():
    beta = T.vector('beta')
    b = T.scalar('b')
    X = T.matrix('X')
    y = T.vector('y')
    C = T.scalar('C')
    params = [beta, b, X, y, C]
    cost = 0.5 * (T.dot(beta, beta) + b * b) + C * T.sum(
        T.nnet.softplus(
            -T.dot(T.diag(y), T.dot(X, beta) + b)
        )
    )
    # Function computing in one go the cost, its gradient
    # with regard to beta and with regard to the bias.
    cost_grad = theano.function(params, [
        cost,
        T.grad(cost, beta),
        T.grad(cost, b)
    ])

    # Function for computing element-wise sigmoid, used for
    # prediction.
    log_predict = theano.function(
        [beta, b, X],
        T.nnet.sigmoid(b + T.dot(X, beta)),
        on_unused_input='warn'
    )

    return (cost_grad, log_predict)
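As a rough usage sketch (not part of the original example; the toy data and variable names are invented for illustration), the two compiled functions returned by _compile_func might be driven as follows. Labels are assumed to be encoded as +1/-1, which is what the softplus hinge term above implies.

import numpy as np
import theano

cost_grad, log_predict = _compile_func()

# Toy problem: 4 samples with 2 features each, labels in {-1, +1}.
X = np.asarray([[0., 1.], [1., 0.], [1., 1.], [0., 0.]], dtype=theano.config.floatX)
y = np.asarray([1., -1., 1., -1.], dtype=theano.config.floatX)
beta = np.zeros(2, dtype=theano.config.floatX)
b = 0.0   # bias (Python floats are accepted for the scalar inputs)
C = 1.0   # regularization trade-off

cost, grad_beta, grad_b = cost_grad(beta, b, X, y, C)   # objective value and its gradients
probs = log_predict(beta, b, X)                         # per-sample sigmoid scores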
Example 6: test_infer_shape
def test_infer_shape(self):
    for ndim in [1, 3]:
        x = T.TensorType(config.floatX, [False] * ndim)()
        shp = (np.arange(ndim) + 1) * 3
        a = np.random.random(shp).astype(config.floatX)

        for axis in self._possible_axis(ndim):
            for dtype in ["int8", "uint8", "uint64"]:
                r_var = T.scalar(dtype=dtype)
                r = np.asarray(3, dtype=dtype)
                if dtype in self.numpy_unsupported_dtypes:
                    r_var = T.vector(dtype=dtype)
                    self.assertRaises(TypeError, repeat, x, r_var)
                else:
                    self._compile_and_check([x, r_var],
                                            [RepeatOp(axis=axis)(x, r_var)],
                                            [a, r],
                                            self.op_class)

                    r_var = T.vector(dtype=dtype)
                    if axis is None:
                        r = np.random.randint(
                            1, 6, size=a.size).astype(dtype)
                    elif a.size > 0:
                        r = np.random.randint(
                            1, 6, size=a.shape[axis]).astype(dtype)
                    else:
                        r = np.random.randint(
                            1, 6, size=(10,)).astype(dtype)

                    self._compile_and_check(
                        [x, r_var],
                        [RepeatOp(axis=axis)(x, r_var)],
                        [a, r],
                        self.op_class)
Example 7: test_tagging
def test_tagging():
    brick = TestBrick(0)
    x = tensor.vector('x')
    y = tensor.vector('y')
    z = tensor.vector('z')

    def check_output_variable(o):
        assert get_application_call(o).application.brick is brick
        assert (get_application_call(o.owner.inputs[0]).application.brick
                is brick)

    # Case 1: both positional arguments are provided.
    u, v = brick.apply(x, y)
    for o in [u, v]:
        check_output_variable(o)

    # Case 2: `y` is given as a keyword argument.
    u, v = brick.apply(x, y=y)
    for o in [u, v]:
        check_output_variable(o)

    # Case 3: two positional and one keyword argument.
    u, v, w = brick.apply(x, y, z=z)
    for o in [u, v, w]:
        check_output_variable(o)

    # Case 4: one positional argument.
    u, v = brick.apply(x)
    check_output_variable(u)
    assert v == 1

    # Case 5: variable was wrapped in a list. We cannot handle that.
    u, v = brick.apply([x])
    assert_raises(AttributeError, check_output_variable, u)
Example 8: test_grad_lazy_if
def test_grad_lazy_if(self):
    # Tests that we can compute the gradients through lazy if
    x = tensor.vector('x')
    y = tensor.vector('y')
    c = tensor.iscalar('c')
    z = ifelse(c, x, y)
    gx, gy = tensor.grad(z.sum(), [x, y])
    f = theano.function([c, x, y], [gx, gy])

    rng = numpy.random.RandomState(utt.fetch_seed())
    xlen = rng.randint(200)
    ylen = rng.randint(200)
    vx = numpy.asarray(rng.uniform(size=(xlen,)), theano.config.floatX)
    vy = numpy.asarray(rng.uniform(size=(ylen,)), theano.config.floatX)

    gx0, gy0 = f(1, vx, vy)
    assert numpy.allclose(gx0.shape, vx.shape)
    assert numpy.allclose(gy0.shape, vy.shape)
    assert numpy.all(gx0 == 1.)
    assert numpy.all(gy0 == 0.)

    gx0, gy0 = f(0, vx, vy)
    assert numpy.allclose(gx0.shape, vx.shape)
    assert numpy.allclose(gy0.shape, vy.shape)
    assert numpy.all(gx0 == 0.)
    assert numpy.all(gy0 == 1.)
Example 9: test_lop_override
def test_lop_override(self, cls_ofg):
    x = T.vector()
    y = 1. / (1. + T.exp(-x))

    def lop_ov(inps, outs, grads):
        y_, = outs
        dedy_, = grads
        return [2. * y_ * (1. - y_) * dedy_]

    y_, dedy = T.vector(), T.vector()
    op_lop_ov = cls_ofg([x, y_, dedy], [2. * y_ * (1. - y_) * dedy])

    xx = T.vector()
    yy1 = T.sum(T.nnet.sigmoid(xx))
    gyy1 = 2. * T.grad(yy1, xx)

    for ov in [lop_ov, op_lop_ov]:
        op = cls_ofg([x], [y], lop_overrides=ov)
        yy2 = T.sum(op(xx))
        gyy2 = T.grad(yy2, xx)
        fn = function([xx], [gyy1, gyy2])

        xval = np.random.rand(32).astype(config.floatX)
        y1val, y2val = fn(xval)
        assert np.allclose(y1val, y2val)
Example 10: __init__
def __init__(self, dic_size, window, unit_id, tag_num, net_size, weight_decay,
             word_dim=50, learning_rate=0.1):
    def f_softplus(x): return T.log(T.exp(x) + 1)  # - np.log(2)
    def f_rectlin(x): return x * (x > 0)
    def f_rectlin2(x): return x * (x > 0) + 0.01 * x
    nonlinear = {'tanh': T.tanh, 'sigmoid': T.nnet.sigmoid,
                 'softplus': f_softplus, 'rectlin': f_rectlin,
                 'rectlin2': f_rectlin2}
    self.non_unit = nonlinear[unit_id]
    self.weight_decay = weight_decay
    self.tag_num = tag_num
    self.window_size = window
    self.learning_rate = learning_rate
    self.worddim = word_dim
    self.w, self.b, self.A = self.init_w(net_size, tag_num)
    self.w2vtable = self.init_wtable(word_dim, dic_size)  # table of word vectors

    x = T.vector('x')
    w = []
    b = []
    for i in range(len(self.w)):
        w.append(T.matrix())
        b.append(T.vector())
    output = self.network(x, w, b)
    og = []
    for j in range(self.tag_num):
        og.extend(T.grad(output[j], w + b + [x]))
    self.outfunction = theano.function([x] + w + b, output)
    self.goutfunction = theano.function([x] + w + b, [output] + og)
Example 11: test_logpy
def test_logpy():
    x = tensor.vector()
    y = tensor.vector()
    z = tensor.inc_subtensor(x[1:3], y)
    node = z.owner

    # otherwise theano chokes on var attributes when nose tries to print a traceback
    # XXX this should be un-monkey-patched after the test runs by e.g. a
    # context manager decorator
    theano.gof.Apply.__repr__ = object.__repr__
    theano.gof.Apply.__str__ = object.__str__

    w = dict((name, var(name)) for name in [
        'start', 'stop', 'step', 'set_instead_of_inc', 'inputs', 'outputs',
        'inplace', 'whole_op', 'dta',
    ])
    pattern = raw_init(theano.Apply,
                       op=raw_init(theano.tensor.IncSubtensor,
                                   idx_list=[slice(w['start'], w['stop'], w['step'])],
                                   inplace=w['inplace'],
                                   set_instead_of_inc=w['set_instead_of_inc'],
                                   destroyhandler_tolerate_aliased=w['dta']),
                       inputs=w['inputs'],
                       outputs=w['outputs'])
    match, = run(0, w, (eq, node, pattern))

    assert match['stop'] == 3
    assert match['inputs'] == [x, y]
Example 12: __init__
def __init__(self, C, D):
    self.W = theano.shared(np.ones((C, D), dtype='float32'))

    t_M = T.matrix('M', dtype='float32')
    t_vM = T.vector('M', dtype='float32')
    t_Y = T.vector('Y', dtype='float32')
    t_I = T.vector('I', dtype='float32')
    t_s = T.vector('s', dtype='float32')
    t_eps = T.scalar('epsilon', dtype='float32')

    self.input_integration = theano.function(
        [t_Y],
        T.dot(T.log(self.W), t_Y),
        allow_input_downcast=True
    )

    self.M_summation = theano.function(
        [t_M],
        T.sum(t_M, axis=0),
        allow_input_downcast=True
    )

    self.recurrent_softmax = theano.function(
        [t_I, t_vM],
        t_vM * T.exp(t_I) / T.sum(t_vM * T.exp(t_I)),
        allow_input_downcast=True
    )

    self.weight_update = theano.function(
        [t_Y, t_s, t_eps],
        self.W,
        updates={
            self.W:
                self.W + t_eps * (T.outer(t_s, t_Y) - t_s[:, np.newaxis] * self.W)
        },
        allow_input_downcast=True
    )

    self.epsilon = None
    self._Y = None
    self._s = None
Example 13: test_rmsprop_0
def test_rmsprop_0():
    # input
    x = TT.vector(name='x')
    B = theano.shared(floatX(np.ones((3, 5))), name='B')
    c = theano.shared(floatX(np.ones(3)), name='c')
    params = [B, c]
    # output
    y_pred = TT.nnet.softmax(TT.dot(B, x.T).T + c)
    y_gold = TT.vector(name="y_gold")
    # cost and grads
    cost = TT.sum((y_pred - y_gold) ** 2)
    grads = TT.grad(cost, wrt=params)
    # funcs
    cost_func, update_func, rms_params = rmsprop(params, grads,
                                                 [x], y_gold, cost)
    # check return values
    assert len(rms_params) == 4
    assert isinstance(rms_params[0][0], TT.sharedvar.TensorSharedVariable)
    assert not np.any(rms_params[0][0].get_value())
    # check convergence
    X = [floatX(np.random.rand(5)) for _ in xrange(N)]
    Y = [floatX(np.random.rand(3)) for _ in xrange(N)]
    icost = init_cost = end_cost = 0.
    for i in xrange(MAX_I):
        icost = 0.
        for x, y in zip(X, Y):
            icost += cost_func(x, y)
        update_func()
        if i == 0:
            init_cost = icost
        elif i == MAX_I - 1:
            end_cost = icost
    assert end_cost < init_cost
Example 14: setup_decoder_step
def setup_decoder_step(self):
    """Advance the decoder by one step. Used at test time."""
    y_t = T.lscalar('y_t_for_dec')
    c_prev = T.vector('c_prev_for_dec')
    h_prev = T.vector('h_prev_for_dec')
    h_t = self.spec.f_dec(y_t, c_prev, h_prev)
    self._decoder_step = theano.function(inputs=[y_t, c_prev, h_prev], outputs=h_t)
Example 15: test_softmax_optimizations_w_bias_vector
def test_softmax_optimizations_w_bias_vector(self):
    x = tensor.vector('x')
    b = tensor.vector('b')
    one_of_n = tensor.lvector('one_of_n')
    op = crossentropy_categorical_1hot
    fgraph = gof.FunctionGraph(
        [x, b, one_of_n],
        [op(softmax(x + b), one_of_n)])
    assert fgraph.outputs[0].owner.op == op
    #print 'BEFORE'
    #for node in fgraph.toposort():
    #    print node.op
    #print printing.pprint(node.outputs[0])
    #print '----'

    theano.compile.mode.optdb.query(
        theano.compile.mode.OPT_FAST_RUN).optimize(fgraph)
    #print 'AFTER'
    #for node in fgraph.toposort():
    #    print node.op
    #print '===='

    assert len(fgraph.toposort()) == 3
    assert str(fgraph.outputs[0].owner.op) == 'OutputGuard'
    assert (fgraph.outputs[0].owner.inputs[0].owner.op ==
            crossentropy_softmax_argmax_1hot_with_bias)