This article collects typical usage examples of the Python function theano.tensor.matrix. If you have been wondering exactly what the matrix function does, how to use it, or what real calls to it look like, the hand-picked code examples below should help.
Shown below are 15 code examples of the matrix function, drawn from open-source projects and ordered roughly by popularity.
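Before the examples, here is a minimal self-contained sketch of the pattern nearly all of them share: declare symbolic matrices with T.matrix, build an expression, and compile it with theano.function. The names a, b, and matmul are illustrative, not taken from any example below.

import numpy as np
import theano
import theano.tensor as T

# Declare two symbolic float matrices; the string names only aid debugging.
a = T.matrix('a')
b = T.matrix('b')

# Build a symbolic expression and compile it into a callable.
matmul = theano.function(inputs=[a, b], outputs=T.dot(a, b))

# The compiled function accepts ordinary NumPy arrays.
x = np.random.rand(3, 4).astype(theano.config.floatX)
y = np.random.rand(4, 2).astype(theano.config.floatX)
print(matmul(x, y).shape)  # prints (3, 2)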
Example 1: funcs
def funcs(dataset, network, batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE, momentum=MOMENTUM, alpha=L2_CONSTANT):
    """
    Returns the theano functions that are used in training and
    testing: the train, valid and predict functions. The predict
    function returns the output of the network.
    """
    # symbolic variables
    X_batch = T.matrix()
    y_batch = T.matrix()
    # L2 weight penalty; the original passed X_batch here, but the penalty
    # should be computed over the network parameters, not the input batch
    l2 = lasagne.regularization.regularize_network_params(network, lasagne.regularization.l2)
    # this is the cost of the network when fed through the noisy (stochastic) network
    train_output = lasagne.layers.get_output(network, X_batch)
    cost = lasagne.objectives.mse(train_output, y_batch)
    cost = cost.mean()  # + alpha*l2
    # test the performance of the network without noise (deterministic pass)
    test = lasagne.layers.get_output(network, X_batch, deterministic=True)
    pred = T.argmax(test, axis=1)
    # compare predicted class indices against the argmax of the one-hot targets
    accuracy = T.mean(T.eq(pred, T.argmax(y_batch, axis=1)), dtype=theano.config.floatX)
    all_params = lasagne.layers.get_all_params(network)
    updates = lasagne.updates.nesterov_momentum(cost, all_params, learning_rate, momentum)
    train = theano.function(inputs=[X_batch, y_batch], outputs=cost, updates=updates, allow_input_downcast=True)
    valid = theano.function(inputs=[X_batch, y_batch], outputs=cost, allow_input_downcast=True)
    predict = theano.function(inputs=[X_batch], outputs=pred, allow_input_downcast=True)
    return dict(
        train=train,
        valid=valid,
        predict=predict,
    )
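A hypothetical training loop over the returned dictionary might look like the sketch below; network, X_train, and y_train stand in for objects the surrounding project would provide, and dataset is unused by the function body above, so None is passed.

# Sketch only: `network`, `X_train` and `y_train` are assumed to exist.
fns = funcs(dataset=None, network=network)
for epoch in range(10):
    batch_costs = []
    for start in range(0, len(X_train), BATCH_SIZE):
        xb = X_train[start:start + BATCH_SIZE]
        yb = y_train[start:start + BATCH_SIZE]
        batch_costs.append(fns['train'](xb, yb))
    print('epoch %d, mean cost %.4f' % (epoch, sum(batch_costs) / len(batch_costs)))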
Example 2: __init__
def __init__(self, embedding_dim=100, num_hidden_layers=2, hidden_dim=200, in_dropout_p=0.2, hidden_dropout_p=0.5, update_hyperparams={'learning_rate': 0.01}):
    self.embedding_dim = embedding_dim
    self.num_hidden_layers = num_hidden_layers
    self.hidden_dim = hidden_dim
    self.in_dropout_p = in_dropout_p
    self.hidden_dropout_p = hidden_dropout_p
    print >> sys.stderr, 'Building computation graph for discriminator...'
    self.input_var = T.matrix('input')
    self.target_var = T.matrix('target')
    self.l_in = lasagne.layers.InputLayer(shape=(None, self.embedding_dim), input_var=T.tanh(self.input_var), name='l_in')
    self.l_in_dr = lasagne.layers.DropoutLayer(self.l_in, self.in_dropout_p)
    self.layers = [self.l_in, self.l_in_dr]
    for i in xrange(self.num_hidden_layers):
        # leaky_relu_gain is defined at module level in the original project
        l_hid = lasagne.layers.batch_norm(lasagne.layers.DenseLayer(self.layers[-1], num_units=self.hidden_dim, nonlinearity=lasagne.nonlinearities.leaky_rectify, W=lasagne.init.GlorotUniform(gain=leaky_relu_gain), name=('l_hid_%s' % i)))
        l_hid_dr = lasagne.layers.DropoutLayer(l_hid, self.hidden_dropout_p)
        self.layers.append(l_hid)
        self.layers.append(l_hid_dr)
    self.l_preout = lasagne.layers.batch_norm(lasagne.layers.DenseLayer(self.layers[-1], num_units=1, nonlinearity=None, name='l_preout'))
    self.l_out = lasagne.layers.NonlinearityLayer(self.l_preout, nonlinearity=lasagne.nonlinearities.sigmoid, name='l_out')
    self.prediction = lasagne.layers.get_output(self.l_out)
    self.loss = lasagne.objectives.binary_crossentropy(self.prediction, self.target_var).mean()
    self.accuracy = T.eq(T.ge(self.prediction, 0.5), self.target_var).mean()
    self.params = lasagne.layers.get_all_params(self.l_out, trainable=True)
    self.updates = lasagne.updates.adam(self.loss, self.params, **update_hyperparams)
    print >> sys.stderr, 'Compiling discriminator...'
    self.train_fn = theano.function([self.input_var, self.target_var], [self.loss, self.accuracy], updates=self.updates)
    self.eval_fn = theano.function([self.input_var, self.target_var], [self.loss, self.accuracy])
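The excerpt does not show the enclosing class, so the name Discriminator below is an assumption; the sketch just shows how the compiled train_fn would be fed dense embeddings and binary targets.

import numpy as np

# Hypothetical usage; `Discriminator` is a stand-in for the class this
# __init__ belongs to.
disc = Discriminator(embedding_dim=100)
X = np.random.randn(64, 100).astype('float32')
t = np.random.randint(0, 2, size=(64, 1)).astype('float32')
loss, acc = disc.train_fn(X, t)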
Example 3: Z_LSTM
def Z_LSTM(input_var, z_dim=256, nhid=512, layers=2, gradclip=10, training=True):
    ret = {}
    state_vars = []
    ret['input'] = layer = nn.layers.InputLayer(input_var=input_var, shape=(None, None, z_dim))
    batchsize, seqlen, _ = layer.input_var.shape
    for lay in xrange(layers):
        ret['drop_{}'.format(lay)] = layer = nn.layers.DropoutLayer(layer, p=0.3)
        if training:
            ret['lstm_{}'.format(lay)] = layer = LSTMSampleableLayer(layer, nhid,
                grad_clipping=gradclip, learn_init=True)
        else:
            cell_var = T.matrix('cell_var_{}'.format(lay))
            hid_var = T.matrix('hid_var_{}'.format(lay))
            state_vars.append(cell_var)
            state_vars.append(hid_var)
            ret['lstm_{}'.format(lay)] = layer = LSTMSampleableLayer(layer, nhid,
                cell_init=cell_var, hid_init=hid_var)
        ret['cell_{}'.format(lay)] = nn.layers.SliceLayer(layer, axis=2,
            indices=slice(None, nhid))
        ret['hid_{}'.format(lay)] = layer = nn.layers.SliceLayer(layer, axis=2,
            indices=slice(nhid, None))
    ret['reshape'] = layer = nn.layers.ReshapeLayer(layer, (-1, nhid))
    ret['project'] = layer = nn.layers.DenseLayer(layer, num_units=z_dim, nonlinearity=None)
    ret['output'] = layer = nn.layers.ReshapeLayer(layer, (batchsize, seqlen, z_dim))
    # final state slice layers for passing to the next instance of the lstm
    for lay in xrange(layers):
        ret['cellfinal_{}'.format(lay)] = nn.layers.SliceLayer(ret['cell_{}'.format(lay)],
            axis=1, indices=-1)
        ret['hidfinal_{}'.format(lay)] = nn.layers.SliceLayer(ret['hid_{}'.format(lay)],
            axis=1, indices=-1)
    return ret, state_vars
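At sampling time (training=False) the returned state_vars become extra inputs, so hidden state can be carried across calls. Below is a sketch of how such a step function might be compiled, assuming the Z_LSTM network above with its default two layers and that LSTMSampleableLayer is importable; nn is the lasagne alias used in the example.

import theano
import theano.tensor as T
import lasagne

# Sketch only: builds the network in inference mode and compiles a
# function whose extra inputs/outputs are the per-layer LSTM states.
input_var = T.tensor3('z_seq')
net, state_vars = Z_LSTM(input_var, training=False)
outputs = lasagne.layers.get_output(
    [net['output'],
     net['cellfinal_0'], net['hidfinal_0'],
     net['cellfinal_1'], net['hidfinal_1']],
    deterministic=True)
step_fn = theano.function([input_var] + state_vars, outputs)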
Example 4: fine_train
def fine_train(nn, datasets, learning_Rate, batch_sizes, epochs):
    # the shared variables are named train_data/train_label to match the
    # givens below; the original mixed in train_set_x/train_set_y
    train_data, train_label = datasets[0]
    n_batches = train_data.get_value(borrow=True).shape[0] / batch_sizes
    train_label = T.cast(train_label, 'float64')
    index = T.lscalar()
    x = T.matrix('x')
    y = T.matrix('y')
    min_batch_cost = []
    if nn is None:
        # n_in, n_out and hidden_sizes are module-level settings in the original project
        mynn = ForwordNN(x, y, n_in, n_out, hidden_sizes)
    else:
        mynn = nn
    cost, update = mynn.get_cost_update(x, y, learning_Rate)
    train_nn = theano.function([index],
                               cost,
                               updates=update,
                               givens={
                                   x: train_data[index * batch_sizes:(index + 1) * batch_sizes, :],
                                   y: train_label[index * batch_sizes:(index + 1) * batch_sizes, :]
                               })
    for num_epochs in range(epochs):
        t1 = time.time()
        for num_batch in xrange(n_batches):
            min_batch_cost.append(train_nn(num_batch))
        t2 = time.time()
        print 'The %d/%dth training takes %f seconds, cost is %f' % (num_epochs + 1, epochs, (t2 - t1), np.mean(min_batch_cost))
    return mynn
Example 5: init_variables
def init_variables(self):
    self.input_var = T.matrix('inputs')
    self.side_var = T.matrix('contexts')
    # do regression
    # self.target_var = T.ivector('targets')
    self.target_var = T.vector('targets')
    self.num_classes = 1  # regression -> dim matters, not classes
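Since this model does regression, the target is a float vector rather than the integer vector a classifier would use, which is what the commented-out line hints at. In Theano terms:

import theano.tensor as T

regression_targets = T.vector('targets')       # dtype floatX, one real value per sample
classification_targets = T.ivector('targets')  # dtype int32, one class index per sample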
Example 6: test_string_var
def test_string_var(self):
    orig_compute_test_value = theano.config.compute_test_value
    try:
        theano.config.compute_test_value = 'raise'

        x = T.matrix('x')
        x.tag.test_value = numpy.random.rand(3, 4).astype(config.floatX)
        y = T.matrix('y')
        y.tag.test_value = numpy.random.rand(4, 5).astype(config.floatX)
        z = theano.shared(numpy.random.rand(5, 6).astype(config.floatX))

        # should work
        out = T.dot(T.dot(x, y), z)
        assert hasattr(out.tag, 'test_value')
        tf = theano.function([x, y], out)
        assert _allclose(tf(x.tag.test_value, y.tag.test_value),
                         out.tag.test_value)

        def f(x, y, z):
            return T.dot(T.dot(x, y), z)

        # this test should fail: the shared value no longer matches in shape
        z.set_value(numpy.random.rand(7, 6).astype(config.floatX))
        self.assertRaises(ValueError, f, x, y, z)
    finally:
        theano.config.compute_test_value = orig_compute_test_value
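The test above exercises Theano's test-value machinery. A compact standalone sketch of the same idea, with illustrative shapes, shows how a shape error is caught while the graph is being built rather than at run time:

import numpy as np
import theano
import theano.tensor as T

theano.config.compute_test_value = 'raise'

x = T.matrix('x')
x.tag.test_value = np.random.rand(3, 4).astype(theano.config.floatX)
y = T.matrix('y')
y.tag.test_value = np.random.rand(5, 6).astype(theano.config.floatX)

try:
    T.dot(x, y)  # (3, 4) x (5, 6) is incompatible
except ValueError as e:
    print('caught at graph-construction time: %s' % e)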
Example 7: set_generation_function
def set_generation_function(recurrent_model, output_model):
    # set input data (1 * num_samples * features)
    input_data = tensor.matrix(name='input_seq', dtype=floatX)
    # set initial hidden/cell state (num_samples * hidden_size)
    prev_hidden_data = tensor.matrix(name='prev_hidden_data', dtype=floatX)
    prev_cell_data = tensor.matrix(name='prev_cell_data', dtype=floatX)

    # get hidden data
    recurrent_data = get_tensor_output(input=[input_data, prev_hidden_data, prev_cell_data], layers=recurrent_model, is_training=False)
    cur_hidden_data = recurrent_data[0]
    cur_cell_data = recurrent_data[1]

    # get prediction data
    output_data = get_tensor_output(input=cur_hidden_data, layers=output_model, is_training=False)

    # input data
    generation_function_inputs = [input_data,
                                  prev_hidden_data,
                                  prev_cell_data]
    generation_function_outputs = [cur_hidden_data,
                                   cur_cell_data,
                                   output_data]

    generation_function = theano.function(inputs=generation_function_inputs,
                                          outputs=generation_function_outputs,
                                          on_unused_input='ignore')
    return generation_function
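A generation loop built on the returned function might then look like the following sketch; recurrent_model, output_model, num_samples, feature_dim, hidden_dim and max_steps are placeholders for values the project defines elsewhere.

import numpy as np

# Sketch only: all names below except `set_generation_function` are assumed.
gen_fn = set_generation_function(recurrent_model, output_model)
hidden = np.zeros((num_samples, hidden_dim), dtype='float32')
cell = np.zeros((num_samples, hidden_dim), dtype='float32')
step = np.zeros((num_samples, feature_dim), dtype='float32')
for t in range(max_steps):
    # feed the previous prediction and state back in at each step
    hidden, cell, step = gen_fn(step, hidden, cell)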
Example 8: est_both_assert_merge_2_reverse
def est_both_assert_merge_2_reverse(self):
    # Test case "test_both_assert_merge_2" but in reverse order
    # (the 'est_' prefix keeps the test runner from collecting this test)
    x1 = T.matrix('x1')
    x2 = T.matrix('x2')
    x3 = T.matrix('x3')
    e = T.dot(x1, T.opt.assert_op(x2, (x2 > x3).all())) +\
        T.dot(T.opt.assert_op(x1, (x1 > x3).all()), x2)
    g = FunctionGraph([x1, x2, x3], [e])
    MergeOptimizer().optimize(g)
    strg = theano.printing.debugprint(g, file='str')
    strref = '''Elemwise{add,no_inplace} [@A] ''   7
 |dot [@B] ''   6
 | |Assert{msg='Theano Assert failed!'} [@C] ''   5
 | | |x1 [@D]
 | | |All [@E] ''   3
 | |   |Elemwise{gt,no_inplace} [@F] ''   1
 | |     |x1 [@D]
 | |     |x3 [@G]
 | |Assert{msg='Theano Assert failed!'} [@H] ''   4
 |   |x2 [@I]
 |   |All [@J] ''   2
 |     |Elemwise{gt,no_inplace} [@K] ''   0
 |       |x2 [@I]
 |       |x3 [@G]
 |dot [@B] ''   6
'''
    print(strg)
    assert strg == strref, (strg, strref)
Example 9: rebuild_nn
def rebuild_nn(nn_params):
    W_e, W_p, W_o, b_o = read_obj(nn_params, 4)
    mlp = MLPNoHid(W_e.get_value(), W_p.get_value(), W_o.get_value(), b_o.get_value())
    wx = T.matrix('word', dtype='int32')
    px = T.matrix('POS', dtype='int32')
    f_pred = theano.function([wx, px], mlp.output(wx, px))
    return f_pred
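Calling the rebuilt predictor is then a matter of passing integer index matrices matching the two symbolic inputs. A hedged sketch, with the file name and the (batch, window) shapes invented for illustration:

import numpy as np

# Hypothetical call; 'model.pkl' and the shapes are made up.
f_pred = rebuild_nn('model.pkl')
words = np.zeros((1, 5), dtype='int32')
pos_tags = np.zeros((1, 5), dtype='int32')
scores = f_pred(words, pos_tags)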
Example 10: test_sequence_variable_inputs
def test_sequence_variable_inputs():
    x, y = tensor.matrix(), tensor.matrix()

    parallel_1 = Parallel(input_names=['input_1', 'input_2'],
                          input_dims=dict(input_1=4, input_2=5),
                          output_dims=dict(input_1=3, input_2=2),
                          prototype=Linear(), weights_init=Constant(2),
                          biases_init=Constant(1))
    parallel_2 = Parallel(input_names=['input_1', 'input_2'],
                          input_dims=dict(input_1=3, input_2=2),
                          output_dims=dict(input_1=5, input_2=4),
                          prototype=Linear(), weights_init=Constant(2),
                          biases_init=Constant(1))
    sequence = Sequence([parallel_1.apply, parallel_2.apply])
    sequence.initialize()
    new_x, new_y = sequence.apply(x, y)

    x_val = numpy.ones((4, 4), dtype=theano.config.floatX)
    y_val = numpy.ones((4, 5), dtype=theano.config.floatX)
    assert_allclose(
        new_x.eval({x: x_val}),
        (x_val.dot(2 * numpy.ones((4, 3))) + numpy.ones((4, 3))).dot(
            2 * numpy.ones((3, 5))) + numpy.ones((4, 5)))
    assert_allclose(
        new_y.eval({y: y_val}),
        (y_val.dot(2 * numpy.ones((5, 2))) + numpy.ones((4, 2))).dot(
            2 * numpy.ones((2, 4))) + numpy.ones((4, 4)))
Example 11: build_model
def build_model(self):
    ######################
    # BUILD ACTUAL MODEL #
    ######################
    logger.info('... building the model')

    U, W, V, bh, by = self.U, self.W, self.V, self.bh, self.by

    x = T.matrix('x')
    y = T.matrix('y')

    def forward_prop_step(x_t, s_tm1, U, W, bh):
        s_t = self.activation(T.dot(U, x_t) + T.dot(W, s_tm1) + bh)
        return s_t

    s, _ = theano.scan(
        forward_prop_step,
        sequences=x,
        outputs_info=[dict(initial=T.zeros(self.hidden_dim))],
        non_sequences=[U, W, bh],
        mode='DebugMode')

    p_y = T.nnet.softmax(T.dot(self.V, s[-1]) + by)
    prediction = T.argmax(p_y, axis=1)
    o_error = T.sum(T.nnet.categorical_crossentropy(p_y, y))

    self.cost = o_error + self.L1_reg * self.L1 + self.L2_reg * self.L2_sqr

    # Assign functions
    self.forward_propagation = theano.function([x], s[-1])
    self.predict = theano.function([x], prediction)
    self.ce_error = theano.function([x, y], o_error)

    l_r = T.scalar('l_r', dtype=theano.config.floatX)  # learning rate (may change)
    mom = T.scalar('mom', dtype=theano.config.floatX)  # momentum
    self.bptt, self.f_update = self.Momentum(x, y, l_r, mom)
Example 12: test_infer_shape
def test_infer_shape(self):
    admat = matrix()
    bdmat = matrix()
    admat_val = numpy.random.rand(3, 4).astype(config.floatX)
    bdmat_val = numpy.random.rand(3, 4).astype(config.floatX)
    self._compile_and_check([admat, bdmat], [SoftmaxGrad()(admat, bdmat)],
                            [admat_val, bdmat_val], SoftmaxGrad)
Example 13: _setup_vars
def _setup_vars(self, sparse_input):
    '''Setup Theano variables for our network.

    Parameters
    ----------
    sparse_input : bool
        Not used -- sparse inputs are not supported for recurrent networks.

    Returns
    -------
    vars : list of theano variables
        A list of the variables that this network requires as inputs.
    '''
    _warn_dimshuffle()

    assert not sparse_input, 'Theanets does not support sparse recurrent models!'

    self.src = TT.ftensor3('src')
    # self.src_mask = TT.imatrix('src_mask')
    self.src_mask = TT.matrix('src_mask')
    self.dst = TT.ftensor3('dst')
    self.labels = TT.imatrix('labels')
    self.weights = TT.matrix('weights')

    if self.weighted:
        return [self.src, self.src_mask, self.dst, self.labels, self.weights]
    return [self.src, self.dst]
Example 14: test_hgemm_swap
def test_hgemm_swap():
    from theano.sandbox.cuda import nvcc_compiler
    if nvcc_compiler.nvcc_version < '7.5':
        raise SkipTest("SgemmEx is only available on cuda 7.5+")

    v = tensor.vector(dtype='float16')
    m = tensor.matrix(dtype='float16')
    m2 = tensor.matrix(dtype='float16')
    m32 = tensor.matrix(dtype='float32')

    # test that we don't try to replace anything but matrix x matrix in float16
    f = theano.function([v, m], tensor.dot(v, m), mode=mode_with_gpu)
    assert len([node for node in f.maker.fgraph.apply_nodes
                if isinstance(node.op, GpuGemm)]) == 0

    f = theano.function([m32, m], tensor.dot(m32, m), mode=mode_with_gpu)
    assert len([node for node in f.maker.fgraph.apply_nodes
                if isinstance(node.op, GpuGemm)]) == 0

    f = theano.function([m, m2], tensor.dot(m, m2), mode=mode_with_gpu)
    assert len([node for node in f.maker.fgraph.apply_nodes
                if isinstance(node.op, GpuGemm)]) == 1

    v1 = numpy.random.random((3, 4)).astype('float16')
    v2 = numpy.random.random((4, 2)).astype('float16')
    of = f(v1, v2)
    on = numpy.dot(v1, v2)
    utt.assert_allclose(of, on)
Example 15: _construct_sample_from_prior
def _construct_sample_from_prior(self):
    """
    Construct a function for drawing independent samples from the
    distribution generated by this MultiStageModel. This function returns
    the full sequence of "partially completed" examples.
    """
    z_sym = T.matrix()
    x_sym = T.matrix()
    irs = self.ir_steps
    oputs = [self.obs_transform(self.s0)]
    oputs.extend([self.obs_transform(self.si[i]) for i in range(irs)])
    _, hi_zmuv = self._construct_zmuv_samples(x_sym, 1)
    sample_func = theano.function(inputs=[z_sym, x_sym], outputs=oputs,
                                  givens={self.z: z_sym,
                                          self.x_in: T.zeros_like(x_sym),
                                          self.x_out: T.zeros_like(x_sym),
                                          self.hi_zmuv: hi_zmuv},
                                  updates=self.scan_updates)

    def prior_sampler(samp_count):
        x_samps = to_fX(np.zeros((samp_count, self.obs_dim)))
        old_switch = self.train_switch.get_value(borrow=False)
        # set model to generation mode
        self.set_train_switch(switch_val=0.0)
        z_samps = to_fX(npr.randn(samp_count, self.z_dim))
        model_samps = sample_func(z_samps, x_samps)
        # set model back to either training or generation mode
        self.set_train_switch(switch_val=old_switch)
        return model_samps

    return prior_sampler