This article collects typical usage examples of theano.tensor in Python. If you have been wondering what theano.tensor does, how to use it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples from its parent module, theano.

The 15 code examples of theano.tensor shown below are sorted by popularity by default.
Example 1: broadcast_concat

# Requires: import theano.tensor as T
def broadcast_concat(tensors, axis):
    """
    Broadcast tensors together, then concatenate along axis.
    """
    ndim = tensors[0].ndim
    assert all(t.ndim == ndim for t in tensors), "ndims don't match for broadcast_concat: {}".format(tensors)
    broadcast_shapes = []
    for i in range(ndim):
        if i == axis:
            broadcast_shapes.append(1)
        else:
            # take the (possibly symbolic) size from the first non-broadcastable tensor
            dim_size = next((t.shape[i] for t in tensors if not t.broadcastable[i]), 1)
            broadcast_shapes.append(dim_size)
    broadcasted_tensors = []
    for t in tensors:
        tile_reps = [bshape if t.broadcastable[i] else 1 for i, bshape in enumerate(broadcast_shapes)]
        # the original `rep is 1` relied on small-int interning; test explicitly instead
        if all(isinstance(rep, int) and rep == 1 for rep in tile_reps):
            # Don't need to broadcast this tensor
            broadcasted_tensors.append(t)
        else:
            broadcasted_tensors.append(T.tile(t, tile_reps))
    return T.concatenate(broadcasted_tensors, axis)
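A minimal usage sketch (my own illustration, not from the indexed project), assuming the function above is in scope:

import numpy as np
import theano
import theano.tensor as T

a = T.tensor3()              # (batch, 1, feat) at runtime
b = T.tensor3()              # (batch, time, feat)
a_bc = T.addbroadcast(a, 1)  # mark axis 1 as broadcastable
out = broadcast_concat([a_bc, b], axis=2)
f = theano.function([a, b], out)
print(f(np.ones((2, 1, 3), dtype=theano.config.floatX),
        np.ones((2, 4, 3), dtype=theano.config.floatX)).shape)  # (2, 4, 6)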
Example 2: reduce_log_sum

# Requires: import theano.tensor as T
def reduce_log_sum(tensor, axis=None, guaranteed_finite=False):
    """
    Sum probabilities in the log domain, i.e. return
        log(e^vec[0] + e^vec[1] + ...)
          = log(e^x e^(vec[0]-x) + e^x e^(vec[1]-x) + ...)
          = log(e^x [e^(vec[0]-x) + e^(vec[1]-x) + ...])
          = log(e^x) + log(e^(vec[0]-x) + e^(vec[1]-x) + ...)
          = x + log(e^(vec[0]-x) + e^(vec[1]-x) + ...)
    For numerical stability, we choose x = max(vec).
    Note that if x is -inf, then all values are -inf,
    so the answer should be -inf; in that case, choose x = 0.
    """
    maxval = T.max(tensor, axis)
    maxval_full = T.max(tensor, axis, keepdims=True)
    if not guaranteed_finite:
        maxval = T.switch(T.isfinite(maxval), maxval, T.zeros_like(maxval))
        maxval_full = T.switch(T.isfinite(maxval_full), maxval_full, T.zeros_like(maxval_full))
    reduced_sum = T.sum(T.exp(tensor - maxval_full), axis)
    logsum = maxval + T.log(reduced_sum)
    return logsum
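A quick numerical check of the stability claim (my own illustration; guaranteed_finite=True sidesteps the isfinite branch since the inputs are finite):

import numpy as np
import theano
import theano.tensor as T

v = T.dvector()
f = theano.function([v], reduce_log_sum(v, guaranteed_finite=True))

x = np.array([-1000.0, -1000.5, -999.0])
print(f(x))                    # approx. -998.5355, no overflow
print(np.logaddexp.reduce(x))  # numpy's stable reference gives the same value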
Example 3: errors

# Requires: import theano.tensor as T
def errors(self, y):
    """Return a float representing the number of errors in the minibatch
    over the total number of examples in the minibatch, i.e. the zero-one
    loss over the size of the minibatch.

    :type y: theano.tensor.TensorType
    :param y: a vector that gives the correct label for each example
    """
    # check that y has the same dimension as y_pred
    if y.ndim != self.y_pred.ndim:
        raise TypeError(
            'y should have the same shape as self.y_pred',
            ('y', y.type, 'y_pred', self.y_pred.type)
        )
    # check that y is of the correct datatype
    if y.dtype.startswith('int'):
        # the T.neq operator returns a vector of 0s and 1s, where 1
        # represents a mistake in prediction
        return T.mean(T.neq(self.y_pred, y))
    else:
        raise NotImplementedError()
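The core of the zero-one loss is just T.mean(T.neq(...)); a standalone sketch of that expression (illustrative names, not from the source class):

import numpy as np
import theano
import theano.tensor as T

y_pred = T.ivector('y_pred')
y = T.ivector('y')
zero_one = theano.function([y_pred, y], T.mean(T.neq(y_pred, y)))

print(zero_one(np.array([0, 1, 2, 1], dtype='int32'),
               np.array([0, 1, 1, 1], dtype='int32')))  # 0.25 (1 of 4 wrong)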
Example 4: __init__

# Requires: from theano import tensor
# (also assumes: import datetime as dt, import numpy as np, plus the
#  project-specific cfg configuration object and tensor5 helper)
def __init__(self, random_seed=dt.datetime.now().microsecond, compute_grad=True):
    # NB: the default for random_seed is evaluated once, at import time
    self.rng = np.random.RandomState(random_seed)

    self.batch_size = cfg.CONST.BATCH_SIZE
    self.img_w = cfg.CONST.IMG_W
    self.img_h = cfg.CONST.IMG_H
    self.n_vox = cfg.CONST.N_VOX
    self.compute_grad = compute_grad

    # (self.batch_size, 3, self.img_h, self.img_w);
    # override x and is_x_tensor4 when using a multi-view network
    self.x = tensor.tensor4()
    self.is_x_tensor4 = True

    # (self.batch_size, self.n_vox, 2, self.n_vox, self.n_vox)
    self.y = tensor5()

    self.activations = []  # list of all intermediate activations
    self.loss = []         # final loss
    self.output = []       # final output
    self.error = []        # final output error
    self.params = []       # all learnable params
    self.grads = []        # will be filled out automatically
    self.setup()
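Depending on the Theano version, tensor.tensor5 may not exist, so projects like this one typically construct the 5-D type themselves. A hedged sketch of such a helper (the project's actual definition may differ):

import theano
from theano import tensor

# a callable TensorType producing 5-D floatX variables
tensor5 = tensor.TensorType(theano.config.floatX, (False,) * 5)
y = tensor5()  # e.g. (batch, n_vox, 2, n_vox, n_vox)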
Example 5: set_output

# Requires: from theano import tensor
def set_output(self):
    output_shape = self._output_shape
    padding = self._padding
    unpool_size = self._unpool_size
    unpooled_output = tensor.alloc(0.0,  # value to fill the tensor with
                                   output_shape[0],
                                   output_shape[1] + 2 * padding[0],
                                   output_shape[2],
                                   output_shape[3] + 2 * padding[1],
                                   output_shape[4] + 2 * padding[2])
    unpooled_output = tensor.set_subtensor(
        unpooled_output[:,
                        padding[0]:output_shape[1] + padding[0]:unpool_size[0],
                        :,
                        padding[1]:output_shape[3] + padding[1]:unpool_size[1],
                        padding[2]:output_shape[4] + padding[2]:unpool_size[2]],
        self._prev_layer.output)
    self._output = unpooled_output
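The stepped-slice set_subtensor is what scatters each input voxel to every unpool_size-th output position. A 1-D analog of the same trick (my own illustration):

import numpy as np
import theano
from theano import tensor

x = tensor.vector()
unpooled = tensor.alloc(0.0, x.shape[0] * 2)        # zero-filled output
unpooled = tensor.set_subtensor(unpooled[0::2], x)  # write inputs at stride 2
f = theano.function([x], unpooled)

print(f(np.array([1, 2, 3], dtype=theano.config.floatX)))  # [1. 0. 2. 0. 3. 0.]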
Example 6: shared_dropout_layer

# Requires: import theano; from theano import tensor; import numpy
def shared_dropout_layer(shape, use_noise, trng, value, scaled=True):
    # re-scale dropout at training time, so we don't need to at test time
    if scaled:
        proj = tensor.switch(
            use_noise,
            trng.binomial(shape, p=value, n=1,
                          dtype='float32') / value,
            theano.shared(numpy.float32(1.)))
    else:
        proj = tensor.switch(
            use_noise,
            trng.binomial(shape, p=value, n=1,
                          dtype='float32'),
            theano.shared(numpy.float32(value)))
    return proj
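A sketch of how such a dropout layer is typically wired up (the surrounding names are illustrative, not from the source):

import numpy
import theano
from theano import tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams

trng = MRG_RandomStreams(1234)
use_noise = theano.shared(numpy.float32(1.))  # 1. while training; set to 0. at test time

x = tensor.matrix('x')
mask = shared_dropout_layer((x.shape[0], x.shape[1]), use_noise, trng,
                            value=0.7, scaled=True)  # keep probability 0.7
dropped = x * mask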
Example 7: build_encoder_bi

# Requires: from theano import tensor
# (tparams, options and get_layer come from the surrounding project)
def build_encoder_bi(tparams, options):
    """
    Build a bidirectional encoder, given pre-computed word embeddings.
    """
    # word embedding (source)
    embedding = tensor.tensor3('embedding', dtype='float32')
    embeddingr = embedding[::-1]
    x_mask = tensor.matrix('x_mask', dtype='float32')
    xr_mask = x_mask[::-1]

    # encoder: run the same layer type forwards and backwards
    proj = get_layer(options['encoder'])[1](tparams, embedding, options,
                                            prefix='encoder',
                                            mask=x_mask)
    projr = get_layer(options['encoder'])[1](tparams, embeddingr, options,
                                             prefix='encoder_r',
                                             mask=xr_mask)

    ctx = tensor.concatenate([proj[0][-1], projr[0][-1]], axis=1)

    return embedding, x_mask, ctx
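The backward encoder is fed the same data reversed along the time axis (axis 0), which is all the [::-1] slices do; a quick check of that convention in plain numpy:

import numpy as np

emb = np.arange(12).reshape(3, 2, 2)  # (timesteps, samples, dim)
embr = emb[::-1]                      # reversed along time only
assert (embr[0] == emb[-1]).all() and (embr[-1] == emb[0]).all()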
Example 8: __init__

# (no direct theano import is needed by this snippet; it only does parameter bookkeeping)
def __init__(self, layers, mode='sum'):
    """Merge the output of a list of layers or containers into a single tensor.
    mode: {'sum', 'concat'}
    """
    if len(layers) < 2:
        raise Exception("Please specify two or more input layers (or containers) to merge")
    self.mode = mode
    self.layers = layers
    self.params = []
    self.regularizers = []
    self.constraints = []
    for l in self.layers:
        params, regs, consts = l.get_params()
        self.regularizers += regs
        # params and constraints have the same size
        for p, c in zip(params, consts):
            if p not in self.params:
                self.params.append(p)
                self.constraints.append(c)
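To see just the bookkeeping in action, a minimal runnable sketch with a hypothetical host class and stub layers standing in for real ones (illustration only):

# Hypothetical host class reusing the constructor above:
Merge = type('Merge', (object,), {'__init__': __init__})

class StubLayer(object):
    def get_params(self):
        return [], [], []  # params, regularizers, constraints

merged = Merge([StubLayer(), StubLayer()], mode='sum')
print(merged.mode, merged.params)  # sum []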
Example 9: adadelta

# Requires: import theano; from theano import tensor; import numpy
# (itemlist and profile are project helpers: the parameter list and a profiling flag;
#  the original Python 2 dict.iteritems() is written as .items() here)
def adadelta(lr, tparams, grads, inp, cost):
    # running averages of squared updates and squared gradients (Zeiler, 2012)
    running_up2 = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rup2' % k) for k, p in tparams.items()]
    running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rgrad2' % k) for k, p in tparams.items()]

    rg2_new = [0.95 * rg2 + 0.05 * (g ** 2) for rg2, g in zip(running_grads2, grads)]
    rg2up = [(rg2, r_n) for rg2, r_n in zip(running_grads2, rg2_new)]
    updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * zg for zg, ru2, rg2 in zip(grads, running_up2, rg2_new)]
    ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2)) for ru2, ud in zip(running_up2, updir)]
    param_up = [(p, p + ud) for p, ud in zip(itemlist(tparams), updir)]

    inp += [lr]
    # lr is accepted but unused by adadelta itself, hence on_unused_input='ignore'
    f_update = theano.function(inp, cost, updates=rg2up + ru2up + param_up, on_unused_input='ignore', profile=profile)

    return f_update
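A hedged end-to-end sketch of calling adadelta, with itemlist and profile stubbed to match what the project provides; names are illustrative, and floatX=float32 is assumed, as in the original codebase:

from collections import OrderedDict
import numpy
import theano
from theano import tensor

itemlist = lambda tp: list(tp.values())  # stub for the project helper
profile = False                          # stub for the project flag

w = theano.shared(numpy.float32(2.0), name='w')
tparams = OrderedDict([('w', w)])
x = tensor.fscalar('x')
lr = tensor.fscalar('lr')
cost = (w * x - 1.0) ** 2
grads = tensor.grad(cost, wrt=itemlist(tparams))

f_update = adadelta(lr, tparams, grads, [x], cost)
for _ in range(100):
    f_update(numpy.float32(1.0), numpy.float32(0.0))  # lr is ignored by adadelta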
Example 10: debugging_adadelta

# Requires: import theano; from theano import tensor; import numpy
# (itemlist and profile are project helpers, as in the previous example)
def debugging_adadelta(lr, tparams, grads, inp, cost):
    zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_grad' % k) for k, p in tparams.items()]
    running_up2 = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rup2' % k) for k, p in tparams.items()]
    running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rgrad2' % k) for k, p in tparams.items()]

    # phase 1: store the raw gradients and update their running average
    zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
    rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2)) for rg2, g in zip(running_grads2, grads)]
    f_grad_shared = theano.function(inp, cost, updates=zgup + rg2up, profile=profile)

    # phase 2: compute the update direction and apply it to the parameters
    updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * zg for zg, ru2, rg2 in zip(zipped_grads, running_up2, running_grads2)]
    ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2)) for ru2, ud in zip(running_up2, updir)]
    param_up = [(p, p + ud) for p, ud in zip(itemlist(tparams), updir)]
    f_update = theano.function([lr], [], updates=ru2up + param_up, on_unused_input='ignore', profile=profile)

    return f_grad_shared, f_update
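Continuing the hypothetical setup from the previous sketch, the debugging variant splits one optimization step into two calls:

f_grad_shared, f_update = debugging_adadelta(lr, tparams, grads, [x], cost)
cost_val = f_grad_shared(numpy.float32(1.0))  # phase 1: accumulate gradients
f_update(numpy.float32(0.0))                  # phase 2: apply the update (lr unused)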
Example 11: timeit_2vector_theano

# Requires: import timeit; import numpy as np
def timeit_2vector_theano(init, nb_element=1e6, nb_repeat=3, nb_call=int(1e2),
                          expr="a**2 + b**2 + 2*a*b"):
    # `init` must be Python code that defines numpy arrays `a` and `b`;
    # note that nb_element is unused here -- the array size comes from `init`.
    # (the setup source must stay unindented inside the string)
    t3 = timeit.Timer("tf(av,bv)",
                      """
import theano
import theano.tensor as T
import numexpr as ne
from theano.tensor import exp
%(init)s
av = a
bv = b
a = T.dvector()
b = T.dvector()
tf = theano.function([a, b], %(expr)s)
""" % locals())
    ret = t3.repeat(nb_repeat, nb_call)
    return np.asarray(ret)
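A hedged usage sketch (assumes theano and numexpr are importable; the array sizes are illustrative):

times = timeit_2vector_theano(
    init="import numpy as np; a = np.random.rand(int(1e6)); b = np.random.rand(int(1e6))",
    nb_repeat=3, nb_call=100)
print(times.min() / 100, "seconds per call (best of 3)")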
Example 12: local_csm_properties_csm

# Requires: import theano
# (csm_properties, CSC and CSR come from theano.sparse;
#  izip is itertools.izip on Python 2, or the builtin zip on Python 3)
def local_csm_properties_csm(node):
    """
    If we find csm_properties(CSM(*args)), then we can replace that with the
    *args directly.
    """
    if node.op == csm_properties:
        csm, = node.inputs
        if csm.owner and (csm.owner.op == CSC or csm.owner.op == CSR):
            # csm.owner.inputs could be broadcastable. In that case, we have
            # to adjust the broadcasting flag here.
            ret_var = [theano.tensor.patternbroadcast(i, o.broadcastable)
                       for i, o in izip(csm.owner.inputs, node.outputs)]
            return ret_var
    return False
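For context, inside Theano this function serves as a graph rewrite; a hedged sketch of how such a rewrite is attached as a local optimizer (registration details vary by Theano version):

from theano import gof
from theano.sparse import csm_properties

local_csm_properties_csm = gof.local_optimizer([csm_properties])(local_csm_properties_csm)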
Example 13: make_node

# Requires: from theano import gof, scalar, sparse, tensor
# (csm_data, csm_indices and csm_indptr come from theano.sparse)
def make_node(self, x, y):
    x, y = sparse.as_sparse_variable(x), tensor.as_tensor_variable(y)
    out_dtype = scalar.upcast(x.type.dtype, y.type.dtype)
    if self.inplace:
        assert out_dtype == y.dtype

    indices, indptr, data = csm_indices(x), csm_indptr(x), csm_data(x)
    # We either use CSC or CSR depending on the format of input
    assert self.format == x.type.format
    # The magic number two here arises because scipy.sparse
    # objects must be matrices (have dimension 2)
    assert y.type.ndim == 2
    out = tensor.TensorType(dtype=out_dtype,
                            broadcastable=y.type.broadcastable)()
    return gof.Apply(self,
                     [data, indices, indptr, y],
                     [out])
Example 14: link

# Requires: import theano.tensor as T
def link(self, input):
    """
    The input has to be a tensor whose rightmost
    dimension equals input_dim.
    """
    self.input = input
    self.linear_output = T.dot(self.input, self.weights)
    if self.bias is not None:  # avoid truth-testing a symbolic variable
        self.linear_output = self.linear_output + self.bias
    if self.activation is None:
        self.output = self.linear_output
    else:
        self.output = self.activation(self.linear_output)
    return self.output
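A hypothetical host class to make the method runnable end-to-end, assuming the link function above is defined at module scope (the real class is not shown on this page):

import numpy as np
import theano
import theano.tensor as T

class DenseLayer(object):
    link = link  # reuse the method defined above

    def __init__(self, input_dim, output_dim, activation=T.tanh):
        rng = np.random.RandomState(0)
        self.weights = theano.shared(
            rng.randn(input_dim, output_dim).astype(theano.config.floatX))
        self.bias = theano.shared(np.zeros(output_dim, dtype=theano.config.floatX))
        self.activation = activation

x = T.matrix('x')
f = theano.function([x], DenseLayer(4, 3).link(x))
print(f(np.ones((2, 4), dtype=theano.config.floatX)).shape)  # (2, 3)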
Example 15: get_initial_states

# Requires: from keras import backend as K
def get_initial_states(self, x):
    # build an all-zero tensor of shape (samples, output_dim)
    initial_state = K.zeros_like(x)  # (samples, timesteps, input_dim)
    initial_state = K.sum(initial_state, axis=(1, 2))  # (samples,)
    initial_state = K.expand_dims(initial_state)  # (samples, 1)
    initial_state = K.tile(initial_state, [1, self.output_dim])  # (samples, output_dim)
    initial_states = [initial_state for _ in range(len(self.states))]
    return initial_states
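The zeros_like/sum/expand_dims/tile dance exists only to obtain a tensor whose leading dimension is the symbolic batch size of x; the numpy equivalent of the shape bookkeeping (my own illustration):

import numpy as np

x = np.zeros((5, 7, 3))    # (samples, timesteps, input_dim)
s = x.sum(axis=(1, 2))     # (samples,)
s = s[:, None]             # (samples, 1)
init = np.tile(s, [1, 4])  # (samples, output_dim=4)
print(init.shape)          # (5, 4)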