This article collects typical usage examples of the Python method theano.tensor.as_tensor. If you are wondering what tensor.as_tensor does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore the containing module, theano.tensor, for further
usage examples.
Eight code examples of tensor.as_tensor are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
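Before the examples, here is a minimal sketch (not taken from any of the projects below) of what as_tensor does: it wraps a Python number, list, or numpy array as a symbolic tensor (a TensorConstant), and returns symbolic variables unchanged.

import numpy
import theano
import theano.tensor as tensor

c = tensor.as_tensor([[1., 2.], [3., 4.]])  # constant 2-d tensor
n = tensor.as_tensor(5)                     # 0-d (scalar) constant
f = theano.function([], c * n)
print(f())                                  # [[ 5. 10.] [15. 20.]]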
Example 1: make_node
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import as_tensor [as alias]
def make_node(self, frames, n, axis):
"""
Compute an n-point fft of frames along given axis.
"""
_frames = tensor.as_tensor(frames, ndim=2)
_n = tensor.as_tensor(n, ndim=0)
_axis = tensor.as_tensor(axis, ndim=0)
if self.half and _frames.type.dtype.startswith('complex'):
raise TypeError('Argument to HalfFFT must not be complex', frames)
spectrogram = tensor.zmatrix()
    buf = generic()  # note: generic and Apply are imported from theano.gof
# The `buf` output is present for future work
# when we call FFTW directly and re-use the 'plan' that FFTW creates.
# In that case, buf would store a CObject encapsulating the plan.
rval = Apply(self, [_frames, _n, _axis], [spectrogram, buf])
return rval
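A side note on the ndim argument used above: as_tensor is an alias of as_tensor_variable, whose ndim argument verifies or coerces the rank of the result (as implemented in Theano, a too-low rank is padded with broadcastable dimensions on the left). A small sketch:

import theano.tensor as tensor

s = tensor.as_tensor(7, ndim=0)          # 0-d scalar constant
m = tensor.as_tensor(7, ndim=2)          # padded to a broadcastable (1, 1) matrix
v = tensor.as_tensor([1, 2, 3], ndim=1)  # already rank 1, returned as-is
print(s.ndim, m.ndim, v.ndim)            # 0 2 1
# An irreconcilable rank (e.g. ndim=0 for a vector) raises a ValueError.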
Example 2: get_aggregator
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import as_tensor [as alias]
def get_aggregator(self):
initialized = shared_like(0.)
expression_acc = shared_like(self.expression)
# Dummy default expression to use as the previously-accumulated
# value, that has the same shape as the new result
expression_zeros = tensor.as_tensor(self.expression).zeros_like()
conditional_update_expr = self.expression + ifelse(initialized,
expression_acc,
expression_zeros)
initialization_updates = [(expression_acc,
tensor.zeros_like(expression_acc)),
(initialized, 0.)]
accumulation_updates = [(expression_acc,
conditional_update_expr),
(initialized, 1.)]
aggregator = Aggregator(aggregation_scheme=self,
initialization_updates=initialization_updates,
accumulation_updates=accumulation_updates,
                            readout_variable=expression_acc)
return aggregator
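This pattern (shared_like, Aggregator) matches Blocks' aggregation machinery: a shared accumulator plus an initialized flag, with ifelse substituting a zeros dummy on the first accumulation so the accumulator never needs a predefined shape. A stripped-down sketch of the same pattern using plain Theano shared variables (names illustrative, not the Blocks API):

import numpy
import theano
import theano.tensor as tensor
from theano.ifelse import ifelse

x = tensor.dvector('x')
acc = theano.shared(numpy.zeros(3), name='acc')
initialized = theano.shared(0.0, name='initialized')

# On the first call, the (uninitialized) accumulator is replaced by zeros.
new_acc = x + ifelse(initialized, acc, tensor.zeros_like(acc))
accumulate = theano.function(
    [x], [],
    updates=[(acc, new_acc), (initialized, tensor.as_tensor(1.))])

accumulate(numpy.ones(3))
accumulate(numpy.ones(3))
print(acc.get_value())  # [2. 2. 2.]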
Example 3: set_rest_ref_matrix
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import as_tensor [as alias]
def set_rest_ref_matrix(self, number_of_points_per_surface):
ref_positions = T.cumsum(T.concatenate((T.stack([0]), number_of_points_per_surface[:-1] + 1)))
cum_rep = T.cumsum(T.concatenate((T.stack([0]), number_of_points_per_surface)))
ref_points_init = T.zeros((cum_rep[-1], 3))
ref_points_loop, update_ = theano.scan(self.repeat_list,
outputs_info=[ref_points_init],
sequences=[self.surface_points_all[ref_positions],
dict(input=cum_rep, taps=[0, 1])],
non_sequences=[T.as_tensor(3)],
return_list=False)
# ref_points_loop = theano.printing.Print('loop')(ref_points_loop)
ref_points = ref_points_loop[-1]
# ref_points = T.repeat(self.surface_points_all[ref_positions], number_of_points_per_surface, axis=0)
rest_mask = T.ones(T.stack([self.surface_points_all.shape[0]]), dtype='int16')
rest_mask = T.set_subtensor(rest_mask[ref_positions], 0)
rest_mask = T.nonzero(rest_mask)[0]
rest_points = self.surface_points_all[rest_mask]
return [ref_points, rest_points, ref_positions, rest_mask]
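The commented-out T.repeat line above states the intent directly: each reference point is repeated once per point on its surface. The scan over self.repeat_list (defined elsewhere in this class) builds the same result incrementally; the same pattern recurs in Example 4 for the nugget scalars. A standalone sketch of the repeat-by-counts idea using T.repeat:

import numpy
import theano
import theano.tensor as T

ref = T.dmatrix('ref')        # one reference point per surface, shape (S, 3)
counts = T.lvector('counts')  # number of points per surface
repeated = T.repeat(ref, counts, axis=0)
f = theano.function([ref, counts], repeated)
print(f(numpy.arange(6.).reshape(2, 3), numpy.array([2, 3])))
# row 0 appears twice, row 1 three times -> shape (5, 3)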
Example 4: set_nugget_surface_points
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import as_tensor [as alias]
def set_nugget_surface_points(self, ref_positions, rest_mask, number_of_points_per_surface):
# ref_nugget = T.repeat(self.nugget_effect_scalar_T[ref_positions], number_of_points_per_surface)
cum_rep = T.cumsum(T.concatenate((T.stack([0]), number_of_points_per_surface)))
ref_nugget_init = T.zeros((cum_rep[-1], 1))
ref_nugget_loop, update_ = theano.scan(self.repeat_list,
outputs_info=[ref_nugget_init],
sequences=[self.nugget_effect_scalar_T[ref_positions],
dict(input=cum_rep, taps=[0, 1])],
non_sequences=[T.as_tensor(1)],
return_list=False)
# ref_nugget_loop = theano.printing.Print('loop')(ref_nugget_loop)
ref_nugget = ref_nugget_loop[-1]
rest_nugget = self.nugget_effect_scalar_T[rest_mask]
nugget_rest_ref = ref_nugget.reshape((1, -1))[0] + rest_nugget
return nugget_rest_ref
Example 5: infer_shape
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import as_tensor [as alias]
def infer_shape(self, node, shapes):
return [shapes[0] + (tt.as_tensor(self.N),)]
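The surrounding class is not shown, but the contract is simple: infer_shape returns one symbolic shape tuple per output, and here the op appends a trailing axis of length self.N, with tt.as_tensor lifting the Python int into the symbolic shape graph. An illustrative op with the same shape signature (a hypothetical class, not the original):

import numpy
import theano.tensor as tt
from theano.gof import Op, Apply

class RepeatLast(Op):
    """Hypothetical op: repeat the input N times along a new trailing axis."""
    __props__ = ('N',)

    def __init__(self, N):
        self.N = int(N)

    def make_node(self, x):
        x = tt.as_tensor(x)
        out_type = tt.TensorType(x.dtype, x.broadcastable + (False,))
        return Apply(self, [x], [out_type()])

    def perform(self, node, inputs, output_storage):
        (x,) = inputs
        output_storage[0][0] = numpy.repeat(x[..., None], self.N, axis=-1)

    def infer_shape(self, node, shapes):
        # One shape tuple per output: the input's shape plus a trailing N.
        return [shapes[0] + (tt.as_tensor(self.N),)]

y = RepeatLast(4)(tt.dmatrix('x'))  # y.ndim == 3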
Example 6: max_pool
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import as_tensor [as alias]
def max_pool(images, imgshp, maxpoolshp):
"""Implements a max pooling layer
Takes as input a 2D tensor of shape batch_size x img_size and
performs max pooling. Max pooling downsamples by taking the max
value in a given area, here defined by maxpoolshp. Outputs a 2D
tensor of shape batch_size x output_size.
    :param images: 2D tensor containing images on which to apply max pooling.
Assumed to be of shape batch_size x img_size
:param imgshp: tuple containing image dimensions
:param maxpoolshp: tuple containing shape of area to max pool over
:return: out1, symbolic result (2D tensor)
:return: out2, logical shape of the output
"""
N = numpy
poolsize = N.int64(N.prod(maxpoolshp))
# imgshp contains either 2 entries (height,width) or 3 (nfeatures,h,w)
# in the first case, default nfeatures to 1
if N.size(imgshp) == 2:
imgshp = (1,) + imgshp
# construct indices and index pointers for sparse matrix, which,
# when multiplied with input images will generate a stack of image
# patches
indices, indptr, spmat_shape, sptype, outshp = \
convolution_indices.conv_eval(imgshp, maxpoolshp,
maxpoolshp, mode='valid')
# print 'XXXXXXXXXXXXXXXX MAX POOLING LAYER XXXXXXXXXXXXXXXXXXXX'
# print 'imgshp = ', imgshp
# print 'maxpoolshp = ', maxpoolshp
# print 'outshp = ', outshp
# build sparse matrix, then generate stack of image patches
csc = theano.sparse.CSM(sptype)(N.ones(indices.size), indices,
indptr, spmat_shape)
patches = sparse.structured_dot(csc, images.T).T
pshape = tensor.stack([images.shape[0] *\
tensor.as_tensor(N.prod(outshp)),
tensor.as_tensor(imgshp[0]),
tensor.as_tensor(poolsize)])
patch_stack = tensor.reshape(patches, pshape, ndim=3)
out1 = tensor.max(patch_stack, axis=2)
pshape = tensor.stack([images.shape[0],
tensor.as_tensor(N.prod(outshp)),
tensor.as_tensor(imgshp[0])])
out2 = tensor.reshape(out1, pshape, ndim=3)
out3 = tensor.DimShuffle(out2.broadcastable, (0, 2, 1))(out2)
return tensor.flatten(out3, 2), outshp
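A hedged usage sketch for the function above (shape values are illustrative; assumes max_pool and its convolution_indices helper are in scope): with a single 8x8 feature map and non-overlapping 2x2 pooling, each input row should shrink from 64 to 16 values.

import numpy
import theano
import theano.tensor as tensor

images = tensor.dmatrix('images')  # batch_size x (1*8*8), flattened
pooled, outshp = max_pool(images, imgshp=(8, 8), maxpoolshp=(2, 2))
f = theano.function([images], pooled)
x = numpy.random.rand(5, 64)
print(f(x).shape)  # expected (5, 16): one max per 2x2 patch
print(outshp)      # logical per-feature-map output shape, e.g. 4x4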
Example 7: __init__
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import as_tensor [as alias]
def __init__(self, rng, inputVar, cfgParams, copyLayer=None, layerNum=None):
"""
Allocate a PoolLayer with shared variable internal parameters.
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type inputVar: theano.tensor.dtensor4
:param inputVar: symbolic image tensor, of shape image_shape
:type cfgParams: PoolLayerParams
"""
floatX = theano.config.floatX # @UndefinedVariable
outputDim = cfgParams.outputDim
poolsize = cfgParams.poolsize
inputDim = cfgParams.inputDim
activation = cfgParams.activation
poolType = cfgParams.poolType
self.cfgParams = cfgParams
self.layerNum = layerNum
self.inputVar = inputVar
if inputVar.type.ndim != 4:
        raise TypeError('inputVar must be a 4-d tensor')
self.params = []
self.weights = []
# downsample each feature map individually, using maxpooling
if poolType == 0:
# use maxpooling
pooled_out = pool_2d(input=self.inputVar, ds=poolsize, ignore_border=True)
elif poolType == 1:
# use average pooling
pooled_out = theano.sandbox.neighbours.images2neibs(ten4=self.inputVar, neib_shape=poolsize, mode='ignore_borders').mean(axis=-1)
new_shape = T.cast(T.join(0, self.inputVar.shape[:-2], T.as_tensor([self.inputVar.shape[2]//poolsize[0]]), T.as_tensor([self.inputVar.shape[3]//poolsize[1]])), 'int64')
pooled_out = T.reshape(pooled_out, new_shape, ndim=4)
elif poolType == 3:
# use subsampling and ignore border
pooled_out = self.inputVar[:, :, :(inputDim[2]//poolsize[0])*poolsize[0], :(inputDim[3]//poolsize[1])*poolsize[1]][:, :, ::poolsize[0], ::poolsize[1]]
elif poolType == -1:
# no pooling at all
pooled_out = self.inputVar
else:
raise ValueError("Unknown pool type!")
self.output = (pooled_out if activation is None
else activation(pooled_out))
self.output.name = 'output_layer_{}'.format(self.layerNum)
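The average-pooling branch (poolType == 1) is the least obvious: images2neibs lays out each pooling window as a row, mean(axis=-1) averages each window, and the reshape (with the two as_tensor-wrapped trailing sizes) restores the 4-d layout. A standalone sketch of that trick, assuming a fixed 2x2 pool:

import theano
import theano.tensor as T
from theano.sandbox.neighbours import images2neibs

x = T.dtensor4('x')  # (batch, channels, rows, cols)
neibs = images2neibs(ten4=x, neib_shape=(2, 2), mode='ignore_borders')
avg = neibs.mean(axis=-1)  # one mean per 2x2 window
new_shape = T.cast(T.join(0, x.shape[:-2],
                          T.as_tensor([x.shape[2] // 2]),
                          T.as_tensor([x.shape[3] // 2])), 'int64')
pooled = T.reshape(avg, new_shape, ndim=4)
f = theano.function([x], pooled)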
Example 8: updates
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import as_tensor [as alias]
def updates(self, gradients):
"""
    Return symbolic updates to apply, given a set of gradients
    with respect to the parameters being optimized.
Parameters
----------
gradients : list of tensor_likes
List of symbolic gradients for the parameters contained
in self.params, in the same order as in self.params.
Returns
-------
updates : dict
A dictionary with the shared variables in self.params as keys
and a symbolic expression of how they are to be updated each
SGD step as values.
Notes
-----
`cost_updates` is a convenient helper function that takes all
necessary gradients with respect to a given symbolic cost.
"""
ups = {}
# Add the learning rate/iteration updates
l_ups, learn_rates = self.learning_rate_updates(gradients)
safe_update(ups, l_ups)
# Get the updates from sgd_updates, a PyLearn library function.
p_up = dict(self.sgd_updates(self.params, gradients, learn_rates))
# Add the things in p_up to ups
safe_update(ups, p_up)
# Clip the values if needed.
# We do not want the clipping values to force an upcast
# of the update: updates should have the same type as params
for param, (p_min, p_max) in six.iteritems(self.clipping_values):
p_min = tensor.as_tensor(p_min)
p_max = tensor.as_tensor(p_max)
dtype = param.dtype
if p_min.dtype != dtype:
p_min = tensor.cast(p_min, dtype)
if p_max.dtype != dtype:
p_max = tensor.cast(p_max, dtype)
ups[param] = tensor.clip(ups[param], p_min, p_max)
# Return the updates dictionary.
return ups
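The clipping step at the end is worth isolating: the bounds are wrapped with as_tensor and cast to the parameter's dtype so that tensor.clip does not upcast the update (a float32 parameter clipped with float64 bounds would yield a float64 update, which Theano rejects for a float32 shared variable). A minimal sketch (names illustrative):

import numpy
import theano
import theano.tensor as tensor

param = theano.shared(numpy.zeros(4, dtype='float32'), name='w')
update = param + numpy.float32(0.5)  # stand-in for an SGD update expression
p_min = tensor.cast(tensor.as_tensor(-1.0), param.dtype)  # avoid float64 upcast
p_max = tensor.cast(tensor.as_tensor(1.0), param.dtype)
step = theano.function([], [],
                       updates=[(param, tensor.clip(update, p_min, p_max))])
for _ in range(4):
    step()
print(param.get_value())  # saturates at the upper clip bound: [1. 1. 1. 1.]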