This article collects typical usage examples of the theano.tensor.shape method in Python. If you have been wondering what exactly tensor.shape does, how to call it, or what real code using it looks like, the curated examples below should help. You can also explore further usage examples from the module that the method belongs to, theano.tensor.
The following shows 13 code examples of the tensor.shape method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: sp_ones_like
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import shape [as alias]
def sp_ones_like(x):
    """
    Construct a sparse matrix of ones with the same sparsity pattern.

    Parameters
    ----------
    x
        Sparse matrix to take the sparsity pattern from.

    Returns
    -------
    A sparse matrix
        The same as `x`, with its data replaced by ones.

    """
    # TODO: don't restrict to CSM formats
    data, indices, indptr, shape = csm_properties(x)
    return CSM(format=x.format)(tensor.ones_like(data), indices, indptr, shape)
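A minimal usage sketch, assuming a stock Theano installation with SciPy (the helper is exposed as theano.sparse.sp_ones_like; the concrete input matrix is made up for illustration):

import numpy as np
import scipy.sparse
import theano
import theano.sparse as sparse

x = sparse.csr_matrix('x', dtype='float64')       # symbolic CSR input
f = theano.function([x], sparse.sp_ones_like(x))  # same sparsity pattern, data set to 1.0

a = scipy.sparse.csr_matrix(np.array([[0., 2.], [3., 0.]]))
print(f(a).toarray())                             # [[0. 1.] [1. 0.]]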
Example 2: sp_zeros_like
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import shape [as alias]
def sp_zeros_like(x):
    """
    Construct a sparse matrix of zeros.

    Parameters
    ----------
    x
        Sparse matrix to take the shape from.

    Returns
    -------
    A sparse matrix
        The same shape as `x`, with zero entries for all elements.

    """
    # TODO: don't restrict to CSM formats
    _, _, indptr, shape = csm_properties(x)
    return CSM(format=x.format)(data=numpy.array([], dtype=x.type.dtype),
                                indices=numpy.array([], dtype='int32'),
                                indptr=tensor.zeros_like(indptr),
                                shape=shape)
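For comparison, a short sketch of the zero counterpart, assuming the helper is used via theano.sparse.sp_zeros_like (which stock Theano exposes):

import theano
import theano.sparse as sparse

x = sparse.csc_matrix('x', dtype='float64')
z = sparse.sp_zeros_like(x)        # same shape as x, but with an empty data array (nnz == 0)
g = theano.function([x], z)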
Example 3: perform
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import shape [as alias]
def perform(self, node, inputs, outputs):
    # for efficiency, if remap does nothing, then do not apply it
    (data, indices, indptr, shape) = inputs
    (out,) = outputs

    if len(shape) != 2:
        raise ValueError('Shape should be an array of length 2')
    if data.shape != indices.shape:
        errmsg = ('Data (shape ' + repr(data.shape) +
                  ') must have the same number of elements ' +
                  'as indices (shape ' + repr(indices.shape) +
                  ')')
        raise ValueError(errmsg)
    if self.format == 'csc':
        out[0] = scipy.sparse.csc_matrix((data, indices.copy(),
                                          indptr.copy()),
                                         numpy.asarray(shape), copy=False)
    else:
        assert self.format == 'csr'
        out[0] = scipy.sparse.csr_matrix((data, indices.copy(),
                                          indptr.copy()), shape.copy(),
                                         copy=False)
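The SciPy constructor that this perform method ultimately calls can be exercised on its own; a small standalone sketch (the concrete arrays are made up for illustration):

import numpy as np
import scipy.sparse

data = np.array([1., 2., 3.])
indices = np.array([0, 2, 1], dtype='int32')   # row indices of the non-zeros
indptr = np.array([0, 2, 3], dtype='int32')    # column j owns data[indptr[j]:indptr[j+1]]
m = scipy.sparse.csc_matrix((data, indices, indptr), shape=(3, 2))
print(m.toarray())                             # [[1. 0.] [0. 3.] [2. 0.]]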
Example 4: grad
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import shape [as alias]
def grad(self, inputs, gout):
    (gz,) = gout
    is_continuous = [(inputs[i].dtype in tensor.continuous_dtypes)
                     for i in range(len(inputs))]

    if _is_sparse_variable(gz):
        gz = dense_from_sparse(gz)

    split = tensor.Split(len(inputs))(gz, 0,
                                      tensor.stack(
                                          [x.shape[0]
                                           for x in inputs]))
    if not isinstance(split, list):
        split = [split]

    derivative = [SparseFromDense(self.format)(s) for s in split]

    def choose(continuous, derivative):
        if continuous:
            return derivative
        else:
            return None
    return [choose(c, d) for c, d in zip(is_continuous, derivative)]
Example 5: structured_monoid
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import shape [as alias]
def structured_monoid(tensor_op):
    # Generic decorator to perform many kinds of monoid element-wise
    # operations on the non-zeros of a sparse matrix.
    # The first parameter must always be a sparse matrix. The other parameters
    # must be scalars, which are passed as arguments to tensor_op.
    def decorator(f):
        def wrapper(*args):
            x = as_sparse_variable(args[0])
            assert x.format in ["csr", "csc"]

            xs = [scalar.as_scalar(arg) for arg in args[1:]]

            data, ind, ptr, shape = csm_properties(x)
            data = tensor_op(data, *xs)

            return CSM(x.format)(data, ind, ptr, shape)
        wrapper.__name__ = str(tensor_op.scalar_op)
        return wrapper
    return decorator
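A sketch of how the decorator is applied, mirroring how Theano's theano.sparse.basic defines its structured_* helpers; note that the decorated body is never executed, it only contributes the docstring while the decorator supplies the element-wise op:

@structured_monoid(tensor.nnet.sigmoid)
def structured_sigmoid(x):
    """Apply the sigmoid element-wise, but only to the non-zero entries of x."""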
Example 6: __init__
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import shape [as alias]
def __init__(self, input_var=None, num_styles=None, shape=(None, 3, 256, 256), net_type=1, **kwargs):
    """
    net_type: 0 (fast neural style, fns) or 1 (conditional instance norm, cin)
    """
    assert net_type in [0, 1]
    self.net_type = net_type
    self.network = {}

    if len(shape) == 2:
        shape = (None, 3, shape[0], shape[1])
    elif len(shape) == 3:
        shape = (None, shape[0], shape[1], shape[2])
    self.shape = shape

    self.num_styles = num_styles

    self.network['loss_net'] = {}
    self.setup_loss_net()
    self.load_loss_net_weights()

    self.network['transform_net'] = {}
    self.setup_transform_net(input_var)
Example 7: setup_transform_net
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import shape [as alias]
def setup_transform_net(self, input_var=None):
    transform_net = InputLayer(shape=self.shape, input_var=input_var)
    transform_net = style_conv_block(transform_net, self.num_styles, 32, 9, 1)
    transform_net = style_conv_block(transform_net, self.num_styles, 64, 3, 2)
    transform_net = style_conv_block(transform_net, self.num_styles, 128, 3, 2)
    for _ in range(5):
        transform_net = residual_block(transform_net, self.num_styles)
    transform_net = nn_upsample(transform_net, self.num_styles)
    transform_net = nn_upsample(transform_net, self.num_styles)

    if self.net_type == 0:
        transform_net = style_conv_block(transform_net, self.num_styles, 3, 9, 1, tanh)
        transform_net = ExpressionLayer(transform_net, lambda X: 150. * X, output_shape=None)
    elif self.net_type == 1:
        transform_net = style_conv_block(transform_net, self.num_styles, 3, 9, 1, sigmoid)

    self.network['transform_net'] = transform_net
Example 8: perform
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import shape [as alias]
def perform(self, node, inputs, outputs):
    (x, s) = inputs
    (z,) = outputs
    M, N = x.shape
    assert x.format == 'csc'
    assert s.shape == (M,)

    indices = x.indices
    indptr = x.indptr

    y_data = x.data.copy()

    for j in xrange(0, N):
        for i_idx in xrange(indptr[j], indptr[j + 1]):
            y_data[i_idx] *= s[indices[i_idx]]

    z[0] = scipy.sparse.csc_matrix((y_data, indices, indptr), (M, N))
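The loop above multiplies every non-zero of the CSC matrix by the value of s at its row index; numerically that matches a left multiplication by diag(s), which can serve as a sanity check. A sketch with made-up values, not part of the Op's API:

import numpy as np
import scipy.sparse

x = scipy.sparse.csc_matrix(np.array([[1., 0.], [0., 2.], [3., 0.]]))
s = np.array([10., 100., 1000.])
z = scipy.sparse.diags(s).dot(x)   # rows of x scaled by s; same values as the loop computes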
Example 9: build_transition_cost
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import shape [as alias]
def build_transition_cost(logits, targets, num_transitions):
    """
    Build a parse action prediction cost function.
    """
    # swap seq_length dimension to front so that we can scan per timestep
    logits = T.swapaxes(logits, 0, 1)
    targets = targets.T

    def cost_t(logits, tgt, num_transitions):
        # TODO(jongauthier): Taper down xent cost as we proceed through
        # sequence?
        predicted_dist = T.nnet.softmax(logits)
        cost = T.nnet.categorical_crossentropy(predicted_dist, tgt)

        pred = T.argmax(logits, axis=1)
        error = T.neq(pred, tgt)
        return cost, error

    results, _ = theano.scan(cost_t, [logits, targets], non_sequences=[num_transitions])
    costs, errors = results

    # Create a mask that selects only transitions that involve real data.
    unrolling_length = T.shape(costs)[0]
    padding = unrolling_length - num_transitions
    padding = T.reshape(padding, (1, -1))
    rng = T.arange(unrolling_length) + 1
    rng = T.reshape(rng, (-1, 1))
    mask = T.gt(rng, padding)

    # Compute acc using the mask
    acc = 1.0 - (T.sum(errors * mask, dtype=theano.config.floatX)
                 / T.sum(num_transitions, dtype=theano.config.floatX))

    # Compute cost directly, since we *do* want a cost incentive to get the padding
    # transitions right.
    cost = T.mean(costs)
    return cost, acc
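A sketch of the symbolic wiring this function expects: per-timestep logits, integer targets, and the true number of transitions per example. The variable names, dtypes, and shapes below are assumptions for illustration, not taken from the original project:

import theano.tensor as T

logits = T.ftensor3('logits')                    # assumed shape (batch, seq_len, num_actions)
targets = T.imatrix('targets')                   # assumed shape (batch, seq_len)
num_transitions = T.ivector('num_transitions')   # assumed shape (batch,), real transitions per example
cost, acc = build_transition_cost(logits, targets, num_transitions)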
Example 10: __eq__
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import shape [as alias]
def __eq__(self, other):
    (a, b), (x, y) = self, other
    return (a == x and
            (b.dtype == y.dtype) and
            (type(b) == type(y)) and
            (b.shape == y.shape) and
            (abs(b - y).sum() < 1e-6 * b.nnz))
Example 11: __str__
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import shape [as alias]
def __str__(self):
    return '%s{%s,%s,shape=%s,nnz=%s}' % (
        self.__class__.__name__,
        self.format,
        self.dtype,
        self.data.shape,
        self.data.nnz)
Example 12: csm_shape
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import shape [as alias]
def csm_shape(csm):
    """
    Return the shape field of the sparse variable.
    """
    return csm_properties(csm)[3]
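A brief sketch of calling it on a symbolic sparse variable; csm_shape and csm_properties are both exposed by theano.sparse, and the variable name is illustrative:

import theano
import theano.sparse as sparse

x = sparse.csc_matrix('x', dtype='float64')
shp = sparse.csm_shape(x)            # symbolic integer vector [n_rows, n_cols]
f = theano.function([x], shp)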
Example 13: make_node
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import shape [as alias]
def make_node(self, data, indices, indptr, shape):
    data = tensor.as_tensor_variable(data)

    if not isinstance(indices, gof.Variable):
        indices_ = numpy.asarray(indices)
        indices_32 = theano._asarray(indices, dtype='int32')
        assert (indices_ == indices_32).all()
        indices = indices_32
    if not isinstance(indptr, gof.Variable):
        indptr_ = numpy.asarray(indptr)
        indptr_32 = theano._asarray(indptr, dtype='int32')
        assert (indptr_ == indptr_32).all()
        indptr = indptr_32
    if not isinstance(shape, gof.Variable):
        shape_ = numpy.asarray(shape)
        shape_32 = theano._asarray(shape, dtype='int32')
        assert (shape_ == shape_32).all()
        shape = shape_32

    indices = tensor.as_tensor_variable(indices)
    indptr = tensor.as_tensor_variable(indptr)
    shape = tensor.as_tensor_variable(shape)

    if data.type.ndim != 1:
        raise TypeError('data argument must be a vector', data.type,
                        data.type.ndim)
    if indices.type.ndim != 1 or indices.type.dtype not in discrete_dtypes:
        raise TypeError('indices must be vector of integers', indices,
                        indices.type)
    if indptr.type.ndim != 1 or indptr.type.dtype not in discrete_dtypes:
        raise TypeError('indptr must be vector of integers', indptr,
                        indptr.type)
    if shape.type.ndim != 1 or shape.type.dtype not in discrete_dtypes:
        raise TypeError('n_rows must be integer type', shape, shape.type)

    return gof.Apply(self,
                     [data, indices, indptr, shape],
                     [SparseType(dtype=data.type.dtype,
                                 format=self.format)()])
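A sketch of building a symbolic CSC matrix from its components via CSM, the Op whose make_node is shown above. CSM is importable from theano.sparse; the concrete arrays below are made up for illustration:

import numpy as np
import theano
import theano.tensor as tensor
from theano.sparse import CSM

data = tensor.dvector('data')
indices = tensor.ivector('indices')
indptr = tensor.ivector('indptr')
shape = tensor.ivector('shape')

x = CSM('csc')(data, indices, indptr, shape)   # symbolic sparse variable
f = theano.function([data, indices, indptr, shape], x)
m = f(np.array([1., 2., 3.]),
      np.array([0, 2, 1], dtype='int32'),
      np.array([0, 2, 3], dtype='int32'),
      np.array([3, 2], dtype='int32'))         # a 3x2 scipy.sparse.csc_matrix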