This article collects typical usage examples of Python's theano.tensor.reshape method. If you have been wondering what exactly tensor.reshape does and how to use it, the curated code samples below may help. You can also explore further usage examples from the theano.tensor module.
The following lists 15 code examples of tensor.reshape, sorted by popularity by default. You can upvote any example you like or find useful; your feedback helps the system recommend better Python code samples.
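Before diving into the examples, here is a minimal, self-contained sketch of the method itself (the shapes are illustrative only):

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')                # e.g. a (2, 6) matrix
y = T.reshape(x, (3, 4))         # same 12 elements, new shape
z = T.reshape(x, (-1, 2))        # -1 lets Theano infer the remaining dimension
f = theano.function([x], [y, z])
a, b = f(np.arange(12, dtype=theano.config.floatX).reshape(2, 6))
print(a.shape, b.shape)          # (3, 4) (6, 2)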
Example 1: call
# Required import: from theano import tensor [as an alias]
# Alternatively: from theano.tensor import reshape [as an alias]
def call(self, x, mask=None):
    # _trans, _transform_trans, _fusion and _transform_rot are helpers
    # defined in the same source file (transform_rnn.py); K is assumed
    # to be the Keras backend.
    conv_input, theta = x
    s = theta.shape
    theta = T.reshape(theta, [-1, s[2]])
    m = K.not_equal(conv_input, 0.)
    # Translation
    trans = _trans(theta)
    output = _transform_trans(trans, conv_input)
    output = output * K.cast(m, K.floatx())
    # Rotation
    M = _fusion(theta)
    output = _transform_rot(M, output)
    return output
Developer: microsoft; Project: View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition; Lines: 18; Source: transform_rnn.py
Example 2: set_output
# Required import: from theano import tensor [as an alias]
# Alternatively: from theano.tensor import reshape [as an alias]
def set_output(self):
    padding = self._padding
    input_shape = self._input_shape
    # Zero-pad the temporal and spatial dimensions (axes 1, 3 and 4) of the
    # 5D input, then copy the previous layer's output into the middle.
    padded_input = tensor.alloc(0.0,  # value to fill the tensor with
                                input_shape[0],
                                input_shape[1] + 2 * padding[1],
                                input_shape[2],
                                input_shape[3] + 2 * padding[3],
                                input_shape[4] + 2 * padding[4])
    padded_input = tensor.set_subtensor(
        padded_input[:, padding[1]:padding[1] + input_shape[1], :,
                     padding[3]:padding[3] + input_shape[3],
                     padding[4]:padding[4] + input_shape[4]],
        self._prev_layer.output)
    # Project the fully connected output and reshape it to the 5D output shape.
    fc_output = tensor.reshape(
        tensor.dot(self._fc_layer.output, self.Wx.val), self._output_shape)
    self._output = conv3d2d.conv3d(padded_input, self.Wh.val) + \
        fc_output + self.b.val.dimshuffle('x', 'x', 0, 'x', 'x')
Example 3: test_elemwise_collapse
# Required import: from theano import tensor [as an alias]
# Alternatively: from theano.tensor import reshape [as an alias]
def test_elemwise_collapse():
    """Test when all inputs have one (and the same) broadcastable dimension."""
    shape = (4, 5, 60)
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle(0, 'x', 1, 2)
    b = tcn.CudaNdarrayType((False, True, False, False))()
    c = a3 + b
    f = pfunc([b], [c], mode=mode_with_gpu)
    v = theano._asarray(numpy.random.rand(shape[0], 1, *shape[1:]),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(shape[0], 1, *shape[1:]) + v)
    # Expected collapse of all dimensions
Example 4: test_elemwise_collapse2
# Required import: from theano import tensor [as an alias]
# Alternatively: from theano.tensor import reshape [as an alias]
def test_elemwise_collapse2():
    """Test when only one input has a broadcastable dimension."""
    shape = (4, 5, 9)
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle(0, 'x', 1, 2)
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = a3 + b
    f = pfunc([b], [c], mode=mode_with_gpu)
    v = theano._asarray(numpy.random.rand(shape[0], 5, *shape[1:]),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(shape[0], 1, *shape[1:]) + v)
    # Expected collapse to 3 dimensions
Example 5: test_elemwise_collapse3
# Required import: from theano import tensor [as an alias]
# Alternatively: from theano.tensor import reshape [as an alias]
def test_elemwise_collapse3():
    """Test when only one input has a broadcastable dimension at each end."""
    shape = (4, 5)
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 0, 1, 'x')
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = (a3 + b)
    f = pfunc([b], [c], mode=mode_with_gpu)
    v = theano._asarray(numpy.random.rand(5, shape[0], shape[1], 4),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, shape[0], shape[1], 1) + v)
    # Expected collapse to 3 dimensions
Example 6: test_elemwise_collapse4
# Required import: from theano import tensor [as an alias]
# Alternatively: from theano.tensor import reshape [as an alias]
def test_elemwise_collapse4():
    """Test when only one input has a broadcastable dimension at each end,
    and a scalar is added."""
    shape = (4, 5)
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 0, 1, 'x')
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = (a3 + b + 2)
    f = pfunc([b], [c], mode=mode_with_gpu)
    v = theano._asarray(numpy.random.rand(5, shape[0], shape[1], 4),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, shape[0], shape[1], 1) + v + 2)
    # Expected collapse to 3 dimensions
Example 7: test_elemwise_collapse6
# Required import: from theano import tensor [as an alias]
# Alternatively: from theano.tensor import reshape [as an alias]
def test_elemwise_collapse6():
    """Test when all inputs have two broadcastable dimensions at the
    beginning."""
    shape = (4, 5)
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 'x', 0, 1)
    b = tcn.CudaNdarrayType((True, True, False, False))()
    f = pfunc([b], [a3 + b], mode=mode_with_gpu)
    v = theano._asarray(numpy.random.rand(1, 1, shape[0], shape[1]),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, 1, shape[0], shape[1]) + v)
    # Expected collapse to c contiguous
Example 8: test_elemwise_collapse7
# Required import: from theano import tensor [as an alias]
# Alternatively: from theano.tensor import reshape [as an alias]
def test_elemwise_collapse7(atol=1e-6):
    """Test when one input has a broadcastable dimension and the other
    is a scalar."""
    shape = (5, 4, 1)
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a.copy(), 'a')
    a3 = a2.dimshuffle(0, 'x', 1, 2)
    f = pfunc([], [a3 + 2], mode=mode_with_gpu)
    # let debugmode catch errors
    out = f()[0]
    ans = (a + 2).reshape(shape[0], 1, shape[1], shape[2])
    assert numpy.allclose(out, ans, atol=atol)
    # Expected collapse to c contiguous
Example 9: test_advinc_subtensor1
# Required import: from theano import tensor [as an alias]
# Alternatively: from theano.tensor import reshape [as an alias]
def test_advinc_subtensor1():
    """Test the second case in the opt local_gpu_advanced_incsubtensor1."""
    for shp in [(3, 3), (3, 3, 3)]:
        shared = cuda.shared_constructor
        xval = numpy.arange(numpy.prod(shp), dtype='float32').reshape(shp) + 1
        yval = numpy.empty((2,) + shp[1:], dtype='float32')
        yval[:] = 10
        x = shared(xval, name='x')
        y = T.tensor(dtype='float32',
                     broadcastable=(False,) * len(shp),
                     name='y')
        expr = T.advanced_inc_subtensor1(x, y, [0, 2])
        f = theano.function([y], expr, mode=mode_with_gpu)
        assert sum([isinstance(node.op, cuda.GpuAdvancedIncSubtensor1)
                    for node in f.maker.fgraph.toposort()]) == 1
        rval = f(yval)
        rep = xval.copy()
        rep[[0, 2]] += yval
        utt.assert_allclose(rval, rep)
Example 10: test_advset_subtensor1
# Required import: from theano import tensor [as an alias]
# Alternatively: from theano.tensor import reshape [as an alias]
def test_advset_subtensor1():
    """Test the GPU version of set_subtensor on vectors
    (uses GpuAdvancedIncSubtensor1)."""
    shp = (10,)
    shared = cuda.shared_constructor
    xval = numpy.arange(shp[0], dtype='float32').reshape(shp) + 1
    idxs = numpy.array([0, 2, 5, 7, 3], dtype='int32')
    yval = numpy.ones(len(idxs), dtype='float32') * 10
    x = shared(xval, name='x')
    y = T.tensor(dtype='float32', broadcastable=(False,) * len(shp), name='y')
    expr = T.advanced_set_subtensor1(x, y, idxs)
    f = theano.function([y], expr, mode=mode_with_gpu)
    assert sum([isinstance(node.op, cuda.GpuAdvancedIncSubtensor1)
                for node in f.maker.fgraph.toposort()]) == 1
    rval = f(yval)
    rep = xval.copy()
    rep[idxs] = yval
    utt.assert_allclose(rval, rep)
Example 11: test_advset_subtensor1_2d
# Required import: from theano import tensor [as an alias]
# Alternatively: from theano.tensor import reshape [as an alias]
def test_advset_subtensor1_2d():
    """Test the GPU version of set_subtensor on matrices (uses
    GpuAdvancedIncSubtensor1_dev20 if compute capability >= 2.0)."""
    shp = (10, 5)
    shared = cuda.shared_constructor
    xval = numpy.arange(numpy.prod(shp), dtype='float32').reshape(shp) + 1
    idxs = numpy.array([0, 2, 5, 7, 3], dtype='int32')
    yval = numpy.ones((len(idxs), shp[1]), dtype='float32') * 10
    x = shared(xval, name='x')
    y = T.tensor(dtype='float32', broadcastable=(False,) * len(shp), name='y')
    expr = T.advanced_set_subtensor1(x, y, idxs)
    f = theano.function([y], expr, mode=mode_with_gpu)
    assert sum([isinstance(node.op, cuda.GpuAdvancedIncSubtensor1)
                for node in f.maker.fgraph.toposort()]) == 1
    rval = f(yval)
    rep = xval.copy()
    rep[idxs] = yval
    utt.assert_allclose(rval, rep)
Example 12: get_output_for
# Required import: from theano import tensor [as an alias]
# Alternatively: from theano.tensor import reshape [as an alias]
def get_output_for(self, inputs, **kwargs):
    Y, U = inputs
    if Y.ndim > (self.axis + 1):
        Y = Y.flatten(self.axis + 1)
    assert Y.ndim == self.axis + 1
    # Outer product of Y and U, then contract with Q for the bilinear term.
    outer_YU = (Y.dimshuffle(list(range(Y.ndim)) + ['x']) *
                U.dimshuffle([0] + ['x'] * self.axis + [1]))
    bilinear = T.dot(outer_YU.reshape((-1, self.y_dim * self.u_dim)),
                     self.Q.reshape((self.y_dim, self.y_dim * self.u_dim)).T)
    if self.axis > 1:
        bilinear = bilinear.reshape((-1,) + self.y_shape[:self.axis - 1] + (self.y_dim,))
    linear_u = T.dot(U, self.R.T)
    if self.axis > 1:
        linear_u = linear_u.dimshuffle([0] + ['x'] * (self.axis - 1) + [1])
    linear_y = T.dot(Y, self.S.T)
    if self.axis > 1:
        linear_y = linear_y.reshape((-1,) + self.y_shape[:self.axis - 1] + (self.y_dim,))
    activation = bilinear + linear_u + linear_y
    if self.b is not None:
        activation += self.b.dimshuffle(['x'] * self.axis + [0])
    activation = activation.reshape((-1,) + self.y_shape)
    return activation
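For reference, the layer above computes a bilinear form plus linear terms: with the leading axes flattened, the activation is Q·(y ⊗ u) + R·u + S·y + b, where y ⊗ u is the outer product built by the two dimshuffle calls; the surrounding reshape calls only move between the flattened layout and the structured y_shape layout.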
Example 13: get_output_for
# Required import: from theano import tensor [as an alias]
# Alternatively: from theano.tensor import reshape [as an alias]
def get_output_for(self, input, **kwargs):
    # If the input has more than two dimensions, flatten it into a
    # batch of feature vectors.
    input_reshape = input.flatten(2) if input.ndim > 2 else input
    activation = T.dot(input_reshape, self.W_h)
    if self.b_h is not None:
        activation = activation + self.b_h.dimshuffle('x', 0)
    activation = self.nonlinearity(activation)
    transform = T.dot(input_reshape, self.W_t)
    if self.b_t is not None:
        transform = transform + self.b_t.dimshuffle('x', 0)
    transform = nonlinearities.sigmoid(transform)
    carry = 1.0 - transform
    output = activation * transform + input_reshape * carry
    # Reshape the output back to the original input shape.
    if input.ndim > 2:
        output = T.reshape(output, input.shape)
    return output
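This is the standard highway-layer gating: with H(x) = nonlinearity(x·W_h + b_h) and gate T(x) = sigmoid(x·W_t + b_t), the output is H(x)·T(x) + x·(1 − T(x)), so the layer smoothly interpolates between transforming its input and passing it through unchanged.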
Example 14: downsample
# Required import: from theano import tensor [as an alias]
# Alternatively: from theano.tensor import reshape [as an alias]
def downsample(data):
    # Assumes numpy is imported as np and block_reduce is presumably
    # skimage.measure.block_reduce; xrange implies Python 2.
    data['_tr_X'] = np.zeros((len(data['tr_X']), 14 * 14), dtype='float32')
    data['_va_X'] = np.zeros((len(data['va_X']), 14 * 14), dtype='float32')
    data['_te_X'] = np.zeros((len(data['te_X']), 14 * 14), dtype='float32')
    # 2x2 mean-pool each flattened image; the hard-coded 14*14 assumes
    # data['shape_x'] is (28, 28).
    for i in xrange(len(data['tr_X'])):
        data['_tr_X'][i] = block_reduce(data['tr_X'][i].reshape(data['shape_x']),
                                        block_size=(2, 2), func=np.mean).flatten()
    for i in xrange(len(data['va_X'])):
        data['_va_X'][i] = block_reduce(data['va_X'][i].reshape(data['shape_x']),
                                        block_size=(2, 2), func=np.mean).flatten()
    for i in xrange(len(data['te_X'])):
        data['_te_X'][i] = block_reduce(data['te_X'][i].reshape(data['shape_x']),
                                        block_size=(2, 2), func=np.mean).flatten()
    data['tr_X'] = data['_tr_X']
    data['va_X'] = data['_va_X']
    data['te_X'] = data['_te_X']
    data['shape_x'] = (14, 14)
    data['n_x'] = 14 * 14
    return data
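As a quick sanity check of the pooling step above, the same operation on a single image looks like this (a minimal sketch; the 28x28 input is a hypothetical MNIST-style shape, and block_reduce is assumed to come from skimage.measure as noted above):

import numpy as np
from skimage.measure import block_reduce

img = np.random.rand(28, 28).astype('float32')
small = block_reduce(img, block_size=(2, 2), func=np.mean)
print(small.shape)  # (14, 14)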
Example 15: depool
# Required import: from theano import tensor [as an alias]
# Alternatively: from theano.tensor import reshape [as an alias]
def depool(X, factor=2):
    """
    Luke perforated upsample: http://www.brml.org/uploads/tx_sibibtex/281.pdf
    """
    output_shape = [
        X.shape[1],
        X.shape[2] * factor,
        X.shape[3] * factor
    ]
    stride = X.shape[2]
    offset = X.shape[3]
    in_dim = stride * offset
    out_dim = in_dim * factor * factor
    # Build a sparse 0/1 matrix that scatters each input pixel to its
    # upsampled position; every other output position stays zero.
    upsamp_matrix = T.zeros((in_dim, out_dim))
    rows = T.arange(in_dim)
    cols = rows * factor + (rows // stride) * factor * offset  # integer division
    upsamp_matrix = T.set_subtensor(upsamp_matrix[rows, cols], 1.)
    flat = T.reshape(X, (X.shape[0], output_shape[0], X.shape[2] * X.shape[3]))
    up_flat = T.dot(flat, upsamp_matrix)
    upsamp = T.reshape(up_flat, (X.shape[0], output_shape[0],
                                 output_shape[1], output_shape[2]))
    return upsamp
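A minimal way to exercise depool (a sketch only; the input shape is hypothetical, and X is assumed to be laid out as (batch, channels, height, width)):

import numpy as np
import theano
import theano.tensor as T

X = T.tensor4('X')
f = theano.function([X], depool(X, factor=2))
out = f(np.random.rand(2, 3, 4, 4).astype(theano.config.floatX))
print(out.shape)  # expected (2, 3, 8, 8)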