本文整理汇总了Python中pylearn2.sandbox.cuda_convnet.filter_acts.FilterActs.dimshuffle方法的典型用法代码示例。如果您正苦于以下问题:Python FilterActs.dimshuffle方法的具体用法?Python FilterActs.dimshuffle怎么用?Python FilterActs.dimshuffle使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pylearn2.sandbox.cuda_convnet.filter_acts.FilterActs
的用法示例。
在下文中一共展示了FilterActs.dimshuffle方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: lmul
# 需要导入模块: from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs [as 别名]
# 或者: from pylearn2.sandbox.cuda_convnet.filter_acts.FilterActs import dimshuffle [as 别名]
def lmul(self, x):
    """
    dot(x, A) -- i.e. convolve the input video x with this transform's
    filters using cuda-convnet's FilterActs op.

    Parameters
    ----------
    x : theano variable
        5-tensor whose axes are ordered according to ``self.input_axes``
        (a permutation of ('c', 0, 1, 't', 'b')).

    Returns
    -------
    theano variable
        5-tensor ordered according to ``self.output_axes``.
    """
    check_cuda(str(type(self)) + ".lmul")
    # FilterActs only runs on the GPU: if x lives on the host, move it
    # over now and remember to move the result back at the end.
    cpu = "Cuda" not in str(type(x))
    if cpu:
        x = gpu_from_host(x)
    assert x.ndim == 5
    x_axes = self.input_axes
    assert len(x_axes) == 5
    # Canonical axis order expected by the reshape below.
    op_axes = ("c", 0, 1, "t", "b")
    if tuple(x_axes) != op_axes:
        x = x.dimshuffle(*[x_axes.index(axis) for axis in op_axes])
    # Collapse the time and batch axes into a single trailing axis so the
    # 4-d FilterActs op treats every frame of every example as one image.
    _x_4d_shape = (
        self.signal_shape[0],
        self.signal_shape[1],
        self.signal_shape[2],
        self.signal_shape[3] * self.signal_shape[4],
    )
    x = x.reshape(_x_4d_shape)
    x = gpu_contiguous(x)
    rval = FilterActs(self.pad, self.partial_sum, self.kernel_stride[0])(x, self._filters)
    if cpu:
        rval = host_from_gpu(rval)
    # Expand the collapsed trailing axis back out, splitting both the
    # filter group and the (time, batch) dimensions.
    rval = rval.reshape(
        (
            self.filter_shape[3],
            self.filter_shape[4],
            rval.shape[1],
            rval.shape[2],
            self.signal_shape[3],
            self.signal_shape[4],
        )
    )
    # Combine per-frame responses along the diagonal and reduce over the
    # leading filter axis (see diagonal_subtensor for the exact slicing).
    rval = diagonal_subtensor(rval, 4, 0).sum(axis=0)
    # Format the output based on the output space.
    rval_axes = self.output_axes
    assert len(rval_axes) == 5
    if tuple(rval_axes) != op_axes:
        rval = rval.dimshuffle(*[op_axes.index(axis) for axis in rval_axes])
    return rval
示例2: lmul
# 需要导入模块: from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs [as 别名]
# 或者: from pylearn2.sandbox.cuda_convnet.filter_acts.FilterActs import dimshuffle [as 别名]
def lmul(self, x):
    """
    dot(x, A) -- convolve the input image x with this transform's
    filters via cuda-convnet's FilterActs op.

    .. todo::
        WRITEME properly
    """
    check_cuda(str(type(self)) + ".lmul")
    # FilterActs runs on the GPU; transfer a host tensor over first and
    # remember to bring the result back afterwards.
    on_host = 'Cuda' not in str(type(x))
    if on_host:
        x = gpu_from_host(x)
    # FilterActs requires the layout (channel, topo dim 0, topo dim 1,
    # batch_index).
    assert x.ndim == 4
    input_order = self.input_axes
    assert len(input_order) == 4
    wanted = ('c', 0, 1, 'b')
    if tuple(input_order) != wanted:
        perm = [input_order.index(ax) for ax in wanted]
        x = x.dimshuffle(*perm)
    x = gpu_contiguous(x)
    # Patch old pickle files that predate the kernel_stride attribute.
    if not hasattr(self, 'kernel_stride'):
        self.kernel_stride = (1, 1)
    convolve = FilterActs(self.pad, self.partial_sum, self.kernel_stride[0])
    result = convolve(x, self._filters)
    # Rearrange the output to match the declared output space.
    output_order = self.output_axes
    assert len(output_order) == 4
    if on_host:
        result = host_from_gpu(result)
    if tuple(output_order) != wanted:
        result = result.dimshuffle(*[wanted.index(ax)
                                     for ax in output_order])
    return result
示例3: lmul
# 需要导入模块: from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs [as 别名]
# 或者: from pylearn2.sandbox.cuda_convnet.filter_acts.FilterActs import dimshuffle [as 别名]
def lmul(self, x):
    """
    dot(x, A) -- convolve the input image x with this transform's
    filters.
    """
    # FilterActs is GPU-only; push a host tensor to the GPU first and
    # pull the result back at the end.
    needs_transfer = 'Cuda' not in str(type(x))
    if needs_transfer:
        x = gpu_from_host(x)
    # FilterActs requires the layout (channel, topo dim 0, topo dim 1,
    # batch_index).
    assert x.ndim == 4
    axes_in = self.input_axes
    assert len(axes_in) == 4
    canonical = ('c', 0, 1, 'b')
    if tuple(axes_in) != canonical:
        x = x.dimshuffle(*[axes_in.index(ax) for ax in canonical])
    x = gpu_contiguous(x)
    out = FilterActs(self.pad, self.partial_sum)(x, self._filters)
    # Shuffle the result into the declared output-space axis order.
    axes_out = self.output_axes
    assert len(axes_out) == 4
    if tuple(axes_out) != canonical:
        out = out.dimshuffle(*[canonical.index(ax) for ax in axes_out])
    if needs_transfer:
        out = host_from_gpu(out)
    return out