This article collects typical usage examples of the Python method theano.sandbox.cuda.basic_ops.gpu_contiguous. If you are wondering what exactly basic_ops.gpu_contiguous does and how to use it, the curated code examples below may help. You can also explore further usage examples of the module this method lives in, theano.sandbox.cuda.basic_ops.
The following shows 15 code examples of basic_ops.gpu_contiguous, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
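Before the examples, a quick orientation: gpu_contiguous takes a Theano variable and returns a version that is guaranteed to be C-contiguous in GPU memory, copying only when the input is not already contiguous. The sketch below is illustrative and assumes the legacy theano.sandbox.cuda backend with a GPU device configured (e.g. THEANO_FLAGS=device=gpu,floatX=float32); it is not taken from the examples that follow.
import theano
import theano.tensor as T
from theano.sandbox.cuda.basic_ops import gpu_contiguous
x = T.ftensor4('x')
# dimshuffle yields a strided (non-contiguous) view; gpu_contiguous forces a
# C-contiguous copy on the GPU, which ops such as GpuCorrMM or FilterActs need.
y = gpu_contiguous(x.dimshuffle(0, 3, 1, 2))
f = theano.function([x], y)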
Example 1: run_gradweight
# Required module import: from theano.sandbox.cuda import basic_ops [as alias]
# or: from theano.sandbox.cuda.basic_ops import gpu_contiguous [as alias]
def run_gradweight(self, inputs_shape, filters_shape, dCdH_shape,
subsample=(1, 1, 1)):
inputs_val = numpy.random.random(inputs_shape).astype('float32')
dCdH_val = numpy.random.random(dCdH_shape).astype('float32')
inputs = shared(inputs_val)
dCdH = shared(dCdH_val)
conv = theano.tensor.nnet.convGrad3D(V=inputs, dCdH=dCdH,
WShape=filters_shape,
d=subsample)
img = gpu_contiguous(inputs.dimshuffle(0, 4, 1, 2, 3))
topgrad = gpu_contiguous(dCdH.dimshuffle(0, 4, 1, 2, 3))
if (subsample == (1, 1, 1)):
conv_gemm = GpuCorr3dMM_gradWeights(subsample=subsample)(img,
topgrad)
else:
conv_gemm = GpuCorr3dMM_gradWeights(subsample=subsample)(
img, topgrad, shape=filters_shape[1:4])
conv_gemm = conv_gemm.dimshuffle(0, 2, 3, 4, 1)
f_ref = theano.function([], conv)
f = theano.function([], conv_gemm, mode=mode_with_gpu)
res_ref = f_ref()
res = f()
utt.assert_allclose(res_ref, res)
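A call consistent with the shape conventions above would look like the following; inputs and dCdH are laid out as (batch, d0, d1, d2, channels), filters as (nfilters, f0, f1, f2, channels), and each dCdH spatial dim equals (input - filter) // subsample + 1. The concrete numbers are illustrative assumptions, not taken from the original test suite.
# Hypothetical shapes: batch of 16, 1 input channel, 10 output filters;
# dCdH spatial dims are (10-6+1, 12-12+1, 16-4+1) = (5, 1, 13) for subsample (1, 1, 1).
self.run_gradweight(inputs_shape=(16, 10, 12, 16, 1),
                    filters_shape=(10, 6, 12, 4, 1),
                    dCdH_shape=(16, 5, 1, 13, 10),
                    subsample=(1, 1, 1))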
Example 2: output
# Required module import: from theano.sandbox.cuda import basic_ops [as alias]
# or: from theano.sandbox.cuda.basic_ops import gpu_contiguous [as alias]
def output(self, input=None, dropout_active=True, *args, **kwargs):
if input is None:
input = self.input_layer.output(dropout_active=dropout_active, *args, **kwargs)
if dropout_active and (self.dropout > 0.):
retain_prob = 1 - self.dropout
mask = layers.srng.binomial(input.shape, p=retain_prob, dtype='int32').astype('float32')
# apply the input mask and rescale the input accordingly. By doing this it's no longer necessary to rescale the weights at test time.
input = input / retain_prob * mask
contiguous_input = gpu_contiguous(input)
contiguous_filters = gpu_contiguous(self.W)
conved = self.filter_acts_op(contiguous_input, contiguous_filters)
if self.untie_biases:
conved += self.b.dimshuffle(0, 1, 2, 'x')
else:
conved += self.b.dimshuffle(0, 'x', 'x', 'x')
return self.nonlinearity(conved)
Example 3: prob_max_pool_c01b
# Required module import: from theano.sandbox.cuda import basic_ops [as alias]
# or: from theano.sandbox.cuda.basic_ops import gpu_contiguous [as alias]
def prob_max_pool_c01b(c01b, pool_shape, top_down = None):
"""
.. todo::
WRITEME
"""
if pool_shape[0] != pool_shape[1]:
raise UnimplementedError("Non-square pool shapes are not supported yet")
assert pool_shape[0] > 0
ch, zr, zc, batch_size = c01b.shape
r, c = pool_shape
if top_down is None:
top_down = tensor.zeros((ch, zr / r, zc / c, batch_size), dtype = c01b.dtype)
op = ProbMaxPool(pool_shape[0])
c01b = gpu_contiguous(c01b)
top_down = gpu_contiguous(top_down)
return op(c01b, top_down)
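A hedged usage sketch for this helper: c01b is expected in cuda_convnet layout (channels, rows, cols, batch) with spatial dims divisible by the pool shape, and the underlying op returns two outputs, the pooled probabilities p and the detector-layer activations h (compare the grad in Example 4). Shapes and names below are illustrative assumptions.
from theano import tensor
# E.g. 16 channels over an 8x8 grid with a batch of 32, pooled in
# non-overlapping 2x2 blocks; top_down defaults to zeros.
c01b_in = tensor.ftensor4('c01b')
p, h = prob_max_pool_c01b(c01b_in, pool_shape=(2, 2))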
Example 4: grad
# Required module import: from theano.sandbox.cuda import basic_ops [as alias]
# or: from theano.sandbox.cuda.basic_ops import gpu_contiguous [as alias]
def grad(self, inp, grads):
"""
.. todo::
WRITEME
"""
x, top_down = inp
p, h = self(x, top_down)
gp, gh = grads
gp_iszero = 0.
gh_iszero = 0.
if isinstance(gp.type, theano.gradient.DisconnectedType):
gp = tensor.zeros_like(p)
gp_iszero = 1.
if isinstance(gh.type, theano.gradient.DisconnectedType):
gh = tensor.zeros_like(h)
gh_iszero = 1.
gp = gpu_contiguous(gp)
gh = gpu_contiguous(gh)
gp_iszero = as_cuda_ndarray_variable(gp_iszero)
gh_iszero = as_cuda_ndarray_variable(gh_iszero)
return ProbMaxPoolGrad(self.ds, self.stride, self.start)(p, h, gp, gh, gp_iszero, gh_iszero)
# Make sure the cuda_convnet library is compiled and up-to-date
Example 5: grad
# Required module import: from theano.sandbox.cuda import basic_ops [as alias]
# or: from theano.sandbox.cuda.basic_ops import gpu_contiguous [as alias]
def grad(self, inputs, dout):
"""
.. todo::
WRITEME
"""
images, = inputs
acts, denoms = self(images)
dout, _ = dout # Ignore the gradient on "denoms"
dout = as_cuda_ndarray_variable(dout)
# dout must be contiguous, but it isn't always, depending
# on what is done with the output of this node.
dout = gpu_contiguous(dout)
grad_op = CrossMapNormUndo(self._size_f, self._add_scale,
self._pow_scale, self._blocked,
inplace=False)
return [grad_op(images, acts, denoms, dout)[0]]
Example 6: output
# Required module import: from theano.sandbox.cuda import basic_ops [as alias]
# or: from theano.sandbox.cuda.basic_ops import gpu_contiguous [as alias]
def output(self, input=None, dropout_active=True, *args, **kwargs):
if input is None:
input = self.input_layer.output(dropout_active=dropout_active,
*args, **kwargs)
if dropout_active and (self.dropout > 0.):
retain_prob = 1 - self.dropout
mask = layers.srng.binomial(input.shape, p=retain_prob,
dtype='int32').astype('float32')
# apply the input mask and rescale the input accordingly.
# By doing this it's no longer necessary to rescale the weights
# at test time.
input = input / retain_prob * mask
contiguous_input = gpu_contiguous(input)
contiguous_filters = gpu_contiguous(self.W)
conved = self.filter_acts_op(contiguous_input, contiguous_filters)
if self.untie_biases:
conved += self.b.dimshuffle(0, 1, 2, 'x')
else:
conved += self.b.dimshuffle(0, 'x', 'x', 'x')
return self.nonlinearity(conved)
Example 7: __init__
# Required module import: from theano.sandbox.cuda import basic_ops [as alias]
# or: from theano.sandbox.cuda.basic_ops import gpu_contiguous [as alias]
def __init__(self, filter_size=7, num_channels=3):
# magic numbers that make things work for stl10
self.filter_size = filter_size
self.pad = self.filter_size / 2 # -1
self.num_channels = num_channels
self.num_filters = 16
input = T.ftensor4(name='input')
filter = T.ftensor4(name='filter')
gpu_input = gpu_contiguous(input)
gpu_filter = gpu_contiguous(filter)
self.conv_func = theano.function([input, filter],
FilterActs(pad=self.pad)(gpu_input,
gpu_filter))
n = self.num_channels * self.filter_size * self.filter_size
self.w = numpy.float32(numpy.ones((self.num_channels, self.filter_size,
self.filter_size,
self.num_filters))) / n
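The compiled self.conv_func can then be fed numpy arrays directly; FilterActs works in c01b layout, i.e. images of shape (channels, rows, cols, batch) and filters of shape (channels, filter_rows, filter_cols, num_filters), both float32 (Theano handles the host-to-GPU transfer). A hypothetical usage, with the surrounding class name and the 96x96 STL-10-sized batch assumed rather than taken from the source:
import numpy
box = BoxFilterBank(filter_size=7, num_channels=3)  # hypothetical class name
images_c01b = numpy.random.rand(3, 96, 96, 128).astype('float32')
# pad = filter_size // 2 keeps the spatial size, so the result is (16, 96, 96, 128).
out_c01b = box.conv_func(images_c01b, box.w)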
Example 8: test_local_gpu_contiguous_gpu_contiguous
# Required module import: from theano.sandbox.cuda import basic_ops [as alias]
# or: from theano.sandbox.cuda.basic_ops import gpu_contiguous [as alias]
def test_local_gpu_contiguous_gpu_contiguous():
a = tensor.fmatrix()
o1 = basic_ops.gpu_contiguous(a)
o2 = basic_ops.gpu_contiguous(o1)
f1 = theano.function([a], o1, mode=mode_with_gpu)
f2 = theano.function([a], o2, mode=mode_with_gpu)
assert 1 == len([node for node in f1.maker.fgraph.toposort()
if isinstance(node.op, basic_ops.GpuContiguous)])
assert 1 == len([node for node in f2.maker.fgraph.toposort()
if isinstance(node.op, basic_ops.GpuContiguous)])
Example 9: run_gradinput
# Required module import: from theano.sandbox.cuda import basic_ops [as alias]
# or: from theano.sandbox.cuda.basic_ops import gpu_contiguous [as alias]
def run_gradinput(self, inputs_shape, filters_shape,
subsample=(1, 1, 1)):
inputs_val = numpy.random.random(inputs_shape).astype('float32')
filters_val = numpy.random.random(filters_shape).astype('float32')
inputs = shared(inputs_val)
filters = shared(filters_val)
bias = shared(numpy.zeros(filters_shape[4]).astype('float32'))
conv = theano.tensor.nnet.convTransp3D(W=filters, b=bias, d=subsample,
H=inputs)
f_ref = theano.function([], conv)
res_ref = f_ref()
# Get bottom shape using convTransp3D
bottom_shape = res_ref.shape
bottom_val = numpy.random.random(bottom_shape).astype('float32')
bottom = shared(bottom_val)
weight = gpu_contiguous(filters.dimshuffle(0, 4, 1, 2, 3))
top = gpu_contiguous(inputs.dimshuffle(0, 4, 1, 2, 3))
if (subsample == (1, 1, 1)):
conv_gemm = GpuCorr3dMM_gradInputs(subsample=subsample)(
kern=weight, topgrad=top)
else:
conv_gemm = GpuCorr3dMM_gradInputs(subsample=subsample)(
kern=weight, topgrad=top,
shape=bottom.shape[1:4])
conv_gemm = conv_gemm.dimshuffle(0, 2, 3, 4, 1)
f = theano.function([], conv_gemm, mode=mode_with_gpu)
res = f()
utt.assert_allclose(res_ref, res)
Example 10: f_conv
# Required module import: from theano.sandbox.cuda import basic_ops [as alias]
# or: from theano.sandbox.cuda.basic_ops import gpu_contiguous [as alias]
def f_conv(self, x, spec, in_dim, weight_name):
layer_type, dims = spec
num_filters = dims[0]
filter_size = (dims[1], dims[1])
stride = (dims[2], dims[2])
bm = 'full' if 'convf' in layer_type else 'valid'
num_channels = in_dim[0]
W = self.weight(self.rand_init_conv(
(num_filters, num_channels) + filter_size), weight_name)
if stride != (1, 1):
f = GpuCorrMM(subsample=stride, border_mode=bm, pad=(0, 0))
y = f(gpu_contiguous(x), gpu_contiguous(W))
else:
assert self.p.batch_size == self.p.valid_batch_size
y = conv2d(x, W, image_shape=(2*self.p.batch_size, ) + in_dim,
filter_shape=((num_filters, num_channels) +
filter_size), border_mode=bm)
output_size = ((num_filters,) +
ConvOp.getOutputShape(in_dim[1:], filter_size,
stride, bm))
return y, output_size
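For reference, spec is unpacked above as (layer_type, dims) with dims = (num_filters, filter_size, stride), and a layer_type containing 'convf' selects 'full' border mode while anything else uses 'valid'. A hypothetical call, with names and sizes chosen for illustration only:
# A 'convv' (valid-mode) layer with 64 filters of size 5x5 and stride 2,
# applied to a 3-channel 32x32 input; returns the output symbol and its shape.
y, out_dim = self.f_conv(x, spec=('convv', (64, 5, 2)),
                         in_dim=(3, 32, 32), weight_name='W_conv1')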
Example 11: stochastic_max_pool_c01b
# Required module import: from theano.sandbox.cuda import basic_ops [as alias]
# or: from theano.sandbox.cuda.basic_ops import gpu_contiguous [as alias]
def stochastic_max_pool_c01b(c01b, pool_shape, pool_stride, start=0, seed = 1234):
"""
.. todo::
WRITEME
"""
assert pool_shape[0] == pool_shape[1]
assert pool_stride[0] == pool_stride[1]
op = StochasticMaxPool(pool_shape[0], pool_stride[0], start, seed)
c01b = gpu_contiguous(c01b)
return op(c01b)
Example 12: weighted_max_pool_c01b
# Required module import: from theano.sandbox.cuda import basic_ops [as alias]
# or: from theano.sandbox.cuda.basic_ops import gpu_contiguous [as alias]
def weighted_max_pool_c01b(c01b, pool_shape, pool_stride, start=0):
"""
.. todo::
WRITEME
"""
assert pool_shape[0] == pool_shape[1]
assert pool_stride[0] == pool_stride[1]
op = WeightedMaxPool(pool_shape[0], pool_stride[0], start)
c01b = gpu_contiguous(c01b)
return op(c01b)
Example 13: grad
# Required module import: from theano.sandbox.cuda import basic_ops [as alias]
# or: from theano.sandbox.cuda.basic_ops import gpu_contiguous [as alias]
def grad(self, inp, grads):
"""
.. todo::
WRITEME
"""
x, seed = inp
gz, = grads
gz = gpu_contiguous(gz)
maxout = self(x)
return [MaxPoolGrad(self.ds, self.stride, self.start)(x, maxout, gz), zeros_like(seed)]
# Make sure the cuda_convnet library is compiled and up-to-date
Example 14: R_op
# Required module import: from theano.sandbox.cuda import basic_ops [as alias]
# or: from theano.sandbox.cuda.basic_ops import gpu_contiguous [as alias]
def R_op(self, inp, evals):
"""
.. todo::
WRITEME
"""
x, = inp
ev, = evals
if ev is not None:
ev = gpu_contiguous(ev)
return [MaxPoolRop(self.ds, self.stride, self.start)(x, ev)]
else:
return [None]
Example 15: grad
# Required module import: from theano.sandbox.cuda import basic_ops [as alias]
# or: from theano.sandbox.cuda.basic_ops import gpu_contiguous [as alias]
def grad(self, inp, grads):
"""
.. todo::
WRITEME
"""
x, = inp
gz, = grads
gz = gpu_contiguous(gz)
maxout = self(x)
return [MaxPoolGrad(self.ds, self.stride, self.start)(x, maxout, gz)]
# Make sure the cuda_convnet library is compiled and up-to-date