

Python tensor.patternbroadcast Method Code Examples

This article collects typical usage examples of the Python method theano.tensor.patternbroadcast. If you are unsure what tensor.patternbroadcast does, how to call it, or want to see it in context, the curated code examples below may help. You can also explore further usage examples from the theano.tensor module.


Below are 15 code examples of the tensor.patternbroadcast method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
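
Before the examples, here is a minimal standalone sketch (not drawn from any of the projects below; the variables full and row are illustrative) of what tensor.patternbroadcast does: it re-tags a variable with an explicit broadcastable pattern, so elementwise ops may broadcast along the marked axes.

import numpy
import theano
import theano.tensor as T

full = T.matrix('full')          # broadcastable: (False, False)
row = T.matrix('row')            # runtime shape will be (1, n)

# Mark axis 0 of `row` as broadcastable so it can be added to `full`;
# without this, the elementwise op rejects the size-1 vs size-m mismatch.
row_b = T.patternbroadcast(row, (True, False))
out = full + row_b

f = theano.function([full, row], out)
print(f(numpy.ones((3, 4), dtype=theano.config.floatX),
        numpy.arange(4, dtype=theano.config.floatX).reshape(1, 4)))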

Example 1: grad

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import patternbroadcast [as alias]
def grad(self, inp, grads):
    bottom, weights = inp
    top, = grads
    d_bottom = AbstractConv2d_gradInputs(self.imshp, self.kshp,
                                         self.border_mode,
                                         self.subsample,
                                         self.filter_flip)(
        weights, top, bottom.shape[-2:])
    d_weights = AbstractConv2d_gradWeights(self.imshp, self.kshp,
                                           self.border_mode,
                                           self.subsample,
                                           self.filter_flip)(
        bottom, top, weights.shape[-2:])

    # Make sure that the broadcastable pattern of the inputs is used
    # for the gradients, even if the grad opts are not able to infer
    # that the dimensions are broadcastable.
    # Also make sure that the gradient lives on the same device as
    # the corresponding input.
    d_bottom = patternbroadcast(d_bottom, bottom.broadcastable)
    d_bottom = bottom.type.filter_variable(d_bottom)
    d_weights = patternbroadcast(d_weights, weights.broadcastable)
    d_weights = weights.type.filter_variable(d_weights)
    return d_bottom, d_weights
Author: muhanzhang | Project: D-VAE | Lines: 27 | Source: abstract_conv.py

Example 2: local_conv_dnn

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import patternbroadcast [as alias]
def local_conv_dnn(node):
    if not dnn_available():
        return
    if isinstance(node.op, GpuConv):
        if node.op.border_mode not in ['full', 'valid']:
            return
        img, kern = node.inputs
        border_mode = node.op.border_mode
        subsample = node.op.subsample
        direction_hint = node.op.direction_hint
        rval = dnn_conv(img, kern,
                        border_mode=border_mode, subsample=subsample,
                        direction_hint=direction_hint)
        if node.outputs[0].broadcastable != rval.broadcastable:
            rval = tensor.patternbroadcast(
                rval, node.outputs[0].type.broadcastable)
        return [rval]

# This optimizer is registered in opt.py as part of the meta-optimizer.
# It tries exactly the opposite code path of what local_conv_dnn() uses,
# because for some input/kernel shape configurations, this is faster.
Author: muhanzhang | Project: D-VAE | Lines: 23 | Source: dnn.py

Example 3: local_gpualloc_memset_0

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import patternbroadcast [as alias]
def local_gpualloc_memset_0(node):
    if isinstance(node.op, GpuAlloc) and not node.op.memset_0:
        inp = node.inputs[0]
        if (isinstance(inp, CudaNdarrayConstant) and
            inp.data.size == 1 and
            (numpy.asarray(inp.data) == 0).all()):

            new_out = GpuAlloc(memset_0=True)(*node.inputs)
            old_bcast = node.outputs[0].type.broadcastable
            if new_out.type.broadcastable != old_bcast:
                # check that we did not try discarding a broadcastable
                # dimension
                assert not any(b_old and not b_new for b_old, b_new in
                               zip(old_bcast, new_out.type.broadcastable))
                # force old broadcasting pattern; we must not change it here
                new_out = tensor.patternbroadcast(new_out, old_bcast)
            return [new_out] 
Author: muhanzhang | Project: D-VAE | Lines: 19 | Source: opt.py

Example 4: local_abstractconv_gradweight_gemm

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import patternbroadcast [as alias]
def local_abstractconv_gradweight_gemm(node):
    if not isinstance(node.op, AbstractConv2d_gradWeights):
        return None
    img, topgrad, shape = node.inputs
    if not isinstance(img.type, CudaNdarrayType) or \
            not isinstance(topgrad.type, CudaNdarrayType):
        return None

    rval = GpuCorrMM_gradWeights(border_mode=node.op.border_mode,
                                 subsample=node.op.subsample)(
        gpu_contiguous(img), gpu_contiguous(topgrad), shape)
    if node.op.filter_flip:
        rval = rval[:, :, ::-1, ::-1]
    rval = tensor.patternbroadcast(rval, node.outputs[0].broadcastable)
    rval = as_cuda_ndarray_variable(rval)
    return [rval] 
Author: muhanzhang | Project: D-VAE | Lines: 18 | Source: opt.py

Example 5: sample_noise

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import patternbroadcast [as alias]
def sample_noise(self, input):
    # get noise_shape
    noise_shape = self.input_shape
    if any(s is None for s in noise_shape):
        noise_shape = input.shape

    # respect shared axes
    if self.shared_axes:
        shared_axes = tuple(a if a >= 0 else a + input.ndim
                            for a in self.shared_axes)
        noise_shape = tuple(1 if a in shared_axes else s
                            for a, s in enumerate(noise_shape))

    one = tt.constant(1)
    retain_prob = one - self.p
    noise = self._srng.binomial(noise_shape, p=retain_prob,
                                dtype=floatX)

    if self.shared_axes:
        bcast = tuple(bool(s == 1) for s in noise_shape)
        noise = tt.patternbroadcast(noise, bcast)

    return noise
Author: mcgillmrl | Project: kusanagi | Lines: 25 | Source: layers.py

Example 6: init_state

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import patternbroadcast [as alias]
def init_state(self, network):
    super(SimpleRecurrentNode, self).init_state(network)
    num_units = network.find_hyperparameter(["num_units"])
    # FIXME use batch_axis instead of batch_size
    batch_size = network.find_hyperparameter(["batch_size"])
    if batch_size is None:
        shape = (num_units,)
    else:
        shape = (batch_size, num_units)
    zeros = T.zeros(shape)
    # unfortunately, theano.tensor.zeros makes the result broadcastable
    # if the shape of any dimension is 1, so we have to undo this
    value = T.patternbroadcast(zeros, (False,) * len(shape))
    network.set_hyperparameter(self._name + "_initialstate",
                               "constant_value",
                               value)
Author: SBU-BMI | Project: u24_lymphocyte | Lines: 18 | Source: recurrent.py
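
As a quick aside (a standalone sketch, not part of the project above): the pitfall the comment describes is easy to reproduce, since T.zeros with a constant dimension of 1 yields a broadcastable axis, which patternbroadcast can reset.

import theano.tensor as T

z = T.zeros((1, 5))
print(z.broadcastable)                         # (True, False)
fixed = T.patternbroadcast(z, (False, False))  # undo the broadcast flag
print(fixed.broadcastable)                     # (False, False)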

Example 7: get_output_for

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import patternbroadcast [as alias]
def get_output_for(self, input, deterministic=False, **kwargs):
    if deterministic or self.alpha == 0:
        return T.ones_like(self.retain, dtype=input.dtype)
    else:
        # use nonsymbolic shape for dropout mask if possible
        mask_shape = self.input_shape
        if any(s is None for s in mask_shape):
            mask_shape = input.shape

        # apply dropout, respecting shared axes
        if self.shared_axes:
            shared_axes = tuple(a if a >= 0 else a + input.ndim
                                for a in self.shared_axes)
            mask_shape = tuple(1 if a in shared_axes else s
                               for a, s in enumerate(mask_shape))
        mask = self._srng.binomial(mask_shape, p=self.retain,
                                   dtype=input.dtype)
        if self.shared_axes:
            bcast = tuple(bool(s == 1) for s in mask_shape)
            mask = T.patternbroadcast(mask, bcast)
        return mask
Author: diegma | Project: neural-dep-srl | Lines: 23 | Source: WordDropout.py

Example 8: pattern_broadcast

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import patternbroadcast [as alias]
def pattern_broadcast(x, broadcastable):
    return T.patternbroadcast(x, broadcastable)

# VALUE MANIPULATION 
Author: lingluodlut | Project: Att-ChemdNER | Lines: 6 | Source: theano_backend.py

Example 9: dropout

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import patternbroadcast [as alias]
def dropout(x, level, noise_shape=None, seed=None):
    '''Sets entries in `x` to zero at random,
    while scaling the entire tensor.

    # Arguments
        x: tensor
        level: fraction of the entries in the tensor
            that will be set to 0.
        noise_shape: shape for randomly generated keep/drop flags,
            must be broadcastable to the shape of `x`
        seed: random seed to ensure determinism.
    '''
    if level < 0. or level >= 1:
        raise ValueError('Dropout level must be in interval [0, 1[.')
    if seed is None:
        seed = np.random.randint(1, 10e6)

    rng = RandomStreams(seed=seed)
    retain_prob = 1. - level

    if noise_shape is None:
        random_tensor = rng.binomial(x.shape, p=retain_prob, dtype=x.dtype)
    else:
        random_tensor = rng.binomial(noise_shape, p=retain_prob, dtype=x.dtype)
        random_tensor = T.patternbroadcast(random_tensor, [dim == 1 for dim in noise_shape])

    x *= random_tensor
    x /= retain_prob
    return x 
Author: lingluodlut | Project: Att-ChemdNER | Lines: 31 | Source: theano_backend.py
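
A hedged usage sketch for the dropout helper above (assuming it is in scope together with its module's imports; the shapes are illustrative): passing a noise_shape with a 1 on the timestep axis shares one drop pattern across all timesteps of each sample.

import numpy as np
import theano
import theano.tensor as T

x = T.tensor3('x')                    # (batch, timesteps, features)
# One mask per (sample, feature), broadcast over the timestep axis:
y = dropout(x, level=0.5, noise_shape=(2, 1, 4))
f = theano.function([x], y)
print(f(np.ones((2, 3, 4), dtype=theano.config.floatX)))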

Example 10: grad

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import patternbroadcast [as alias]
def grad(self, inputs, gout):
    (x,) = inputs
    (gz,) = gout
    gx = dense_from_sparse(gz)
    gx = tensor.patternbroadcast(gx, x.broadcastable)
    return gx,
Author: muhanzhang | Project: D-VAE | Lines: 8 | Source: basic.py

Example 11: local_gpu_reshape

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import patternbroadcast [as alias]
def local_gpu_reshape(node):
    if isinstance(node.op, GpuFromHost):
        host_input = node.inputs[0]
        if host_input.owner and \
           isinstance(host_input.owner.op, tensor.Reshape):
            rshp = host_input.owner.op
            x, shp = host_input.owner.inputs
            gpu_reshape = GpuReshape(rshp.ndim)(as_cuda_ndarray_variable(x),
                                                shp)
            if gpu_reshape.broadcastable != node.outputs[0].broadcastable:
                # this can happen because GpuReshape always returns False
                # for every broadcastable dim while Reshape does not;
                # even if both did the same thing, constant optimization
                # could still cause the mismatch
                gpu_reshape = theano.tensor.patternbroadcast(
                    gpu_reshape, node.outputs[0].broadcastable)
            return [gpu_reshape]
    if isinstance(node.op, tensor.Reshape):
        x, shp = node.inputs
        if x.owner and isinstance(x.owner.op, HostFromGpu):
            gpu_x, = x.owner.inputs
            gpu_reshape = GpuReshape(node.op.ndim)(gpu_x, shp)
            if gpu_reshape.broadcastable != node.outputs[0].broadcastable:
                # this can happen because GpuReshape always returns False
                # for every broadcastable dim while Reshape does not;
                # even if both did the same thing, constant optimization
                # could still cause the mismatch
                gpu_reshape = theano.tensor.patternbroadcast(
                    gpu_reshape, node.outputs[0].broadcastable)
            return [host_from_gpu(gpu_reshape)]
    return False 
Author: muhanzhang | Project: D-VAE | Lines: 33 | Source: opt.py

Example 12: _gpu_conv_to_fftconv

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import patternbroadcast [as alias]
def _gpu_conv_to_fftconv(node):
    # shared helper function for local_conv_fft_valid and local_conv_fft_full.
    # we import conv2d_fft locally to avoid pycuda warnings
    from theano.sandbox.cuda.fftconv import conv2d_fft
    kwargs = {'border_mode': node.op.border_mode}
    if (node.op.imshp is not None and
        node.op.imshp[-1] is not None and
        node.op.imshp[-1] % 2 == 1):

        kwargs['pad_last_dim'] = True
    # If the user supplied the full nonsymbolic image_shape and
    # filter_shape in conv2d(), we can pass it on to conv2d_fft().
    if ((node.op.imshp is not None) and
            (len(node.op.imshp) == 3) and
            (None not in node.op.imshp) and
            (node.op.bsize is not None)):
        kwargs['image_shape'] = (node.op.bsize,) + node.op.imshp
    if ((node.op.kshp is not None) and
            (None not in node.op.kshp) and
            (node.op.nkern is not None) and
            (len(node.op.imshp) == 3) and
            (node.op.imshp[0] is not None)):
        kwargs['filter_shape'] = (node.op.nkern, node.op.imshp[0]) + \
            node.op.kshp
    rval = conv2d_fft(node.inputs[0], node.inputs[1], **kwargs)
    if node.outputs[0].broadcastable != rval.broadcastable:
        # With given shape information, conv2d_fft may return a different
        # broadcast pattern than GpuConv. This is forbidden, so we fix it.
        rval = tensor.patternbroadcast(
            rval, node.outputs[0].type.broadcastable)
    return rval 
Author: muhanzhang | Project: D-VAE | Lines: 33 | Source: opt.py

Example 13: test_broadcast

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import patternbroadcast [as alias]
def test_broadcast(self):
    # Test that we can rebroadcast
    data = numpy.random.rand(10, 10).astype('float32')
    output_var = f32sc(name="output", value=data)

    up = tensor.unbroadcast(output_var.sum().dimshuffle('x', 'x'), 0, 1)
    output_func = theano.function(inputs=[], outputs=[],
                                  updates=[(output_var, up)])
    output_func()

    up = tensor.patternbroadcast(output_var.sum().dimshuffle('x', 'x'),
                                 output_var.type.broadcastable)
    output_func = theano.function(inputs=[], outputs=[],
                                  updates=[(output_var, up)])
    output_func()
Author: muhanzhang | Project: D-VAE | Lines: 17 | Source: test_var.py

Example 14: cosine_sim

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import patternbroadcast [as alias]
def cosine_sim(k, M):
    k_unit = k / (T.sqrt(T.sum(k**2)) + 1e-5)
    # T.patternbroadcast(k_unit.reshape((1,k_unit.shape[0])),(True,False))
    k_unit = k_unit.dimshuffle(('x', 0))
    k_unit.name = "k_unit"
    M_lengths = T.sqrt(T.sum(M**2, axis=1)).dimshuffle((0, 'x'))
    M_unit = M / (M_lengths + 1e-5)
    M_unit.name = "M_unit"
    return T.sum(k_unit * M_unit, axis=1) 
Author: memray | Project: seq2seq-keyphrase | Lines: 11 | Source: theano_utils.py
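
A short usage sketch for cosine_sim above (standalone assumptions: plain Theano with illustrative values): it returns the cosine similarity between the key k and each row of the memory matrix M.

import numpy as np
import theano
import theano.tensor as T

k = T.vector('k')            # key, shape (d,)
M = T.matrix('M')            # memory, shape (rows, d)
f = theano.function([k, M], cosine_sim(k, M))
print(f(np.array([1., 0.], dtype=theano.config.floatX),
        np.array([[1., 0.], [0., 1.]], dtype=theano.config.floatX)))
# -> approximately [1., 0.]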

Example 15: pattern_broadcast

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import patternbroadcast [as alias]
def pattern_broadcast(x, broadcastable):
    return T.patternbroadcast(x, broadcastable)

# VALUE MANIPULATION 
Author: Relph1119 | Project: GraphicDesignPatternByPython | Lines: 6 | Source: theano_backend.py


Note: The theano.tensor.patternbroadcast examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors, and copyright of the source code remains with the original authors. Please consult each project's license before using or redistributing the code; do not reproduce this article without permission.