

Python tensor.get_vector_length function code examples

This article collects typical usage examples of the theano.tensor.get_vector_length function in Python. If you are wondering what get_vector_length does, how to use it, or want to see it in real code, the curated examples below should help.


The following presents 8 code examples of the get_vector_length function, sorted by popularity by default.
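Before the examples, here is a minimal sketch of what tensor.get_vector_length does (this snippet is not from any of the projects below; it assumes only that Theano and NumPy are installed): the function returns the length of a symbolic vector when that length can be determined at graph-construction time, and raises ValueError otherwise.

import numpy
import theano.tensor as tensor

# Length known at graph-construction time: a constant vector of two elements.
shape = tensor.as_tensor_variable(numpy.asarray([3, 4], dtype="int64"))
print(tensor.get_vector_length(shape))  # 2

# Length unknown at graph-construction time: a generic symbolic vector.
x = tensor.vector("x")
try:
    tensor.get_vector_length(x)
except ValueError:
    print("length of x is not known symbolically")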

Example 1: quantized_lognormal_mixture_sampler

def quantized_lognormal_mixture_sampler(rstream, weights, mus, sigmas, step, draw_shape=None, ndim=None, dtype=None):
    rstate = rstream.new_shared_rstate()
    # shape prep
    if draw_shape is None:
        raise NotImplementedError()
    elif draw_shape is tensor.as_tensor_variable(draw_shape):
        shape = draw_shape
        if ndim is None:
            ndim = tensor.get_vector_length(shape)
    elif tuple(draw_shape) == ():
        ndim = 0
        shape = tensor.as_tensor_variable(numpy.asarray([], dtype="int"))
    else:
        shape = tensor.stack(*draw_shape)
        if ndim is None:
            ndim = len(draw_shape)
        assert tensor.get_vector_length(shape) == ndim

    # XXX: be smarter about inferring broadcastable
    op = QuantizedLognormalMixture(
        tensor.TensorType(broadcastable=(False,) * ndim, dtype=theano.config.floatX if dtype is None else dtype)
    )
    rs, out = op(rstate, shape, weights, mus, sigmas, step)
    rstream.add_default_update(out, rstate, rs)
    return out
Developer: gwtaylor, Project: MonteTheano, Lines: 25, Source: distributions.py

Example 2: BGMM1_sampler

def BGMM1_sampler(rstream, weights, mus, sigmas, low, high,
        draw_shape=None, ndim=None, dtype=None):
    rstate = rstream.new_shared_rstate()

    # shape prep
    if draw_shape is None:
        raise NotImplementedError()
    elif draw_shape is tensor.as_tensor_variable(draw_shape):
        shape = draw_shape
        if ndim is None:
            ndim = tensor.get_vector_length(shape)
    else:
        shape = tensor.hstack(*draw_shape)
        if ndim is None:
            ndim = len(draw_shape)
        assert tensor.get_vector_length(shape) == ndim

    # XXX: be smarter about inferring broadcastable
    op = BGMM1(
            tensor.TensorType(
                broadcastable=(False,) * ndim,
                dtype=theano.config.floatX if dtype is None else dtype))
    rs, out = op(rstate, weights, mus, sigmas, low, high, shape)
    rstream.add_default_update(out, rstate, rs)
    return out
Developer: helson73, Project: MonteTheano, Lines: 25, Source: distributions.py

Example 3: DM_sampler

def DM_sampler(rstream, alpha, draw_shape=None, ndim=None, dtype=None):
    shape = infer_shape(rstream.dirichlet(alpha, draw_shape=draw_shape))
    rstate = rstream.new_shared_rstate()
    op = DM(tensor.TensorType(broadcastable=(False,) * tensor.get_vector_length(shape), dtype=theano.config.floatX))
    rs, out = op(rstate, alpha)
    rstream.add_default_update(out, rstate, rs)
    return out
Developer: yamins81, Project: MonteTheano, Lines: 7, Source: distributions.py

Example 4: categorical_sampler

def categorical_sampler(rstream, p, draw_shape, dtype="int32"):
    if not isinstance(p, theano.Variable):
        p = tensor._shared(numpy.asarray(p, dtype=theano.config.floatX))
    if p.ndim != 1:
        raise NotImplementedError()
    if draw_shape.ndim != 1:
        raise TypeError()
    op = Categorical(
        False, tensor.TensorType(broadcastable=(False,) * tensor.get_vector_length(draw_shape), dtype=dtype)
    )
    rstate = rstream.new_shared_rstate()
    new_rstate, out = op(rstate, p, draw_shape)
    rstream.add_default_update(out, rstate, new_rstate)
    return out
Developer: yamins81, Project: MonteTheano, Lines: 14, Source: distributions.py

Example 5: new_auto_update

    def new_auto_update(cls, generator, ndim, dtype, size, seed):
        """
        Return a symbolic sample from generator.

        cls dictates the random variable (e.g. uniform, normal).

        """
        v_size = theano.tensor.as_tensor_variable(size)
        if ndim is None:
            ndim = get_vector_length(v_size)
        self = cls(output_type=CudaNdarrayType((False,) * ndim), seed=seed, destructive=False)

        o_gen, sample = self(generator, cast(v_size, "int32"))

        sample.generator = generator  # for user
        sample.update = (generator, o_gen)  # for CURAND_RandomStreams
        generator.default_update = o_gen  # for pfunc uses this attribute
        return sample
Developer: huamichaelchen, Project: Theano, Lines: 18, Source: rng_curand.py

Example 6: _infer_ndim_bcast

def _infer_ndim_bcast(ndim, shape, *args):
    """
    Infer the number of dimensions from the shape or the other arguments.

    Returns
    -------
    (int, variable, tuple) triple, where the variable is an integer vector,
    and the tuple contains Booleans
        The first element returned is the inferred number of dimensions.
        The second element is the shape inferred (combining symbolic and
        constant information from shape and args).
        The third element is a broadcasting pattern corresponding to that shape.

    """

    # Find the minimum value of ndim required by the *args
    if args:
        args_ndim = max(arg.ndim for arg in args)
    else:
        args_ndim = 0

    if isinstance(shape, (tuple, list)):
        # there is a convention that -1 means the corresponding shape of a
        # potentially-broadcasted symbolic arg
        #
        # This case combines together symbolic and non-symbolic shape
        # information
        shape_ndim = len(shape)
        if ndim is None:
            ndim = shape_ndim
        else:
            if shape_ndim != ndim:
                raise ValueError('ndim should be equal to len(shape), but\n',
                                 'ndim = %s, len(shape) = %s, shape = %s'
                                 % (ndim, shape_ndim, shape))

        bcast = []
        pre_v_shape = []
        for i, s in enumerate(shape):
            if hasattr(s, 'type'):  # s is symbolic
                bcast.append(False)  # todo - introspect further
                pre_v_shape.append(s)
            else:
                if s >= 0:
                    pre_v_shape.append(tensor.as_tensor_variable(s))
                    bcast.append((s == 1))
                elif s == -1:
                    n_a_i = 0
                    for a in args:
                        # ndim: _   _   _   _   _   _
                        # ashp:         s0  s1  s2  s3
                        #           i
                        if i >= ndim - a.ndim:
                            n_a_i += 1
                            a_i = i + a.ndim - ndim
                            if not a.broadcastable[a_i]:
                                pre_v_shape.append(a.shape[a_i])
                                bcast.append(False)
                                break
                    else:
                        if n_a_i == 0:
                            raise ValueError((
                                'Auto-shape of -1 must overlap '
                                'with the shape of one of the broadcastable '
                                'inputs'))
                        else:
                            pre_v_shape.append(tensor.as_tensor_variable(1))
                            bcast.append(True)
                else:
                    raise ValueError('negative shape', s)
        # post-condition: shape may still contain both symbolic and
        # non-symbolic things
        if len(pre_v_shape) == 0:
            v_shape = tensor.constant([], dtype='int64')
        else:
            v_shape = tensor.stack(pre_v_shape)

    elif shape is None:
        # The number of drawn samples will be determined automatically,
        # but we need to know ndim
        if not args:
            raise TypeError(('_infer_ndim_bcast cannot infer shape without'
                             ' either shape or args'))
        template = reduce(lambda a, b: a + b, args)
        v_shape = template.shape
        bcast = template.broadcastable
        ndim = template.ndim
    else:
        v_shape = tensor.as_tensor_variable(shape)
        if v_shape.ndim != 1:
            raise TypeError(
                "shape must be a vector or list of scalar, got '%s'" % v_shape)

        if ndim is None:
            ndim = tensor.get_vector_length(v_shape)
        bcast = [False] * ndim

    if v_shape.ndim != 1:
        raise TypeError("shape must be a vector or list of scalar, got '%s'" %
                        v_shape)
#......... part of the code omitted here .........
Developer: Faruk-Ahmed, Project: Theano, Lines: 101, Source: raw_random.py
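As a hedged usage sketch for Example 6 (assuming _infer_ndim_bcast can be imported from theano.tensor.raw_random, as the source file above suggests; this snippet is not part of the excerpt): the -1 convention lets a missing dimension be filled in from the shape of one of the symbolic arguments.

import theano.tensor as tensor
from theano.tensor.raw_random import _infer_ndim_bcast

x = tensor.matrix("x")  # two non-broadcastable dimensions
# The -1 in position 1 is resolved from x's second dimension (x.shape[1]).
ndim, v_shape, bcast = _infer_ndim_bcast(None, (5, -1), x)
print(ndim)   # 2, taken from len(shape)
print(bcast)  # (False, False): neither dimension is broadcastable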

Example 7: new

    def new(cls, rstate, ndim, dtype, size):
        v_size = as_tensor_variable(size)
        if ndim is None:
            ndim = get_vector_length(v_size)
        op = cls(TensorType(dtype, (False,) * ndim))
        return op(rstate, cast(v_size, "int32"))
Developer: Tanjay94, Project: Theano, Lines: 6, Source: rng_mrg.py

Example 8: new

    def new(cls, rstate, ndim, dtype, size):
        v_size = as_tensor_variable(size)
        if ndim is None:
            ndim = get_vector_length(v_size)
        op = cls(GpuArrayType(dtype, (False,) * ndim))
        return op(rstate, v_size)
Developer: noskill, Project: Theano, Lines: 6, Source: rng_mrg.py
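Both new methods above follow the same pattern: when ndim is not given, it is inferred from the symbolic size vector with get_vector_length. A minimal, hedged sketch of that pattern from the caller's side (assuming Theano's MRG_RandomStreams is available; this is not code from the projects above):

import theano
from theano.sandbox.rng_mrg import MRG_RandomStreams

srng = MRG_RandomStreams(seed=1234)
# ndim of the output is inferred from the size argument (a length-2 vector here).
u = srng.uniform(size=(2, 3))
f = theano.function([], u)
print(f().shape)  # (2, 3)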


Note: The theano.tensor.get_vector_length examples in this article were collected by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors, and redistribution or use should follow each project's license. Please do not repost this article without permission.