

Python theano.Op Code Examples

This article collects typical usage examples of theano.Op in Python. If you are unsure exactly how theano.Op is used, or are looking for concrete examples of it in practice, the hand-picked code samples below should help. You can also explore further usage examples from the theano package itself.


The sections below present 14 code examples of theano.Op, sorted by popularity by default.

Example 1: local_gpu_elemwise_careduce

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def local_gpu_elemwise_careduce(node):
    """
    Merge some GpuCAReduceCuda and GpuElemwise ops.

    """
    if (isinstance(node.op, GpuCAReduceCuda) and
            node.op.pre_scalar_op is None and
            node.inputs[0].owner and
            isinstance(node.inputs[0].owner.op, GpuElemwise) and
            # The Op supports any scalar op with one input.  We don't
            # automatically add more cases, as some (e.g. trigonometric
            # ops combined with certain reduction patterns) would
            # probably result in a slowdown.
            isinstance(node.inputs[0].owner.op.scalar_op, scalar.basic.Sqr)):
        op = node.op
        inp = node.inputs[0].owner.inputs[0]
        return [GpuCAReduceCuda(scalar_op=op.scalar_op,
                                axis=op.axis,
                                reduce_mask=op.reduce_mask,
                                pre_scalar_op=scalar.basic.sqr)(inp)] 
Author: muhanzhang | Project: D-VAE | Lines: 22 | Source: opt.py
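
For intuition, here is a minimal sketch of the graph pattern this optimizer targets: an elemwise square feeding a sum. It assumes only a stock Theano install; the fused GpuCAReduceCuda node will only appear when the graph is compiled on the old CUDA backend.

import theano
import theano.tensor as T

x = T.fmatrix('x')
# Sum of squares: on the CUDA backend this compiles to GpuElemwise{sqr}
# feeding GpuCAReduceCuda{add}, which the rewrite above fuses into a
# single GpuCAReduceCuda(..., pre_scalar_op=sqr)(x), saving one
# elemwise pass over the data.
y = T.sqr(x).sum()
f = theano.function([x], y)
theano.printing.debugprint(f)  # inspect the optimized graph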

Example 2: in_shape

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def in_shape(self, output_shape):
        out_dims = list(output_shape[:self.n_dims_before])
        num_strides = []

        # in the inverse case we don't worry about borders:
        # they either have been filled with zeros, or have been cropped
        for i, ds in enumerate(self.dims_neighbourhoods):
            # the number of strides performed by NeighFromImg is
            # directly given by this shape
            num_strides.append(output_shape[self.n_dims_before + i])

            # our Op's output image must be at least this wide
            at_least_width = num_strides[i] * self.strides[i]

            # ... which gives us this number of neighbourhoods
            num_neigh = at_least_width // ds
            if at_least_width % ds != 0:
                num_neigh += 1

            # making the final Op's output dimension this wide
            out_dims.append(num_neigh * ds)

        return out_dims, num_strides 
Author: muhanzhang | Project: D-VAE | Lines: 25 | Source: neighbourhoods.py
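
To make the arithmetic concrete, here is a standalone replica of the method above with the instance attributes turned into plain arguments (the parameter values are invented):

def in_shape(output_shape, n_dims_before, dims_neighbourhoods, strides):
    out_dims = list(output_shape[:n_dims_before])
    num_strides = []
    for i, ds in enumerate(dims_neighbourhoods):
        num_strides.append(output_shape[n_dims_before + i])
        at_least_width = num_strides[i] * strides[i]
        num_neigh = at_least_width // ds + (1 if at_least_width % ds else 0)
        out_dims.append(num_neigh * ds)
    return out_dims, num_strides

# 5 strides of size 2 need a width of 10, rounded up to 3 neighbourhoods of 4:
print(in_shape((3, 5), 1, (4,), (2,)))  # -> ([3, 12], [5])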

Example 3: _py_assignment

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def _py_assignment(self):
        # Build the Python source line that copies one neighbourhood
        # element between the input array x and the output z.
        input_idx = "".join(["outer_idx_%d," % (i,)
                            for i in xrange(self.n_dims_before)])
        input_idx += "".join(["dim_%d_offset+neigh_idx_%d," %
                             (i, i) for i in xrange(len(self.strides))])
        out_idx = "".join(
            ["outer_idx_%d," % (i,) for i in xrange(self.n_dims_before)] +
            ["stride_idx_%d," % (i,) for i in xrange(len(self.strides))])
        out_idx += self._py_flattened_idx()

        # return_val = '\t' * (self.n_dims_before + len(self.strides)*2)
        # return_val += "print "+input_idx+"'\\n',"+out_idx+"\n"

        return_val = '\t' * (self.n_dims_before + len(self.strides) * 2)

        if self.inverse:
            # remember z and x are swapped here:
            # z is the Op's output, but has the input shape
            # x is the Op's input, but has the output shape
            return_val += "z[0][%s] = x[%s]\n" % (input_idx, out_idx)
        else:
            return_val += "z[0][%s] = x[%s]\n" % (out_idx, input_idx)

        return return_val 
Author: muhanzhang | Project: D-VAE | Lines: 27 | Source: neighbourhoods.py
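
For a rough idea of the generated source, assume n_dims_before=1, a single stride dimension, and inverse=False; the flattened-index suffix produced by _py_flattened_idx() is not shown in this article, so it is elided as <flat_idx> below. The emitted line, indented by three tabs, would look like:

z[0][outer_idx_0,stride_idx_0,<flat_idx>] = x[outer_idx_0,dim_0_offset+neigh_idx_0,]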

Example 4: __init__

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def __init__(self, inputDim=None, outputDim=None, activation=None):
        """
        :type inputDim: tuple of [int]
        :param inputDim: dimensionality of input

        :type outputDim: tuple of [int]
        :param outputDim: effectively ignored; the constructor resets the
            output dimensionality to inputDim

        :type activation: theano.Op or function
        :param activation: nonlinearity to be applied in the hidden layer
        """

        super(NonlinearityLayerParams, self).__init__(inputDim, outputDim)

        self._outputDim = self._inputDim
        self._activation = activation 
Author: moberweger | Project: deep-prior-pp | Lines: 18 | Source: nonlinearitylayer.py
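
A hypothetical usage sketch (the dimensions are invented, and T.nnet.relu requires a reasonably recent Theano):

import theano.tensor as T

# Parameters for a ReLU layer; the output dimensionality mirrors the input
params = NonlinearityLayerParams(inputDim=(128, 30), activation=T.nnet.relu)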

Example 5: __init__

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def __init__(self, inputDim=None, outputDim=None, activation=None, hasBias=True, init_method=None):
        """
        :type inputDim: tuple of [int]
        :param inputDim: dimensionality of input

        :type outputDim: tuple of [int]
        :param outputDim: number of hidden units

        :type activation: theano.Op or function
        :param activation: nonlinearity to be applied in the hidden layer

        :type hasBias: bool
        :param hasBias: whether the layer adds a bias term

        :type init_method: object
        :param init_method: weight-initialization scheme (project-specific)
        """

        super(HiddenLayerParams, self).__init__(inputDim, outputDim)

        self._activation = activation
        self._hasbias = hasBias
        self._init_method = init_method 
Author: moberweger | Project: deep-prior-pp | Lines: 19 | Source: hiddenlayer.py
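
A similarly hypothetical sketch for this variant, exercising the extra arguments:

import theano.tensor as T

params = HiddenLayerParams(inputDim=(1024,), outputDim=(256,),
                           activation=T.tanh, hasBias=True)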

Example 6: infer_shape

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def infer_shape(self, node, input_shapes):
        """Return a list of output shapes based on ``input_shapes``.

        This method is optional. It allows Theano to compute the shape of
        the output without having to evaluate the Op.

        Parameters
        ----------
        node : `theano.gof.graph.Apply`
            The node of this Op in the computation graph.
        input_shapes : 1-element list of `theano.compile.ops.Shape`
            Symbolic shape of the input.

        Returns
        -------
        output_shapes : 1-element list of tuples
            Fixed shape of the output determined by `odl_op`.
        """
        if isinstance(self.operator, Functional):
            return [()]
        else:
            # Need to convert to native to avoid error in Theano from
            # future.int
            return [tuple(native(si) for si in self.operator.range.shape)] 
Author: odlgroup | Project: odl | Lines: 26 | Source: layer.py
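
A minimal sketch of what defining infer_shape buys you, assuming op_layer is an instance of this Op wrapping some ODL operator:

import theano
import theano.tensor as T

x = T.fvector('x')
y = op_layer(x)  # hypothetical instance of the Op defined above
# Because infer_shape is defined, a function computing only y.shape
# never needs to evaluate the wrapped ODL operator:
shape_fn = theano.function([x], y.shape)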

Example 7: __init__

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def __init__(self, rng, input, n_in, n_out, activation=T.tanh, name_prefix=''):
        """
        Typical hidden layer of an MLP: units are fully connected. The
        weight matrix W is of shape (n_in, n_out).

        NOTE: this simplified test variant applies neither a bias nor the
        `activation` nonlinearity; the hidden unit activation is simply
        dot(input, W).

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type input: theano.tensor.dmatrix
        :param input: a symbolic tensor of shape (n_examples, n_in)

        :type n_in: int
        :param n_in: dimensionality of input

        :type n_out: int
        :param n_out: number of hidden units

        :type activation: theano.Op or function
        :param activation: nonlinearity to be applied in the hidden
            layer (unused in this simplified variant)
        """
        self.input = input

        # `W` is initialized with `W_values`, which is uniformly sampled
        # from -6./sqrt(n_in+n_out) to 6./sqrt(n_in+n_out).
        # The output of `uniform` is converted using `asarray` to dtype
        # theano.config.floatX so that the code is runnable on GPU.
        W_values = numpy.asarray(rng.uniform(
            low=-numpy.sqrt(6. / (n_in + n_out)),
            high=numpy.sqrt(6. / (n_in + n_out)),
            size=(n_in, n_out)), dtype=theano.config.floatX)
        self.W = theano.shared(value=W_values, name=name_prefix+'W')

        self.output = T.dot(input, self.W)
        # parameters of the model
        self.params = [self.W] 
Author: muhanzhang | Project: D-VAE | Lines: 43 | Source: mlp_test.py
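
A hypothetical instantiation; the enclosing class name is assumed to be HiddenLayer:

import numpy
import theano.tensor as T

rng = numpy.random.RandomState(1234)
x = T.dmatrix('x')
# 784-dimensional inputs projected onto 500 units (linear, per the NOTE above)
layer = HiddenLayer(rng, input=x, n_in=784, n_out=500, name_prefix='h1_')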

Example 8: dnn_pool

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def dnn_pool(img, ws, stride=(1, 1), mode='max', pad=(0, 0)):
    """
    GPU pooling using cuDNN from NVIDIA.

    The memory layout to use is 'bc01', that is 'batch', 'channel',
    'first dim', 'second dim' in that order.

    `ws`, `stride` and `pad` must have the same length.

    Parameters
    ----------
    img
        Images to do the pooling over.
    ws : tuple
        Subsampling window size.
    stride : tuple
        Subsampling stride (default: (1, 1)).
    mode : {'max', 'average_inc_pad', 'average_exc_pad'}
    pad : tuple
        (padX, padY) or (padX, padY, padZ)
        default: (0, 0)

    .. warning:: The cuDNN library only works with GPUs that have a compute
        capability of 3.0 or higher.  This means that older GPUs will not
        work with this Op.

    Notes
    -----
    This Op implements the ignore_border=True behaviour of max_pool_2d.

    """
    img = gpu_contiguous(img)
    return GpuDnnPool(mode=mode)(img, ws, stride, pad) 
Author: muhanzhang | Project: D-VAE | Lines: 35 | Source: dnn.py
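
A short usage sketch (requires a cuDNN-capable GPU; the shapes are invented):

import theano.tensor as T

imgs = T.ftensor4('imgs')  # (batch, channel, rows, cols)
pooled = dnn_pool(imgs, ws=(2, 2), stride=(2, 2), mode='max', pad=(0, 0))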

Example 9: grad

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def grad(self, inputs, output_grads):
        return [output_grads[0] * inputs[1], output_grads[0] * inputs[0]]


# 2. Op returns x + y and x - y 
Author: muhanzhang | Project: D-VAE | Lines: 7 | Source: extending_theano_solution_1.py
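
This grad belongs to the multiplication Op from the "extending Theano" exercise. For context, here is a minimal self-contained sketch of such an Op; the class name ProdOp is an assumption:

import theano
import theano.tensor as T

class ProdOp(theano.Op):  # hypothetical name for the enclosing Op
    def make_node(self, x, y):
        x = T.as_tensor_variable(x)
        y = T.as_tensor_variable(y)
        return theano.Apply(self, [x, y], [x.type()])

    def perform(self, node, inputs, output_storage):
        x, y = inputs
        output_storage[0][0] = x * y

    def grad(self, inputs, output_grads):
        # d(x*y)/dx = y * gz and d(x*y)/dy = x * gz
        return [output_grads[0] * inputs[1], output_grads[0] * inputs[0]]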

Example 10: dnn_pool

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def dnn_pool(img, ws, stride=(1, 1), mode='max', pad=(0, 0)):
    """
    GPU pooling using cuDNN from NVIDIA.

    The memory layout to use is 'bc01', that is 'batch', 'channel',
    'first dim', 'second dim' in that order.

    `ws`, `stride` and `pad` must have the same length.

    Parameters
    ----------
    img
        Images to do the pooling over.
    ws : tuple
        Subsampling window size.
    stride : tuple
        Subsampling stride (default: (1, 1)).
    mode : {'max', 'average_inc_pad', 'average_exc_pad'}
    pad : tuple
        (padX, padY) or (padX, padY, padZ)
        default: (0, 0)

    .. warning:: The cuDNN library only works with GPUs that have a compute
        capability of 3.0 or higher.  This means that older GPUs will not
        work with this Op.

    Notes
    -----
    This Op implements the ignore_border=True behaviour of max_pool_2d.

    """
    img = gpu_contiguous(img)
    desc = GpuDnnPoolDesc(ws=ws, stride=stride, mode=mode, pad=pad)()
    return GpuDnnPool()(img, desc) 
Author: rizar | Project: attention-lvcsr | Lines: 36 | Source: dnn.py
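
Note that unlike Example 8 above, this older-backend variant first constructs a GpuDnnPoolDesc descriptor and hands it to GpuDnnPool; the signature seen by calling code is identical.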

Example 11: __init__

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def __init__(self, inputDim=None, outputDim=None, activation=None):
        """
        :type inputDim: tuple of [int]
        :param inputDim: dimensionality of input

        :type outputDim: tuple of [int]
        :param outputDim: number of hidden units

        :type activation: theano.Op or function
        :param activation: nonlinearity to be applied in the hidden layer
        """

        super(HiddenLayerParams, self).__init__(inputDim, outputDim)

        self._activation = activation 
Author: moberweger | Project: deep-prior | Lines: 17 | Source: hiddenlayer.py

Example 12: make_node

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def make_node(self, inp):
        if not cusolver_available:
            raise RuntimeError('CUSOLVER is not available and '
                               'the GpuLU Op cannot be constructed.')
        if skcuda.__version__ <= '0.5.1':
            warnings.warn('The GpuLU op requires scikit-cuda > 0.5.1 '
                          'to work with CUDA 8')
        if not pygpu_available:
            raise RuntimeError('Missing pygpu or triu/tril functions. '
                               'Install or update libgpuarray.')
        context_name = infer_context_name(inp)

        inp = as_gpuarray_variable(inp, context_name)

        inp = gpu_contiguous(inp)

        # This op can only operate on float32 matrices
        # because of the current implementation of triu/tril.
        # TODO: support float64
        assert inp.ndim == 2
        assert inp.dtype == 'float32'

        # outputs LU in a single matrix, and a pivots array
        pivots_type = GpuArrayType('int32',
                                   broadcastable=inp[0].broadcastable,
                                   context_name=context_name)()
        return theano.Apply(self, [inp], [inp.type(), pivots_type]) 
Author: mcgillmrl | Project: kusanagi | Lines: 28 | Source: extra_ops.py
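
A hypothetical call site, assuming GpuLU is the Op class this make_node belongs to and that it takes no constructor arguments:

import theano.tensor as T

A = T.fmatrix('A')  # float32 only, per the asserts above
LU, pivots = GpuLU()(A)  # LU factors packed into one matrix, plus pivot indices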

Example 13: _add_to_cache

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def _add_to_cache(self, module, key, module_hash):
        """
        This function expects the compile lock to be held.

        """
        name = module.__file__
        _logger.debug("Adding module to cache %s %s",
                      key, name)
        # Changing the hash of the key is not allowed during
        # compilation; that is the only known cause of the
        # following assert failing.
        assert key not in self.entry_from_key

        location = os.path.dirname(name)
        key_pkl = os.path.join(location, 'key.pkl')
        assert not os.path.exists(key_pkl)
        key_data = KeyData(
            keys=set([key]),
            module_hash=module_hash,
            key_pkl=key_pkl,
            entry=name)

        key_broken = False
        if key[0]:
            try:
                key_data.save_pkl()
            except pickle.PicklingError:
                key_broken = True
                key_data.remove_key(key)
                key_data.save_pkl()
            if not key_broken and self.check_for_broken_eq:
                self.check_key(key, key_pkl)
            self.loaded_key_pkl.add(key_pkl)
        elif config.cmodule.warn_no_version:
            key_flat = flatten(key)
            ops = [k for k in key_flat if isinstance(k, theano.Op)]
            _logger.warning("Not all of the"
                            " following op(s) implement"
                            " c_code_cache_version(), so they are"
                            " recompiled for each process: " + str(ops))
        self._update_mappings(key, key_data, module.__file__, not key_broken)
        return key_data 
Author: muhanzhang | Project: D-VAE | Lines: 44 | Source: cmodule.py
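
The warning above fires for Ops that do not version their generated C code. A minimal sketch of how a custom Op opts in (MyOp is a placeholder, with make_node/perform/c_code elided):

import theano

class MyOp(theano.Op):
    def c_code_cache_version(self):
        # Bump this tuple whenever c_code() changes, so that compiled
        # modules are cached and reused across processes instead of
        # being recompiled every time.
        return (1, 0)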

Example 14: op_lifter

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def op_lifter(OP, cuda_only=False):
    """
    OP(..., host_from_gpu(), ...) -> host_from_gpu(GpuOP(...))

    gpu_from_host(OP(inp0, ...)) -> GpuOP(inp0, ...)

    """
    def f(maker):
        def local_opt(node):
            if type(node.op) in OP:
                # Either one of our inputs is on the gpu or
                # all of our clients are on the gpu
                replace = False
                # TODO: Maybe set context_name with infer_context_name()?
                context_name = None
                # We replace if any input is a host_from_gpu
                for i in node.inputs:
                    if i.owner and i.owner.op == host_from_gpu:
                        context_name = i.owner.inputs[0].type.context_name
                        replace = True
                        break
                if not replace:
                    # We replace if *all* clients are on the GPU
                    clients = [c for o in node.outputs for c in o.clients]
                    replace = len(clients) != 0
                    for c, idx in clients:
                        if (c == 'output' or
                                not isinstance(c.op, GpuFromHost)):
                            replace = False
                    # TODO: check that the clients want the same context?
                    if replace:
                        # All clients are GpuFromHost and we have at least one
                        context_name = clients[0][0].op.context_name

                # Check if we should replace
                if (not replace or
                    (cuda_only and
                     get_context(context_name).kind != 'cuda')):
                    return False

                # tag the inputs with the context in case
                # the context was derived from the outputs
                for i in node.inputs:
                    i.tag.context_name = context_name
                new_op = maker(node, context_name)
                # This is needed as sometimes new_op inherits from OP.
                if new_op and new_op != node.op:
                    if isinstance(new_op, theano.Op):
                        return [safe_to_cpu(o) for o in
                                new_op(*node.inputs, return_list=True)]
                    elif isinstance(new_op, (tuple, list)):
                        return [safe_to_cpu(o) for o in new_op]
                    else:  # suppose it is a variable on the GPU
                        return [host_from_gpu(new_op)]
            return False
        local_opt.__name__ = maker.__name__
        return local_optimizer(OP)(local_opt)
    return f 
Author: muhanzhang | Project: D-VAE | Lines: 60 | Source: opt.py
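
A sketch of how such a lifter is typically applied; FooOp and GpuFooOp are placeholders for a CPU Op class and its GPU counterpart:

@op_lifter([FooOp])
def local_gpua_foo(node, context_name):
    # Return the GPU Op (or None to decline); op_lifter then wires the
    # host_from_gpu / gpu_from_host transfers around the replacement.
    return GpuFooOp()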


Note: The theano.Op examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from projects contributed by the open-source community; copyright of the source code belongs to the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.