

Python theano.Op Code Examples

This article collects typical usage examples of theano.Op in Python. If you are wondering how to use theano.Op, what it is for, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples from the theano module, where this class is defined.


The following 15 code examples of theano.Op are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
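
Before going through the examples, here is a minimal sketch of what subclassing theano.Op involves, assuming only a standard Theano installation: make_node builds the symbolic Apply node, and perform computes the result on NumPy arrays. The DoubleOp name and its doubling behaviour are illustrative, not taken from any of the projects below.

import numpy
import theano
import theano.tensor as T


class DoubleOp(theano.Op):
    """Elementwise doubling, implemented in pure Python."""

    __props__ = ()

    def make_node(self, x):
        # wrap the input and declare one output of the same type
        x = T.as_tensor_variable(x)
        return theano.Apply(self, [x], [x.type()])

    def perform(self, node, inputs, output_storage):
        # numeric implementation; `inputs` holds NumPy arrays
        (x,) = inputs
        output_storage[0][0] = x * 2


x = T.matrix('x')
f = theano.function([x], DoubleOp()(x))
print(f(numpy.ones((2, 2), dtype=theano.config.floatX)))  # every element doubled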

Example 1: local_gpu_elemwise_careduce

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def local_gpu_elemwise_careduce(node):
    """
    Merge some GpuCAReduceCuda and GpuElemwise.

    """
    if (isinstance(node.op, GpuCAReduceCuda) and
            node.op.pre_scalar_op is None and
            node.inputs[0].owner and
            isinstance(node.inputs[0].owner.op, GpuElemwise) and
            # The Op supports every scalar op with a single input.  We
            # don't automatically add more cases here, as some (e.g. a
            # trigonometric op combined with certain reduction patterns)
            # would probably result in a slowdown.
            isinstance(node.inputs[0].owner.op.scalar_op, scalar.basic.Sqr)):
        op = node.op
        inp = node.inputs[0].owner.inputs[0]
        return [GpuCAReduceCuda(scalar_op=op.scalar_op,
                                axis=op.axis,
                                reduce_mask=op.reduce_mask,
                                pre_scalar_op=scalar.basic.sqr)(inp)] 
Author: muhanzhang | Project: D-VAE | Lines: 22 | Source: opt.py
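
A rewrite like the one above only takes effect once it is registered with Theano's optimizer database. The snippet below is a hedged, generic sketch of that registration machinery, not the registration actually used in opt.py: the optimizer name, the Elemwise track, and the position 60.0 are all made up for illustration.

from theano import tensor
from theano.compile import optdb
from theano.gof import local_optimizer
from theano.gof.opt import TopoOptimizer


@local_optimizer([tensor.Elemwise])  # only offered Elemwise nodes
def my_noop_rewrite(node):
    # A local optimizer returns a list of replacement outputs,
    # or False/None to leave the node untouched.
    return False


optdb.register('my_noop_rewrite',
               TopoOptimizer(my_noop_rewrite),
               60.0, 'fast_run')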

Example 2: in_shape

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def in_shape(self, output_shape):
        out_dims = list(output_shape[:self.n_dims_before])
        num_strides = []

        # in the inverse case we don't worry about borders:
        # they either have been filled with zeros, or have been cropped
        for i, ds in enumerate(self.dims_neighbourhoods):
            # the number of strides performed by NeighFromImg is
            # directly given by this shape
            num_strides.append(output_shape[self.n_dims_before + i])

            # our Op's output image must be at least this wide
            at_least_width = num_strides[i] * self.strides[i]

            # ... which gives us this number of neighbourhoods
            num_neigh = at_least_width // ds
            if at_least_width % ds != 0:
                num_neigh += 1

            # making the final Op's output dimension this wide
            out_dims.append(num_neigh * ds)

        return out_dims, num_strides 
Author: muhanzhang | Project: D-VAE | Lines: 25 | Source: neighbourhoods.py
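
The ceil-division in the loop above is easiest to see with concrete numbers. A standalone illustration, with made-up values (neighbourhood size 4, stride 3, and 5 strides recorded in the output shape):

ds, stride, num_strides = 4, 3, 5

at_least_width = num_strides * stride  # 15: minimum usable width
num_neigh = at_least_width // ds       # 3 full neighbourhoods...
if at_least_width % ds != 0:
    num_neigh += 1                     # ...plus one partial -> 4
out_dim = num_neigh * ds               # padded output width: 16
print(out_dim)  # 16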

Example 3: _py_assignment

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def _py_assignment(self):
        # TODO : need description for method and return
        input_idx = "".join(["outer_idx_%d," % (i,)
                            for i in xrange(self.n_dims_before)])
        input_idx += "".join(["dim_%d_offset+neigh_idx_%d," %
                             (i, i) for i in xrange(len(self.strides))])
        out_idx = "".join(
            ["outer_idx_%d," % (i,) for i in xrange(self.n_dims_before)] +
            ["stride_idx_%d," % (i,) for i in xrange(len(self.strides))])
        out_idx += self._py_flattened_idx()

        # return_val = '\t' * (self.n_dims_before + len(self.strides)*2)
        # return_val += "print "+input_idx+"'\\n',"+out_idx+"\n"

        return_val = '\t' * (self.n_dims_before + len(self.strides) * 2)

        if self.inverse:
            # remember z and x are swapped:
            # z is the Op's output, but has input_shape
            # x is the Op's input, but has out_shape
            return_val += "z[0][%s] = x[%s]\n" % (input_idx, out_idx)
        else:
            return_val += "z[0][%s] = x[%s]\n" % (out_idx, input_idx)

        return return_val 
Author: muhanzhang | Project: D-VAE | Lines: 27 | Source: neighbourhoods.py
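
Because the full class has many moving parts, here is a standalone re-creation of just the string this method builds, with assumed sizes (n_dims_before=1 and two neighbourhood dimensions); flat_idx stands in for the value of _py_flattened_idx().

n_dims_before, n_strides = 1, 2

input_idx = "".join("outer_idx_%d," % i for i in range(n_dims_before))
input_idx += "".join("dim_%d_offset+neigh_idx_%d," % (i, i)
                     for i in range(n_strides))
out_idx = "".join("outer_idx_%d," % i for i in range(n_dims_before))
out_idx += "".join("stride_idx_%d," % i for i in range(n_strides))
out_idx += "flat_idx"  # placeholder for _py_flattened_idx()

# the non-inverse branch:
print("z[0][%s] = x[%s]" % (out_idx, input_idx))
# -> z[0][outer_idx_0,stride_idx_0,stride_idx_1,flat_idx] =
#        x[outer_idx_0,dim_0_offset+neigh_idx_0,dim_1_offset+neigh_idx_1,]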

Example 4: _py_assignment

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def _py_assignment(self):
        input_idx = "".join(["outer_idx_%d," % (i,)
                            for i in xrange(self.n_dims_before)])
        input_idx += "".join(["dim_%d_offset+neigh_idx_%d," %
                             (i, i) for i in xrange(len(self.strides))])
        out_idx = "".join(
            ["outer_idx_%d," % (i,) for i in xrange(self.n_dims_before)] +
            ["stride_idx_%d," % (i,) for i in xrange(len(self.strides))])
        out_idx += self._py_flattened_idx()

        # return_val = '\t' * (self.n_dims_before + len(self.strides)*2)
        # return_val += "print "+input_idx+"'\\n',"+out_idx+"\n"

        return_val = '\t' * (self.n_dims_before + len(self.strides) * 2)

        if self.inverse:
            # remember z and x are swapped:
            # z is the Op's output, but has input_shape
            # x is the Op's input, but has out_shape
            return_val += "z[0][%s] = x[%s]\n" % (input_idx, out_idx)
        else:
            return_val += "z[0][%s] = x[%s]\n" % (out_idx, input_idx)

        return return_val 
Author: rizar | Project: attention-lvcsr | Lines: 26 | Source: neighbourhoods.py

Example 5: __init__

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def __init__(self, inputDim=None, outputDim=None, activation=None):
        """
        :type inputDim: tuple of [int]
        :param inputDim: dimensionality of input

        :type outputDim: tuple of [int]
        :param outputDim: number of hidden units

        :type activation: theano.Op or function
        :param activation: Non linearity to be applied in the hidden layer
        """

        super(NonlinearityLayerParams, self).__init__(inputDim, outputDim)

        self._outputDim = self._inputDim
        self._activation = activation 
Author: moberweger | Project: deep-prior-pp | Lines: 18 | Source: nonlinearitylayer.py
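
The "theano.Op or function" wording in the docstring covers two calling conventions that look identical at the call site. A small sketch, independent of the deep-prior-pp classes:

import theano.tensor as T

x = T.matrix('x')

act_as_op = T.tanh                      # an Elemwise Op instance
act_as_fn = lambda v: T.maximum(v, 0.)  # a plain Python function (ReLU)

y1 = act_as_op(x)  # either form is simply called on a symbolic variable
y2 = act_as_fn(x)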

Example 6: __init__

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def __init__(self, inputDim=None, outputDim=None, activation=None, hasBias=True, init_method=None):
        """
        :type inputDim: tuple of [int]
        :param inputDim: dimensionality of input

        :type outputDim: tuple of [int]
        :param outputDim: number of hidden units

        :type activation: theano.Op or function
        :param activation: Non linearity to be applied in the hidden layer
        """

        super(HiddenLayerParams, self).__init__(inputDim, outputDim)

        self._activation = activation
        self._hasbias = hasBias
        self._init_method = init_method 
Author: moberweger | Project: deep-prior-pp | Lines: 19 | Source: hiddenlayer.py

Example 7: infer_shape

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def infer_shape(self, node, input_shapes):
        """Return a list of output shapes based on ``input_shapes``.

        This method is optional. It makes it possible to compute the
        shape of the output without having to evaluate the Op.

        Parameters
        ----------
        node : `theano.gof.graph.Apply`
            The node of this Op in the computation graph.
        input_shapes : 1-element list of `theano.compile.ops.Shape`
            Symbolic shape of the input.

        Returns
        -------
        output_shapes : 1-element list of tuples
            Fixed shape of the output determined by `odl_op`.
        """
        if isinstance(self.operator, Functional):
            return [()]
        else:
            # Need to convert to native to avoid error in Theano from
            # future.int
            return [tuple(native(si) for si in self.operator.range.shape)] 
Author: odlgroup | Project: odl | Lines: 26 | Source: layer.py
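
To see what implementing infer_shape buys, here is a minimal self-contained Op (the SqrOp name is illustrative): when only the output's shape is requested, the compiled graph contains Shape ops but never executes the Op itself.

import theano
import theano.tensor as T


class SqrOp(theano.Op):
    __props__ = ()

    def make_node(self, x):
        x = T.as_tensor_variable(x)
        return theano.Apply(self, [x], [x.type()])

    def perform(self, node, inputs, output_storage):
        output_storage[0][0] = inputs[0] ** 2

    def infer_shape(self, node, input_shapes):
        return input_shapes  # elementwise: output shape == input shape


x = T.matrix('x')
f = theano.function([x], SqrOp()(x).shape)
theano.printing.debugprint(f)  # no SqrOp node in the compiled graph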

Example 8: __init__

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def __init__(self, rng, input, n_in, n_out, activation=T.tanh, name_prefix=''):
        """
        Typical hidden layer of an MLP: units are fully connected and have
        a sigmoidal activation function. The weight matrix W is of shape
        (n_in, n_out) and the bias vector b is of shape (n_out,).

        NOTE : The nonlinearity used here is tanh

        Hidden unit activation is given by: tanh(dot(input,W) + b)

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type input: theano.tensor.dmatrix
        :param input: a symbolic tensor of shape (n_examples, n_in)

        :type n_in: int
        :param n_in: dimensionality of input

        :type n_out: int
        :param n_out: number of hidden units

        :type activation: theano.Op or function
        :param activation: Non linearity to be applied in the hidden
                              layer
        """
        self.input = input

        # `W` is initialized with `W_values`, which is uniformly sampled
        # from -sqrt(6./(n_in+n_out)) to sqrt(6./(n_in+n_out)).
        # The output of uniform() is converted with asarray to dtype
        # theano.config.floatX so that the code is runnable on GPU.
        W_values = numpy.asarray( rng.uniform( \
              low=-numpy.sqrt(6./(n_in+n_out)), \
              high=numpy.sqrt(6./(n_in+n_out)), \
              size=(n_in, n_out)), dtype=theano.config.floatX)
        self.W = theano.shared(value=W_values, name=name_prefix+'W')

        self.output = T.dot(input, self.W)
        # parameters of the model
        self.params = [self.W] 
Author: muhanzhang | Project: D-VAE | Lines: 43 | Source: mlp_test.py

Example 9: dnn_pool

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def dnn_pool(img, ws, stride=(1, 1), mode='max', pad=(0, 0)):
    """
    GPU pooling using cuDNN from NVIDIA.

    The memory layout to use is 'bc01', that is 'batch', 'channel',
    'first dim', 'second dim' in that order.

    `ws`, `stride` and `pad` must have the same length.

    Parameters
    ----------
    img
        Images to do the pooling over.
    ws : tuple
        Subsampling window size.
    stride : tuple
        Subsampling stride (default: (1, 1)).
    mode : {'max', 'average_inc_pad', 'average_exc_pad'}
    pad : tuple
        (padX, padY) or (padX, padY, padZ)
        default: (0, 0)

    .. warning:: The cuDNN library only works with GPUs that have a compute
        capability of 3.0 or higher.  This means that older GPUs will not
        work with this Op.

    Notes
    -----
    This Op implements the ignore_border=True of max_pool_2d.

    """
    img = gpu_contiguous(img)
    return GpuDnnPool(mode=mode)(img, ws, stride, pad) 
Author: muhanzhang | Project: D-VAE | Lines: 35 | Source: dnn.py
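
A hedged usage sketch for this helper. It assumes a cuDNN-enabled GPU build of Theano (libgpuarray plus cuDNN, device=cuda) and the theano.gpuarray.dnn module path; it will not run on a CPU-only setup.

import theano
import theano.tensor as T
from theano.gpuarray.dnn import dnn_pool  # module path assumed

img = T.tensor4('img')  # bc01 layout: batch, channel, rows, cols
pooled = dnn_pool(img, ws=(2, 2), stride=(2, 2), mode='max')
f = theano.function([img], pooled)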

Example 10: grad

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def grad(self, inputs, output_grads):
        return [output_grads[0] * inputs[1], output_grads[0] * inputs[0]]


# 2. Op returns x + y and x - y 
Author: muhanzhang | Project: D-VAE | Lines: 7 | Source: extending_theano_solution_1.py
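
The class around this grad method is not shown; judging by the source file name, it is the product Op from Theano's "extending Theano" exercise solution, which computes x * y. Below is a hedged reconstruction (the ProdOp name is assumed) together with a numerical check via theano.gradient.verify_grad.

import numpy
import theano
import theano.tensor as T


class ProdOp(theano.Op):
    __props__ = ()

    def make_node(self, x, y):
        x = T.as_tensor_variable(x)
        y = T.as_tensor_variable(y)
        return theano.Apply(self, [x, y], [x.type()])

    def perform(self, node, inputs, output_storage):
        x, y = inputs
        output_storage[0][0] = x * y

    def grad(self, inputs, output_grads):
        # d(x*y)/dx = y and d(x*y)/dy = x, chained with the output grad
        return [output_grads[0] * inputs[1], output_grads[0] * inputs[0]]


rng = numpy.random.RandomState(42)
x_val = rng.rand(3, 3).astype(theano.config.floatX)
y_val = rng.rand(3, 3).astype(theano.config.floatX)
theano.gradient.verify_grad(ProdOp(), [x_val, y_val], rng=rng)  # raises on mismatch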

Example 11: dnn_pool

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def dnn_pool(img, ws, stride=(1, 1), mode='max', pad=(0, 0)):
    """
    GPU pooling using cuDNN from NVIDIA.

    The memory layout to use is 'bc01', that is 'batch', 'channel',
    'first dim', 'second dim' in that order.

    `ws`, `stride` and `pad` must have the same length.

    Parameters
    ----------
    img
        Images to do the pooling over.
    ws : tuple
        Subsampling window size.
    stride : tuple
        Subsampling stride (default: (1, 1)).
    mode : {'max', 'average_inc_pad', 'average_exc_pad'}
    pad : tuple
        (padX, padY) or (padX, padY, padZ)
        default: (0, 0)

    .. warning:: The cuDNN library only works with GPUs that have a compute
        capability of 3.0 or higher.  This means that older GPUs will not
        work with this Op.

    Notes
    -----
    This Op implements the ignore_border=True of max_pool_2d.

    """
    img = gpu_contiguous(img)
    desc = GpuDnnPoolDesc(ws=ws, stride=stride, mode=mode, pad=pad)()
    return GpuDnnPool()(img, desc) 
Author: rizar | Project: attention-lvcsr | Lines: 36 | Source: dnn.py

Example 12: __init__

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def __init__(self, inputDim=None, outputDim=None, activation=None):
        """
        :type inputDim: tuple of [int]
        :param inputDim: dimensionality of input

        :type outputDim: tuple of [int]
        :param outputDim: number of hidden units

        :type activation: theano.Op or function
        :param activation: Non linearity to be applied in the hidden layer
        """

        super(HiddenLayerParams, self).__init__(inputDim, outputDim)

        self._activation = activation 
Author: moberweger | Project: deep-prior | Lines: 17 | Source: hiddenlayer.py

Example 13: make_node

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def make_node(self, inp):
        if not cusolver_available:
            raise RuntimeError('CUSOLVER is not available and '
                               'GpuLU Op can not be constructed.')
        if skcuda.__version__ <= '0.5.1':
            warnings.warn('The GpuLU op requires scikit-cuda > 0.5.1 to work with CUDA 8')
        if not pygpu_available:
            raise RuntimeError('Missing pygpu or triu/tril functions. '
                               'Install or update libgpuarray.')
        context_name = infer_context_name(inp)

        inp = as_gpuarray_variable(inp, context_name)

        inp = gpu_contiguous(inp)

        # This op can only operate on float32 matrices
        # because of the current implementation of triu/tril.
        # TODO: support float64
        assert inp.ndim == 2
        assert inp.dtype == 'float32'

        # outputs LU in a single matrix, and a pivots array
        pivots_type = GpuArrayType('int32',
                                   broadcastable=inp[0].broadcastable,
                                   context_name=context_name)()
        return theano.Apply(self, [inp], [inp.type(), pivots_type]) 
Author: mcgillmrl | Project: kusanagi | Lines: 28 | Source: extra_ops.py

Example 14: _add_to_cache

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def _add_to_cache(self, module, key, module_hash):
        """
        This function expects the compile lock to be held.

        """
        name = module.__file__
        _logger.debug("Adding module to cache %s %s",
                      key, name)
        # Changing the hash of the key is not allowed during
        # compilation. That is the only cause found that makes
        # the following assert fail.
        assert key not in self.entry_from_key

        location = os.path.dirname(name)
        key_pkl = os.path.join(location, 'key.pkl')
        assert not os.path.exists(key_pkl)
        key_data = KeyData(
            keys=set([key]),
            module_hash=module_hash,
            key_pkl=key_pkl,
            entry=name)

        key_broken = False
        if key[0]:
            try:
                key_data.save_pkl()
            except pickle.PicklingError:
                key_broken = True
                key_data.remove_key(key)
                key_data.save_pkl()
            if not key_broken and self.check_for_broken_eq:
                self.check_key(key, key_pkl)
            self.loaded_key_pkl.add(key_pkl)
        elif config.cmodule.warn_no_version:
            key_flat = flatten(key)
            ops = [k for k in key_flat if isinstance(k, theano.Op)]
            _logger.warning("Not all of the"
                            " following op(s) implement"
                            " c_code_cache_version(), which forces them"
                            " to be recompiled for each process: " + str(ops))
        self._update_mappings(key, key_data, module.__file__, not key_broken)
        return key_data 
Author: muhanzhang | Project: D-VAE | Lines: 44 | Source: cmodule.py
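
The warning at the end fires for Ops that generate C code but do not version it. Below is a hedged sketch of the fix for a hypothetical C-implemented Op; the DoubleCOp name and its C body are illustrative (loosely following Theano's C-Op tutorial), and a working C compiler is assumed.

import theano
import theano.tensor as T


class DoubleCOp(theano.Op):
    __props__ = ()

    def make_node(self, x):
        x = T.as_tensor_variable(x)
        assert x.dtype == 'float64' and x.ndim == 0
        return theano.Apply(self, [x], [x.type()])

    def c_code(self, node, name, inputs, outputs, sub):
        (x,) = inputs
        (z,) = outputs
        fail = sub['fail']
        return """
        Py_XDECREF(%(z)s);
        %(z)s = (PyArrayObject*)PyArray_NewLikeArray(%(x)s, NPY_ANYORDER,
                                                     NULL, 0);
        if (!%(z)s) %(fail)s;
        *(npy_float64*)PyArray_DATA(%(z)s) =
            2.0 * *(npy_float64*)PyArray_DATA(%(x)s);
        """ % locals()

    def c_code_cache_version(self):
        # version the generated C code so compiled modules are reused
        # across processes; bump the tuple whenever c_code changes
        return (1,)


x = T.dscalar('x')
f = theano.function([x], DoubleCOp()(x))
print(f(3.0))  # 6.0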

Example 15: op_lifter

# Required import: import theano [as alias]
# Or: from theano import Op [as alias]
def op_lifter(OP, cuda_only=False):
    """
    OP(..., host_from_gpu(), ...) -> host_from_gpu(GpuOP(...))

    gpu_from_host(OP(inp0, ...)) -> GpuOP(inp0, ...)

    """
    def f(maker):
        def local_opt(node):
            if type(node.op) in OP:
                # Either one of our inputs is on the gpu or
                # all of our clients are on the gpu
                replace = False
                # TODO: Maybe set context_name with infer_context_name()?
                context_name = None
                # We replace if any input is a host_from_gpu
                for i in node.inputs:
                    if i.owner and i.owner.op == host_from_gpu:
                        context_name = i.owner.inputs[0].type.context_name
                        replace = True
                        break
                if not replace:
                    # We replace if *all* clients are on the GPU
                    clients = [c for o in node.outputs for c in o.clients]
                    replace = len(clients) != 0
                    for c, idx in clients:
                        if (c == 'output' or
                                not isinstance(c.op, GpuFromHost)):
                            replace = False
                    # TODO: check that the clients want the same context?
                    if replace:
                        # All clients are GpuFromHost and we have at least one
                        context_name = clients[0][0].op.context_name

                # Check if we should replace
                if (not replace or
                    (cuda_only and
                     get_context(context_name).kind != 'cuda')):
                    return False

                # tag the inputs with the context in case
                # the context was derived from the outputs
                for i in node.inputs:
                    i.tag.context_name = context_name
                new_op = maker(node, context_name)
                # This is needed as sometimes new_op inherits from OP.
                if new_op and new_op != node.op:
                    if isinstance(new_op, theano.Op):
                        return [safe_to_cpu(o) for o in
                                new_op(*node.inputs, return_list=True)]
                    elif isinstance(new_op, (tuple, list)):
                        return [safe_to_cpu(o) for o in new_op]
                    else:  # suppose it is a variable on the GPU
                        return [host_from_gpu(new_op)]
            return False
        local_opt.__name__ = maker.__name__
        return local_optimizer(OP)(local_opt)
    return f 
Author: muhanzhang | Project: D-VAE | Lines: 60 | Source: opt.py
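
For completeness, a hedged sketch of how op_lifter is used as a decorator. The Dot22 -> GpuDot22 pairing mirrors a real lifting in theano.gpuarray.opt, but the surrounding registration decorators are omitted here, and a libgpuarray-enabled installation is assumed.

from theano import tensor
from theano.gpuarray.blas import gpu_dot22  # assumes pygpu is installed


@op_lifter([tensor.blas.Dot22])
def local_gpua_dot22(node, context_name):
    # op_lifter wires up the host<->GPU transfers; the maker only has
    # to name the GPU Op that replaces the CPU one
    return gpu_dot22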


Note: the theano.Op examples on this page were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective authors; copyright remains with the original authors, and any redistribution or use must follow the corresponding project's License. Do not repost without permission.