

Python basic_ops.as_cuda_ndarray_variable Function Code Examples

This article collects and organizes typical usage examples of the Python function theano.sandbox.cuda.basic_ops.as_cuda_ndarray_variable. If you are wondering what as_cuda_ndarray_variable does, how to call it, or what real-world uses of it look like, the curated code examples below should help.


The following presents 15 code examples of the as_cuda_ndarray_variable function, sorted by popularity by default.
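
Before the examples, a minimal sketch of what the function itself does may help: it coerces a host-side Theano variable (or anything convertible to one) into a variable of CudaNdarrayType, inserting the host-to-GPU transfer that GPU Ops expect. The sketch assumes the legacy theano.sandbox.cuda backend (Theano versions before 1.0 with the old CUDA backend enabled); the variable names are illustrative only.

import theano.tensor as T
from theano.sandbox.cuda.basic_ops import as_cuda_ndarray_variable

x = T.fmatrix('x')                   # float32 matrix living on the host
x_gpu = as_cuda_ndarray_variable(x)  # now a CudaNdarrayType variable; a GpuFromHost transfer is inserted
print(x_gpu.type)                    # e.g. CudaNdarrayType(float32, matrix)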

Example 1: make_node

    def make_node(self, images, top_down):
        """
        .. todo::

            WRITEME
        """
        images = as_cuda_ndarray_variable(images)
        top_down = as_cuda_ndarray_variable(top_down)

        assert images.ndim == 4
        assert top_down.ndim == 4

        channels_broadcastable = images.type.broadcastable[0]
        batch_broadcastable = images.type.broadcastable[3]

        rows_broadcastable = False
        cols_broadcastable = False

        houtput_broadcastable = (channels_broadcastable, rows_broadcastable,
                cols_broadcastable, batch_broadcastable)
        houtput_type = CudaNdarrayType(broadcastable=houtput_broadcastable)
        houtput = houtput_type()

        poutput_broadcastable = (channels_broadcastable, rows_broadcastable,
                cols_broadcastable, batch_broadcastable)
        poutput_type = CudaNdarrayType(broadcastable=poutput_broadcastable)
        poutput = poutput_type()

        return Apply(self, [images, top_down], [houtput, poutput])
Contributor: 123fengye741, Project: pylearn2, Lines of code: 29, Source: probabilistic_max_pooling.py
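
Example 1 and most of the make_node examples that follow share the same pattern: coerce each input with as_cuda_ndarray_variable, sanity-check ndim and dtype, build the output CudaNdarrayType(s), and return an Apply node. A hedged sketch of how such an Op is then used when building a graph (ProbMaxPoolOp is a hypothetical stand-in for the pylearn2 op above, not its real class name):

from theano.sandbox.cuda import CudaNdarrayType

# Symbolic 4D GPU inputs laid out as (channels, rows, cols, batch).
images = CudaNdarrayType(broadcastable=(False,) * 4)('images')
top_down = CudaNdarrayType(broadcastable=(False,) * 4)('top_down')

op = ProbMaxPoolOp()         # hypothetical name; stands in for the real pylearn2 Op
h, p = op(images, top_down)  # Op.__call__ invokes make_node and returns its two outputs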

Example 2: make_node

    def make_node(self, inp1, inp2):
        inp1 = as_cuda_ndarray_variable(inp1)
        inp2 = as_cuda_ndarray_variable(inp2)

        assert inp1.ndim == 2
        assert inp2.ndim == 2
        return theano.Apply(self, [inp1, inp2], [self.output_type(inp1)()])
Contributor: LEEKYOUNGHUN, Project: Theano, Lines of code: 7, Source: cula.py

Example 3: make_node

 def make_node(self, X, DY):
   X = gpu_contiguous(as_cuda_ndarray_variable(X))
   DY = gpu_contiguous(as_cuda_ndarray_variable(DY))
   assert X.dtype == "float32"
   assert DY.dtype == "float32"
   assert X.ndim == 4
   assert DY.ndim == 4
   return theano.Apply(self, [X, DY], [X.type()])
Contributor: chagge, Project: returnn, Lines of code: 8, Source: PoolHWBCOp.py

Example 4: make_node

 def make_node(self, o, x, y, xIdx, yIdx, alpha=None):
     one = tensor.constant(numpy.asarray(1.0, dtype="float32"))
     o = basic_ops.as_cuda_ndarray_variable(o)
     x = basic_ops.as_cuda_ndarray_variable(x)
     y = basic_ops.as_cuda_ndarray_variable(y)
     if alpha is None:
         alpha = one
     return Apply(self, [o, x, y, xIdx, yIdx, alpha], [o.type()])
Contributor: poolio, Project: Theano, Lines of code: 8, Source: blocksparse.py

Example 5: make_node

 def make_node(self, X, sizes):
   X = gpu_contiguous(as_cuda_ndarray_variable(X))
   sizes = gpu_contiguous(as_cuda_ndarray_variable(sizes))
   assert X.dtype == "float32"
   assert X.ndim == 4
   assert sizes.dtype == "float32"
   assert sizes.ndim == 2
   return theano.Apply(self, [X, sizes], [X.type()])
Contributor: atuxhe, Project: returnn, Lines of code: 8, Source: CropToBatchImageSizeOp.py

Example 6: make_node

 def make_node(self, x, b, y_idx):
     # N.B. won't work when we don't cast y_idx to float anymore
     x = as_cuda_ndarray_variable(x)
     b = as_cuda_ndarray_variable(b)
     y_idx = as_cuda_ndarray_variable(y_idx)
     nll = y_idx.type()
     sm = x.type()
     am = y_idx.type()
     return Apply(self, [x, b, y_idx], [nll, sm, am])
Contributor: Ambier, Project: Theano, Lines of code: 9, Source: nnet.py

Example 7: h_softmax_gpu

def h_softmax_gpu(W1, b1, W2, b2, x, n_outputs, n_classes,
                  n_outputs_per_class, batch_size, target=None):
    """
    GPU-only version of a two-layer hierarchical softmax.
    See hierarchical_softmax's docstring for the description of the arguments.
    """
    W1 = as_cuda_ndarray_variable(W1)
    b1 = as_cuda_ndarray_variable(b1)
    W2 = as_cuda_ndarray_variable(W2)
    b2 = as_cuda_ndarray_variable(b2)
    x = as_cuda_ndarray_variable(x)

    # First softmax which computes the probabilities of belonging to each class
    class_probs = tensor.nnet.softmax(tensor.dot(x, W1) + b1)

    if target is None:
        # Computes the probabilities of all the outputs

        class_ids = tensor.tile(tensor.arange(n_classes, dtype="int32")[None, :], (batch_size, 1))

        # Second softmax that computes the output probabilities
        activations = sparse_block_dot_SS(
            W2[None, :, :, :], x[:, None, :],
            tensor.zeros((batch_size, 1), dtype='int32'), b2, class_ids)

        output_probs = tensor.nnet.softmax(activations.reshape((-1, n_outputs_per_class)))
        output_probs = output_probs.reshape((batch_size, n_classes, -1))
        output_probs = class_probs[:, :, None] * output_probs
        output_probs = output_probs.reshape((batch_size, -1))
        output_probs = output_probs[:, :n_outputs]

    else:
        # Computes the probabilities of the outputs specified by the targets

        # Flattens the targets
        target = target.flatten()

        # Class to which each target belongs
        target_classes = target // n_outputs_per_class

        # Output index of each target within its class
        target_outputs_in_class = target % n_outputs_per_class

        # Second softmax that computes the output probabilities
        activations = sparse_block_dot_SS(
            W2[None, :, :, :], x[:, None, :],
            tensor.zeros((batch_size, 1), dtype='int32'), b2,
            target_classes[:, None])

        output_probs = tensor.nnet.softmax(activations[:, 0, :])
        target_class_probs = class_probs[tensor.arange(batch_size), target_classes]
        output_probs = output_probs[tensor.arange(batch_size),
                                    target_outputs_in_class]
        output_probs = target_class_probs * output_probs

    return output_probs
Contributor: adbrebs, Project: h_softmax_theano, Lines of code: 56, Source: h_softmax.py
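
A hedged usage sketch for h_softmax_gpu follows. The parameter shapes are inferred from the code above (W1: (n_features, n_classes), b1: (n_classes,), W2: (n_classes, n_features, n_outputs_per_class), b2: (n_classes, n_outputs_per_class)); the concrete sizes and variable names are illustrative assumptions, not taken from the original project.

import numpy
import theano
import theano.tensor as T

n_features, n_classes, n_outputs_per_class = 100, 50, 50
n_outputs = n_classes * n_outputs_per_class
batch_size = 32
rng = numpy.random.RandomState(0)

# The GPU path above requires float32 parameters.
W1 = theano.shared(rng.randn(n_features, n_classes).astype('float32'))
b1 = theano.shared(numpy.zeros(n_classes, dtype='float32'))
W2 = theano.shared(rng.randn(n_classes, n_features, n_outputs_per_class).astype('float32'))
b2 = theano.shared(numpy.zeros((n_classes, n_outputs_per_class), dtype='float32'))

x = T.fmatrix('x')  # (batch_size, n_features) minibatch of inputs
y = T.ivector('y')  # target output indices in [0, n_outputs)

# Probability of the target output for each row of x; omit target= to get the
# full (batch_size, n_outputs) matrix of output probabilities instead.
p_target = h_softmax_gpu(W1, b1, W2, b2, x, n_outputs, n_classes,
                         n_outputs_per_class, batch_size, target=y)
loss = -T.mean(T.log(p_target))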

Example 8: local_gpu_conv3d

def local_gpu_conv3d(node):
    if isinstance(node.op, Conv3D):
        if numpy.any([i.owner and isinstance(i.owner.op, HostFromGpu)
                      for i in node.inputs]):
            if numpy.all([o.type.dtype == 'float32' for o in node.outputs]):
                V, W, b, d = node.inputs
                return [host_from_gpu(gpu_convd(as_cuda_ndarray_variable(V),
                                                as_cuda_ndarray_variable(W),
                                                as_cuda_ndarray_variable(b),
                                                d))]
Contributor: 317070, Project: Theano, Lines of code: 10, Source: GpuConv3D.py

Example 9: make_node

    def make_node(self, inp1, inp2):
        inp1 = basic_ops.gpu_contiguous(basic_ops.as_cuda_ndarray_variable(inp1))
        inp2 = basic_ops.gpu_contiguous(basic_ops.as_cuda_ndarray_variable(inp2))

        assert inp1.dtype == "float32"
        assert inp2.dtype == "float32"
        assert inp1.ndim == 4  # (batch, a, b, real/imag)
        assert inp2.ndim == 4

        return theano.Apply(self, [inp1, inp2], [self.output_type(inp1)()])
Contributor: cfsmile, Project: Theano, Lines of code: 10, Source: fftconv.py

Example 10: local_gpu_conv_grad3d

def local_gpu_conv_grad3d(node):
    if isinstance(node.op, ConvGrad3D):
        if numpy.any([i.owner and isinstance(i.owner.op, HostFromGpu)
                      for i in node.inputs]):
            if numpy.all([o.type.dtype == 'float32' for o in node.outputs]):
                V, d, WShape, dCdH = node.inputs
                return [host_from_gpu(gpu_conv_grad3d(
                    as_cuda_ndarray_variable(V),
                    d,
                    WShape,
                    as_cuda_ndarray_variable(dCdH)))]
Contributor: 5730279821-TA, Project: Theano, Lines of code: 11, Source: GpuConvGrad3D.py
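
Examples 8 and 10 are the bodies of Theano local optimizers: graph rewrites that move a Conv3D or ConvGrad3D node onto the GPU once one of its inputs already comes from the GPU. A plausible registration pattern is sketched below; the decorator names (theano.gof.local_optimizer and theano.sandbox.cuda.opt.register_opt) and the Conv3D import path are assumptions based on the pre-1.0 Theano code base, and the exact registration used in the original files may differ.

from theano.gof import local_optimizer
from theano.sandbox.cuda.opt import register_opt
from theano.tensor.nnet.Conv3D import Conv3D

@register_opt()               # add the rewrite to the GPU optimization set
@local_optimizer([Conv3D])    # only visit Apply nodes whose op is a Conv3D
def local_gpu_conv3d(node):
    # Body as in Example 8: return a list with the GPU replacement, or None
    # to leave the node unchanged.
    pass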

Example 11: make_node

 def make_node(self, X, W, b):
   X = gpu_contiguous(as_cuda_ndarray_variable(X))
   W = gpu_contiguous(as_cuda_ndarray_variable(W))
   b = gpu_contiguous(as_cuda_ndarray_variable(b))
   assert X.dtype == "float32"
   assert W.dtype == "float32"
   assert b.dtype == "float32"
   assert X.ndim == 4
   assert W.ndim == 4
   assert b.ndim == 1
   return theano.Apply(self, [X, W, b], [X.type()])
Contributor: chagge, Project: returnn, Lines of code: 11, Source: CuDNNConvHWBCOp.py

Example 12: make_node

    def make_node(self, V, U, UinvT, Q, H, Y_indexes, Y_values, learning_rate,
                  use_qtilde=0, use_lower=1, invup_mode=1,
                  stabilize_period=10, unfactorize_period=100, debug_print=0):

        # The following are supposed to reside on the GPU
        V = as_cuda_ndarray_variable(V)
        U = as_cuda_ndarray_variable(U)
        UinvT = as_cuda_ndarray_variable(UinvT)
        Q = as_cuda_ndarray_variable(Q)
        H = as_cuda_ndarray_variable(H)

        # The following are on the CPU
        Y_indexes = as_tensor_variable(Y_indexes)
        Y_values = as_tensor_variable(Y_values)
        learning_rate = as_tensor_variable(learning_rate)
        use_qtilde = as_tensor_variable(use_qtilde)
        use_lower = as_tensor_variable(use_lower)
        invup_mode = as_tensor_variable(invup_mode)
        stabilize_period = as_tensor_variable(stabilize_period)
        unfactorize_period = as_tensor_variable(unfactorize_period)
        debug_print = as_tensor_variable(debug_print)

        # print "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"
        # for k,v in locals().items():
        #     print k,':',type(v)
        # print "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"

        params = [V, U, UinvT, Q, H, Y_indexes, Y_values, learning_rate,
                  use_qtilde, use_lower, invup_mode, stabilize_period,
                  unfactorize_period, debug_print]

        # make sure parameters are either all of dtype float32 or all of dtype float64 (except for Y_indexes which are integers)
        elem_type = V.dtype
        if elem_type != "float32" and elem_type != "float64":
            raise TypeError("LargeSparseTargets parameter V must have dtype of float32 or float64")

        check_tensor_variables_ndim_and_dtype(0, elem_type, ["learning_rate"], locals() )
        check_tensor_variables_ndim_and_dtype(2, elem_type, ["V", "U", "UinvT", "Q", "H", "Y_values"], locals() )
        check_tensor_variables_ndim_and_dtype(2, "int32", ["Y_indexes"], locals() )

        # T.matrix(elem_type)
            
        # Now properly set up outputs to compute
        if self.what_to_output==0: # output scalar cost
            outputs = [ T.scalar(elem_type) ]
        elif self.what_to_output==1: # output grad_H
            outputs = [ CudaNdarrayType(broadcastable=(False,False))() ]
        elif self.what_to_output==2: # output cost and grad_H
            outputs = [ T.scalar(elem_type), CudaNdarrayType(broadcastable=(False,False))() ]
        else:
            raise ValueError("Invalid value for what_to_output: must be 0,1, or 2")
        
        return Apply(self, params, outputs)
Contributor: adbrebs, Project: factored_output_layer, Lines of code: 53, Source: op.py

Example 13: make_node

    def make_node(self, W, b, d, H, RShape=None):
        W_ = as_cuda_ndarray_variable(W)
        b_ = as_cuda_ndarray_variable(b)
        d_ = T.as_tensor_variable(d)
        H_ = as_cuda_ndarray_variable(H)
        if RShape:
            RShape_ = T.as_tensor_variable(RShape)
        else:
            RShape_ = T.as_tensor_variable([-1, -1, -1])

        return theano.Apply(self, inputs=[W_, b_, d_, H_, RShape_],
                            outputs=[CudaNdarrayType(dtype=H_.dtype,
                                                     broadcastable=(False,)*5)()])
Contributor: 317070, Project: Theano, Lines of code: 13, Source: GpuConvTransp3D.py

Example 14: make_node

    def make_node(self, V, W, b, d):
        """
            :param V: Visible unit, input
            :param W: Weights, filter
            :param b: bias
            :param d: strides when moving the filter over the input
        """
        V_ = as_cuda_ndarray_variable(V)
        W_ = as_cuda_ndarray_variable(W)
        b_ = as_cuda_ndarray_variable(b)
        d_ = T.as_tensor_variable(d)

        return theano.Apply(
            self, inputs=[V_, W_, b_, d_],
            outputs=[CudaNdarrayType(
                dtype=V_.dtype,
                broadcastable=(V_.broadcastable[0], W_.broadcastable[0],
                               False, False, False))()])
Contributor: Dimitris0mg, Project: Theano, Lines of code: 14, Source: GpuConv3D.py

Example 15: make_node

 def make_node(self, X, DY, regions_y, regions_x):
   X = gpu_contiguous(as_cuda_ndarray_variable(X))
   assert X.dtype == "float32"
   assert X.ndim == 4
   DY = gpu_contiguous(as_cuda_ndarray_variable(DY))
   assert DY.dtype == "float32"
   assert DY.ndim == 4
   regions_y = gpu_contiguous(as_cuda_ndarray_variable(regions_y))
   assert regions_y.dtype == "float32"
   assert regions_y.ndim == 2
   regions_x = gpu_contiguous(as_cuda_ndarray_variable(regions_x))
   assert regions_x.dtype == "float32"
   assert regions_x.ndim == 2, regions_x.ndim
   return theano.Apply(self, [X, DY, regions_y, regions_x], [X.type()])
Contributor: chagge, Project: returnn, Lines of code: 14, Source: FractionalMaxPoolingOp.py


Note: The theano.sandbox.cuda.basic_ops.as_cuda_ndarray_variable examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective authors, and copyright remains with those authors; consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.