

Python tensor.ftensor4 method code examples

This article collects typical usage examples of the theano.tensor.ftensor4 method in Python. If you are wondering what tensor.ftensor4 does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the theano.tensor module it belongs to.


The sections below show 15 code examples of the tensor.ftensor4 method, sorted by popularity by default.
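
The examples that follow come from real projects; as a starting point, here is a minimal self-contained sketch (written for this page, not taken from any of the projects below) of what tensor.ftensor4 itself provides: a symbolic 4-dimensional float32 tensor that can be wired into a graph and compiled with theano.function.

import numpy
import theano
from theano import tensor

x = tensor.ftensor4('x')       # symbolic 4-D tensor, dtype float32
y = x.sum(axis=(2, 3))         # reduce over the last two dimensions
f = theano.function([x], y)    # compile the symbolic graph

x_val = numpy.ones((2, 3, 4, 5), dtype='float32')
print(f(x_val).shape)          # (2, 3)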

Example 1: test_blocksparse_inplace_gemv_opt

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor4 [as alias]
def test_blocksparse_inplace_gemv_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()

    o = sparse_block_dot(W, h, iIdx, b, oIdx)

    f = theano.function([W, h, iIdx, b, oIdx], o)
    assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')

    if theano.config.mode == "FAST_COMPILE":
        assert not f.maker.fgraph.toposort()[-1].op.inplace
    else:
        assert f.maker.fgraph.toposort()[-1].op.inplace 
Developer: muhanzhang, Project: D-VAE, Lines: 18, Source: test_opt.py
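
The block-sparse examples on this page all declare W as an ftensor4 but never show concrete shapes. The sketch below is written for this page, follows the shape convention documented for sparse_block_dot, uses purely illustrative sizes, and assumes a Theano version whose block-sparse ops have a CPU implementation.

import numpy
import theano
from theano import tensor
from theano.tensor.nnet.blocksparse import sparse_block_dot

# Documented shape convention for sparse_block_dot:
#   W    : (iBlocks, oBlocks, iSize, oSize)  -- the ftensor4
#   h    : (batch, iWin, iSize)
#   iIdx : (batch, iWin)   integer indices of input blocks
#   b    : (oBlocks, oSize)
#   oIdx : (batch, oWin)   integer indices of output blocks
# Result: (batch, oWin, oSize)
W = tensor.ftensor4()
h = tensor.ftensor3()
b = tensor.fmatrix()
iIdx = tensor.imatrix()
oIdx = tensor.imatrix()

o = sparse_block_dot(W, h, iIdx, b, oIdx)
f = theano.function([W, h, iIdx, b, oIdx], o)

iBlocks, oBlocks, iSize, oSize, batch, iWin, oWin = 6, 8, 3, 4, 2, 2, 2
rng = numpy.random.RandomState(0)
out = f(rng.rand(iBlocks, oBlocks, iSize, oSize).astype('float32'),
        rng.rand(batch, iWin, iSize).astype('float32'),
        rng.randint(0, iBlocks, (batch, iWin)).astype('int32'),
        rng.rand(oBlocks, oSize).astype('float32'),
        rng.randint(0, oBlocks, (batch, oWin)).astype('int32'))
print(out.shape)  # (2, 2, 4) == (batch, oWin, oSize)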

Example 2: test_blocksparse_inplace_outer_opt

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor4 [as alias]
def test_blocksparse_inplace_outer_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()

    o = sparse_block_dot(W, h, iIdx, b, oIdx)

    theano.printing.debugprint(tensor.grad(o.sum(), wrt=W))

    f = theano.function([W, h, iIdx, b, oIdx],
                        [o, tensor.grad(o.sum(), wrt=W)])
    assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')

    if theano.config.mode == "FAST_COMPILE":
        assert not f.maker.fgraph.toposort()[-1].op.inplace
    else:
        assert f.maker.fgraph.toposort()[-1].op.inplace 
Developer: muhanzhang, Project: D-VAE, Lines: 21, Source: test_opt.py

Example 3: test_sparseblockdot

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor4 [as alias]
def test_sparseblockdot(self):
        """
        Compares the numpy version of sparseblockgemv to sparse_block_dot.
        """
        b = tensor.fmatrix()
        W = tensor.ftensor4()
        h = tensor.ftensor3()
        iIdx = tensor.imatrix()
        oIdx = tensor.imatrix()

        o = sparse_block_dot(W, h, iIdx, b, oIdx)

        f = theano.function([W, h, iIdx, b, oIdx], o, mode=self.mode)

        W_val, h_val, iIdx_val, b_val, oIdx_val = \
            BlockSparse_Gemv_and_Outer.gemv_data()

        th_out = f(W_val, h_val, iIdx_val, b_val, oIdx_val)

        ref_out = BlockSparse_Gemv_and_Outer.gemv_numpy(
            b_val.take(oIdx_val, axis=0), W_val, h_val, iIdx_val, oIdx_val)

        utt.assert_allclose(ref_out, th_out) 
Developer: muhanzhang, Project: D-VAE, Lines: 25, Source: test_blocksparse.py

Example 4: test_sparseblockgemv

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor4 [as alias]
def test_sparseblockgemv(self):
        """
        Compares the numpy and theano versions of sparseblockgemv.
        """
        b = tensor.fmatrix()
        W = tensor.ftensor4()
        h = tensor.ftensor3()
        iIdx = tensor.imatrix()
        oIdx = tensor.imatrix()

        o = self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)

        f = theano.function([W, h, iIdx, b, oIdx], o, mode=self.mode)

        W_val, h_val, iIdx_val, b_val, oIdx_val = \
            BlockSparse_Gemv_and_Outer.gemv_data()

        th_out = f(W_val, h_val, iIdx_val, b_val, oIdx_val)
        ref_out = BlockSparse_Gemv_and_Outer.gemv_numpy(
            b_val.take(oIdx_val, axis=0), W_val, h_val, iIdx_val, oIdx_val)

        utt.assert_allclose(ref_out, th_out) 
Developer: muhanzhang, Project: D-VAE, Lines: 24, Source: test_blocksparse.py

Example 5: test_sparseblockouter

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor4 [as alias]
def test_sparseblockouter(self):
        o = tensor.ftensor4()
        x = tensor.ftensor3()
        y = tensor.ftensor3()
        xIdx = tensor.imatrix()
        yIdx = tensor.imatrix()

        out = self.outer_op(o, x, y, xIdx, yIdx)

        f = theano.function([o, x, y, xIdx, yIdx], out,
                            on_unused_input="warn", mode=self.mode)

        o_val, x_val, y_val, xIdx_val, yIdx_val = \
            BlockSparse_Gemv_and_Outer.outer_data()

        th_out = f(o_val, x_val, y_val, xIdx_val, yIdx_val)
        ref_out = BlockSparse_Gemv_and_Outer.outer_numpy(
            o_val, x_val, y_val, xIdx_val, yIdx_val)

        utt.assert_allclose(ref_out, th_out) 
Developer: muhanzhang, Project: D-VAE, Lines: 22, Source: test_blocksparse.py

Example 6: test_logical_shapes

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor4 [as alias]
def test_logical_shapes(self):
        # Logical shapes are not supported anymore, so we check that it
        # raises an Exception.
        for stride in range(1, 4):
            kshp = (10, 2, 10, 10)
            featshp = (3, 10, 11, 11)

            a = tensor.ftensor4()
            A = tensor.ftensor4()

            # Need to transpose first two dimensions of kernel, and reverse
            # index kernel image dims (for correlation)
            kernel_rotated = tensor.transpose(A, axes=[1, 0, 2, 3])

            featshp_logical = (featshp[0], featshp[1], featshp[2] * stride,
                               featshp[3] * stride)
            kshp_rotated = (kshp[1], kshp[0], kshp[2], kshp[3])
            self.assertRaises(ValueError, tensor.nnet.conv2d,
                              a, kernel_rotated,
                              border_mode='full',
                              image_shape=featshp,
                              filter_shape=kshp_rotated,
                              imshp_logical=featshp_logical[1:],
                              kshp_logical=kshp[2:]) 
Developer: muhanzhang, Project: D-VAE, Lines: 26, Source: test_conv_cuda_ndarray.py

Example 7: test_dnn_conv_merge_mouts

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor4 [as alias]
def test_dnn_conv_merge_mouts():
    # make sure it doesn't attempt to output/alpha merge a convolution
    # that has multiple clients.
    if not cuda.dnn.dnn_available():
        raise SkipTest(cuda.dnn.dnn_available.msg)
    img = T.ftensor4()
    kern = T.ftensor4()
    out = T.ftensor4()

    conv = dnn.dnn_conv(img, kern)

    lr = numpy.asarray(0.05, dtype='float32')

    if cuda.dnn.version() == -1:
        # Can't merge alpha with cudnn v1
        fr = conv + out
    else:
        fr = lr * (conv + out)
    rr = conv * lr

    f = theano.function([img, kern, out], [fr, rr], mode=mode_with_gpu)
    convs = [n for n in f.maker.fgraph.toposort()
             if isinstance(n.op, dnn.GpuDnnConv)]
    assert len(convs) == 1 
Developer: muhanzhang, Project: D-VAE, Lines: 26, Source: test_dnn.py

Example 8: test_dnn_conv_merge_broad

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor4 [as alias]
def test_dnn_conv_merge_broad():
    # Make sure that we don't apply output_merge on broadcasted values.
    if not cuda.dnn.dnn_available():
        raise SkipTest(cuda.dnn.dnn_available.msg)
    img = T.ftensor4()
    kern = T.ftensor4()

    conv = dnn.dnn_conv(img, kern)

    lr = numpy.asarray(0.05, dtype='float32')

    # this does broadcasting
    fr = conv + lr

    f = theano.function([img, kern], [fr])
    convs = [n for n in f.maker.fgraph.toposort()
             if isinstance(n.op, dnn.GpuDnnConv)]
    assert len(convs) == 1
    conv = convs[0]
    # Assert output was not merged
    assert isinstance(conv.inputs[2].owner.op, GpuAllocEmpty) 
Developer: muhanzhang, Project: D-VAE, Lines: 23, Source: test_dnn.py

Example 9: check_theano

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor4 [as alias]
def check_theano():
    """Checks if theano is installed correctly."""
    try:
        import theano
        import theano.tensor as T

        # Check float type.
        if theano.config.floatX != "float32":
            logging.error("Theano float type must be float32. Add "
                          "floatX=float32 to your .theanorc.")
        else:
            logging.info("Theano float is float32.")

        # Check if cudnn softmax is available.
        try:
            from dltools import architectures
            architectures.FRRNBuilderBase.log_softmax_4d(T.ftensor4())
            logging.info("cuDNN spatial softmax found.")
        except:
            logging.error("Cannot create cuDNN spatial log softmax. Install "
                          "cuDNN and make sure that theano uses the GPU.")
    except:
        logging.error("Cannot import theano.") 
Developer: TobyPDE, Project: FRRN, Lines: 25, Source: check_dependencies.py

Example 10: test_blocksparse_inplace_outer_opt

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor4 [as alias]
def test_blocksparse_inplace_outer_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()

    o = sparse_block_dot(W, h, iIdx, b, oIdx)

    theano.printing.debugprint(tensor.grad(o.sum(), wrt=W))

    f = theano.function([W, h, iIdx, b, oIdx],
                        [o, tensor.grad(o.sum(), wrt=W)])

    if theano.config.mode == "FAST_COMPILE":
        assert not f.maker.fgraph.toposort()[-1].op.inplace
    else:
        assert f.maker.fgraph.toposort()[-1].op.inplace 
Developer: rizar, Project: attention-lvcsr, Lines: 20, Source: test_opt.py

Example 11: test_default_conv

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor4 [as alias]
def test_default_conv():
    """Just test that we introduce the right GPU convolution
    version.

    """
    img = theano.tensor.ftensor4()
    fil = theano.tensor.ftensor4()

    c = theano.tensor.nnet.conv2d(img, fil)
    f = theano.function([img, fil], c, mode=theano_mode)

    if cuda.dnn.dnn_available():
        assert any([isinstance(a.op, GpuDnnConv)
                    for a in f.maker.fgraph.apply_nodes])
    else:
        assert any([isinstance(a.op, cuda.blas.GpuCorrMM)
                    for a in f.maker.fgraph.apply_nodes]) 
Developer: rizar, Project: attention-lvcsr, Lines: 19, Source: test_conv_cuda_ndarray.py
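
The convolution examples (6, 7, 8, and 11) use ftensor4 in its most common role: a batch of images and a stack of filters. A minimal CPU-only sketch of that usage, written for this page with purely illustrative shapes, is:

import numpy
import theano
from theano import tensor

img = tensor.ftensor4('img')    # (batch, channels, rows, cols)
kern = tensor.ftensor4('kern')  # (nfilters, channels, kernel_rows, kernel_cols)

conv = tensor.nnet.conv2d(img, kern)    # 'valid' border mode by default
f = theano.function([img, kern], conv)

img_val = numpy.random.rand(1, 3, 8, 8).astype('float32')
kern_val = numpy.random.rand(2, 3, 3, 3).astype('float32')
print(f(img_val, kern_val).shape)       # (1, 2, 6, 6)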

Example 12: test_sparseblockgemvF

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor4 [as alias]
def test_sparseblockgemvF(self):
        """
            Test the Fortran order for W (which can happen in the grad for some
            graphs).
        """
        b = tensor.fmatrix()
        W = tensor.ftensor4()
        h = tensor.ftensor3()
        iIdx = tensor.imatrix()
        oIdx = tensor.imatrix()

        o = self.gemv_op(b.take(oIdx, axis=0),
                         tensor.DimShuffle((False, False, False, False),
                                           (0, 1, 3, 2))
                         (tensor.as_tensor_variable(W)),
                         h, iIdx, oIdx)

        f = theano.function([W, h, iIdx, b, oIdx], o, mode=self.mode)

        W_val, h_val, iIdx_val, b_val, oIdx_val = \
            BlockSparse_Gemv_and_Outer.gemv_data()

        th_out = f(numpy.swapaxes(W_val, 2, 3), h_val, iIdx_val, b_val,
                   oIdx_val)
        ref_out = BlockSparse_Gemv_and_Outer.gemv_numpy(
            b_val.take(oIdx_val, axis=0), W_val, h_val, iIdx_val, oIdx_val)

        utt.assert_allclose(ref_out, th_out) 
Developer: muhanzhang, Project: D-VAE, Lines: 30, Source: test_blocksparse.py

Example 13: test_dot_infershape

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor4 [as alias]
def test_dot_infershape(self):
        b = tensor.fmatrix()
        W = tensor.ftensor4()
        h = tensor.ftensor3()
        iIdx = tensor.imatrix()
        oIdx = tensor.imatrix()

        self._compile_and_check([W, h, iIdx, b, oIdx],
                                [sparse_block_dot(W, h, iIdx, b, oIdx)],
                                self.gemv_data(),
                                self.gemv_class) 
Developer: muhanzhang, Project: D-VAE, Lines: 13, Source: test_blocksparse.py

Example 14: test_gemv_infershape

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor4 [as alias]
def test_gemv_infershape(self):
        b = tensor.fmatrix()
        W = tensor.ftensor4()
        h = tensor.ftensor3()
        iIdx = tensor.imatrix()
        oIdx = tensor.imatrix()

        self._compile_and_check(
            [W, h, iIdx, b, oIdx],
            [self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)],
            self.gemv_data(),
            self.gemv_class) 
Developer: muhanzhang, Project: D-VAE, Lines: 14, Source: test_blocksparse.py

Example 15: test_outer_infershape

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ftensor4 [as alias]
def test_outer_infershape(self):
        o = tensor.ftensor4()
        x = tensor.ftensor3()
        y = tensor.ftensor3()
        xIdx = tensor.imatrix()
        yIdx = tensor.imatrix()

        self._compile_and_check([o, x, y, xIdx, yIdx],
                                [self.outer_op(o, x, y, xIdx, yIdx)],
                                self.outer_data(),
                                self.outer_class) 
Developer: muhanzhang, Project: D-VAE, Lines: 13, Source: test_blocksparse.py


Note: The theano.tensor.ftensor4 method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Before distributing or using the code, please refer to the license of the corresponding project; do not republish without permission.