

Python tensor.Elemwise Code Examples

This article collects typical usage examples of theano.tensor.Elemwise in Python. If you are wondering what tensor.Elemwise does, how to call it, or what real uses of it look like, the curated code examples below should help. You can also browse further usage examples from the theano.tensor module.


Below are 15 code examples of tensor.Elemwise, sorted by popularity by default.
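All of the examples share one basic pattern: tensor.Elemwise wraps a scalar op from theano.scalar and lifts it to an element-wise (broadcasting) tensor op. As a quick orientation, here is a minimal sketch of that pattern; it is only an illustrative sketch that assumes a working Theano and NumPy installation, and it uses theano.scalar.add, the same scalar op that Example 1 passes to Elemwise.

import numpy
import theano
from theano import scalar, tensor
from theano.tensor import Elemwise

# Lift the scalar "add" op to an element-wise tensor op.
# Elemwise(scalar.add)(x, y) builds essentially the same graph node as x + y.
x = tensor.matrix('x')
y = tensor.matrix('y')
z = Elemwise(scalar.add)(x, y)

f = theano.function([x, y], z)
a = numpy.ones((2, 3), dtype=theano.config.floatX)
b = numpy.full((2, 3), 2.0, dtype=theano.config.floatX)
print(f(a, b))  # element-wise sum: a 2x3 matrix of 3.0

Most ordinary tensor arithmetic (x + y, tensor.exp(x), comparisons, casts) compiles down to Elemwise nodes, which is why the GPU checks in Examples 6-8 simply scan the compiled graph for Elemwise instances, and why the optimizations in Examples 13-14 match on them.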

Example 1: test_infer_shape

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Elemwise [as alias]
def test_infer_shape(self):

        for s_left, s_right in [((5, 6), (5, 6)),
                                ((5, 6), (5, 1)),
                                ((5, 6), (1, 6)),
                                ((5, 1), (5, 6)),
                                ((1, 6), (5, 6)),
                                ((2, 3, 4, 5), (2, 3, 4, 5)),
                                ((2, 3, 4, 5), (2, 3, 1, 5)),
                                ((2, 3, 4, 5), (1, 3, 4, 5)),
                                ((2, 1, 4, 5), (2, 3, 4, 5)),
                                ((2, 3, 4, 1), (2, 3, 4, 5))]:
            dtype = theano.config.floatX
            t_left = TensorType(dtype, [(entry == 1) for entry in s_left])()
            t_right = TensorType(dtype, [(entry == 1) for entry in s_right])()
            t_left_val = numpy.zeros(s_left, dtype=dtype)
            t_right_val = numpy.zeros(s_right, dtype=dtype)
            self._compile_and_check([t_left, t_right],
                            [Elemwise(scalar.add)(t_left, t_right)],
                            [t_left_val, t_right_val], Elemwise) 
Developer: muhanzhang, Project: D-VAE, Lines: 22, Source: test_elemwise.py

Example 2: test_gt_grad

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Elemwise [as alias]
def test_gt_grad():
    """A user test that failed.

    Something about it made Elemwise.grad return something that was
    too complicated for get_scalar_constant_value to recognize as being 0, so
    gradient.grad reported that it was not a valid gradient of an
    integer.

    """
    floatX = config.floatX
    T = theano.tensor

    input_ = T.vector(dtype=floatX)
    random_values = numpy.random.RandomState(1234).uniform(
                                                low=-1, high=1, size=(2, 2))
    W_values = numpy.asarray(random_values, dtype=floatX)
    W = theano.shared(value=W_values, name='weights')
    correct_score = T.dot(input_, W)
    wrong_input = T.vector(dtype=floatX)
    wrong_score = theano.clone(correct_score, {input_: wrong_input})
    # Hinge loss

    scores = T.ones_like(correct_score) - correct_score + wrong_score
    cost = (scores * (scores > 0)).sum()
    T.grad(cost, input_) 
Developer: muhanzhang, Project: D-VAE, Lines: 27, Source: test_elemwise.py

Example 3: test_incsubtensor_mixed

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Elemwise [as alias]
def test_incsubtensor_mixed():

    # This catches a bug that occurred when incrementing
    # a float32 tensor by a float64 tensor.
    # The result is defined to be float32, so it is OK
    # to downcast the float64 increment in order to
    # transfer it to the GPU.
    # The bug was that the optimization called GpuFromHost
    # without casting first, causing the optimization to
    # fail.
    X = tensor.fmatrix()
    Y = tensor.dmatrix()
    Z = tensor.inc_subtensor(X[0:1, 0:1], Y)
    f = theano.function([X, Y], Z, mode=mode_with_gpu)
    packed, = f.maker.fgraph.inputs[1].clients
    client, idx = packed
    print(client)
    assert isinstance(client.op, tensor.Elemwise)
    assert isinstance(client.op.scalar_op, theano.scalar.Cast)
    packed, = client.outputs[0].clients
    client, idx = packed
    assert isinstance(client.op, cuda.GpuFromHost) 
Developer: muhanzhang, Project: D-VAE, Lines: 24, Source: test_opt.py

Example 4: test_elemwise3

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Elemwise [as alias]
def test_elemwise3():
    """ Several kinds of elemwise expressions with dimension
    permutations and broadcasting"""

    shape = (3, 4, 5, 6)
    a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),
                                               dtype='float32'), 'a')
    b = tensor.fvector()
    new_val = (a + b).dimshuffle([2, 0, 3, 1])
    new_val *= tensor.exp(1 + b ** a).dimshuffle([2, 0, 3, 1])
    f = pfunc([b], [], updates=[(a, new_val)], mode=mode_with_gpu)
    has_elemwise = False
    for i, node in enumerate(f.maker.fgraph.toposort()):
        has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
    assert not has_elemwise
    # let debugmode catch errors
    f(theano._asarray(numpy.random.rand(6), dtype='float32')) 
Developer: muhanzhang, Project: D-VAE, Lines: 19, Source: test_basic_ops.py

Example 5: test_elemwise4

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Elemwise [as alias]
def test_elemwise4():
    """ Test that two vectors can be broadcast to form an outer
    product (by performing rank-1 matrix update"""

    shape = (3, 4)
    a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),
                                               dtype='float32'), 'a')
    b = tensor.fvector()
    c = tensor.fvector()
    f = pfunc([b, c], [],
              updates=[(a, (a + b.dimshuffle('x', 0) * c.dimshuffle(0, 'x')))],
              mode=mode_with_gpu)
    has_elemwise = False
    for i, node in enumerate(f.maker.fgraph.toposort()):
        has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
    assert not has_elemwise
    # let debugmode catch errors
    f(theano._asarray(numpy.random.rand(4), dtype='float32'),
      theano._asarray(numpy.random.rand(3), dtype='float32')) 
Developer: muhanzhang, Project: D-VAE, Lines: 21, Source: test_basic_ops.py

Example 6: main

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Elemwise [as alias]
def main():
    vlen = 10 * 30 * 768  # 10 x #cores x # threads per core
    iters = 1000

    rng = numpy.random.RandomState(22)
    x = shared(numpy.asarray(rng.rand(vlen), config.floatX))
    f = function([], T.exp(x))
    print(f.maker.fgraph.toposort())
    t0 = time.time()
    for i in iter(range(iters)):
        r = f()
    t1 = time.time()
    print('Looping %d times took' % iters, t1 - t0, 'seconds')
    print('Result is', r)
    if numpy.any([isinstance(x.op, T.Elemwise) for x in f.maker.fgraph.toposort()]):
        print('Used the cpu')
    else:
        print('Used the gpu') 
Developer: vitruvianscience, Project: OpenDeep, Lines: 20, Source: theano_test.py

Example 7: get_processor_type

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Elemwise [as alias]
def get_processor_type():
    """
    Test whether the GPU is being used, based on the example in

      http://deeplearning.net/software/theano/tutorial/using_gpu.html

    """
    rng = np.random.RandomState(22)

    n = 10*30*768
    x = shared(rng.rand(n))
    f = function([], T.exp(x))

    if np.any([isinstance(x.op, T.Elemwise) for x in f.maker.fgraph.toposort()]):
        return 'cpu'
    return 'gpu' 
Developer: frsong, Project: pycog, Lines: 18, Source: theanotools.py

Example 8: test_if_using_GPU

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Elemwise [as alias]
def test_if_using_GPU(verbose=False):
    dtype = config.floatX  # @UndefinedVariable
    vlen = 10 * 30 * 768  # 10 x #cores x # threads per core
    iters = 100

    rng = numpy.random.RandomState(22)
    x = shared(numpy.asarray(rng.rand(vlen), dtype))
    f = function([], tensor.exp(x))
    # print(f.maker.fgraph.toposort())
    t0 = time.time()
    for _ in range(iters):
        r = f()
    t1 = time.time()
    dur = t1 - t0
    if verbose:
        print("Looping %d times took %f seconds" % (iters, dur))
        print("Result is %s" % (r,))
    if numpy.any([isinstance(x.op, tensor.Elemwise) and
                  ('Gpu' not in type(x.op).__name__)
                  for x in f.maker.fgraph.toposort()]):
        print('Using the cpu')
        return False
    else:
        print('Using the gpu')
        return True 
Developer: clp-research, Project: deep_disfluency, Lines: 27, Source: test_if_using_gpu.py

Example 9: local_gpua_row_switch

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Elemwise [as alias]
def local_gpua_row_switch(node):
    """
    Detects eligible Switch instances and replaces them with a GPU
    row switch.
    """

    if (node.op.__class__ == T.Elemwise
        and node.op.scalar_op.__class__ != theano.scalar.Switch):
        return False

    cond, ift, iff = node.inputs
    out, = node.outputs

    # Only applies to Switch instances where a vector mask broadcasts over
    # matrices.
    bcast = cond.broadcastable
    if not bcast or not (not bcast[0] and all(bcast[1:])
                         and ift.ndim in [2, 3]):
        return False

    if not (ift.dtype == iff.dtype == "float32"):
        return False

    if cond.owner and isinstance(cond.owner.op, HostFromGpu):
        gpu_cond, = cond.owner.inputs
    else:
        gpu_cond = as_cuda_ndarray_variable(
                T.cast(cond.flatten(), "float32"))

    if ift.owner and isinstance(ift.owner.op, HostFromGpu):
        gpu_ift, = ift.owner.inputs
    else:
        gpu_ift = as_cuda_ndarray_variable(ift)

    if iff.owner and isinstance(iff.owner.op, HostFromGpu):
        gpu_iff, = iff.owner.inputs
    else:
        gpu_iff = as_cuda_ndarray_variable(iff)

    gpu_op = GpuRowSwitch()
    return [HostFromGpu()(gpu_op(gpu_cond, gpu_ift, gpu_iff))]
Developer: stanfordnlp, Project: spinn, Lines: 43, Source: cuda.py

Example 10: with_linker

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Elemwise [as alias]
def with_linker(self, linker, op, type, rand_val):
        for xsh, ysh in [((3, 5), (3, 5)),
                         ((3, 5), (1, 5)),
                         ((3, 5), (3, 1)),
                         ((1, 5), (5, 1)),
                         ((1, 1), (1, 1)),
                         ((self.openmp_minsize,), (self.openmp_minsize,)),
                         ((self.openmp_minsize_sqrt,
                           self.openmp_minsize_sqrt),
                          (self.openmp_minsize_sqrt,
                           self.openmp_minsize_sqrt)),
                         ((2, 3, 4, 5), (2, 3, 4, 5)),
                         ((2, 3, 4, 5), (1, 3, 1, 5)),
                         ((2, 3, 4, 5), (1, 1, 1, 1)),
                         ((), ())]:
            x = type('float64', [(entry == 1) for entry in xsh])('x')
            y = type('float64', [(entry == 1) for entry in ysh])('y')
            e = op(scalar.add)(x, y)
            f = copy(linker).accept(FunctionGraph([x, y], [e])).make_function()
            xv = rand_val(xsh)
            yv = rand_val(ysh)
            zv = xv + yv

            unittest_tools.assert_allclose(f(xv, yv), zv)

            # test Elemwise.infer_shape
            # (the Shape op doesn't implement c_code!)
            if isinstance(linker, gof.PerformLinker):
                x = type('float64', [(entry == 1) for entry in xsh])('x')
                y = type('float64', [(entry == 1) for entry in ysh])('y')
                e = op(scalar.add)(x, y)
                f = copy(linker).accept(FunctionGraph(
                    [x, y], [e.shape])).make_function()
                assert tuple(f(xv, yv)) == tuple(zv.shape) 
Developer: muhanzhang, Project: D-VAE, Lines: 36, Source: test_elemwise.py

Example 11: with_linker_inplace

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Elemwise [as alias]
def with_linker_inplace(self, linker, op, type, rand_val):
        for xsh, ysh in [((5, 5), (5, 5)),
                         ((5, 5), (1, 5)),
                         ((5, 5), (5, 1)),
                         ((1, 1), (1, 1)),
                         ((2, 3, 4, 5), (2, 3, 4, 5)),
                         ((2, 3, 4, 5), (1, 3, 1, 5)),
                         ((2, 3, 4, 5), (1, 1, 1, 1)),
                         ((), ())]:
            x = type('float64', [(entry == 1) for entry in xsh])('x')
            y = type('float64', [(entry == 1) for entry in ysh])('y')
            e = op(scalar.Add(scalar.transfer_type(0)), {0: 0})(x, y)
            f = copy(linker).accept(FunctionGraph([x, y], [e])).make_function()
            xv = rand_val(xsh)
            yv = rand_val(ysh)
            zv = xv + yv

            f(xv, yv)

            self.assertTrue((xv == zv).all())
            # test Elemwise.infer_shape
            # (the Shape op doesn't implement c_code!)
            if isinstance(linker, gof.PerformLinker):
                x = type('float64', [(entry == 1) for entry in xsh])('x')
                y = type('float64', [(entry == 1) for entry in ysh])('y')
                e = op(scalar.Add(scalar.transfer_type(0)), {0: 0})(x, y)
                f = copy(linker).accept(FunctionGraph(
                    [x, y], [e.shape])).make_function()
                xv = rand_val(xsh)
                yv = rand_val(ysh)
                zv = xv + yv

                f(xv, yv)

                assert xv.shape == zv.shape 
Developer: muhanzhang, Project: D-VAE, Lines: 37, Source: test_elemwise.py

Example 12: test_not_implemented_elemwise_grad

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Elemwise [as alias]
def test_not_implemented_elemwise_grad():
    """
    Regression test for unimplemented gradient in an Elemwise Op.
    """

    class TestOp(scalar.ScalarOp):

        def __init__(self):
            self.output_types_preference = scalar.upgrade_to_float

        def impl(self, n, x):
            return x * n

        def grad(self, inputs, gout):
            (n, x) = inputs
            (gz,) = gout
            dy_dx = n
            return [theano.gradient.grad_not_implemented(self, 0, n),
                    gz * dy_dx]

    test_op = tensor.Elemwise(TestOp())
    x = tensor.scalar()
    # The call to `grad` used to crash.
    tensor.grad(test_op(2, x), x)
    # Verify that trying to use the not implemented gradient fails.
    try:
        tensor.grad(test_op(x, 2), x)
        assert False
    except theano.gradient.NullTypeGradError:
        pass 
Developer: muhanzhang, Project: D-VAE, Lines: 32, Source: test_elemwise.py

Example 13: dtype_in_elemwise_supported

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Elemwise [as alias]
def dtype_in_elemwise_supported(op):
    """
    Return True if the Elemwise op is supported on the GPU.
    Return False otherwise.

    Notes
    -----
    We need to check inside the Composite op.

    """
    def get_all_basic_scalar(composite_op):
        l = []
        for i in composite_op.fgraph.toposort():
            if isinstance(i, theano.scalar.Composite):
                l += get_all_basic_scalar(i)
            else:
                l.append(i)
        return l
    if isinstance(op, GpuElemwise) or isinstance(op, tensor.Elemwise):
        if isinstance(op.scalar_op, theano.scalar.Composite):
            scals = get_all_basic_scalar(op.scalar_op)
            for s in scals:
                if any([i.type.dtype not in elemwise_cuda_dtype_supported
                        for i in s.inputs + s.outputs]):
                    return False
    return True 
Developer: muhanzhang, Project: D-VAE, Lines: 28, Source: opt.py

Example 14: local_gpu_elemwise_1

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Elemwise [as alias]
def local_gpu_elemwise_1(node):
    """
    gpu_from_host(Elemwise) -> GpuElemwise(gpu_from_host(...))

    """
    if isinstance(node.op, GpuFromHost):
        host_i, = node.inputs
        if (host_i.owner and
                isinstance(host_i.owner.op, tensor.Elemwise) and
                len(host_i.clients) == 1 and
                dtype_in_elemwise_supported(node.op)):

            elemwise_node = host_i.owner
            # Don't set any inplace pattern.
            # gpu_inplace_elemwise_optimizer will do it later

            if isinstance(elemwise_node.op.scalar_op, Erfinv):
                new_op = GpuElemwise(erfinv_gpu)
            elif isinstance(elemwise_node.op.scalar_op, Erfcx):
                new_op = GpuElemwise(erfcx_gpu)
            else:
                try:
                    new_op = GpuElemwise(elemwise_node.op.scalar_op)
                except SupportCodeError:
                    # This happens when scalar_op requires support code
                    return False

            if all([i.dtype == 'float32' for i in elemwise_node.inputs]):
                gpu_elemwise = new_op(*[as_cuda_ndarray_variable(i)
                                        for i in elemwise_node.inputs])
                gpu_elemwise = split_huge_add_or_mul(gpu_elemwise.owner)
                if not gpu_elemwise:
                    return False
                return [gpu_elemwise.outputs[0]]
    return False 
Developer: muhanzhang, Project: D-VAE, Lines: 37, Source: opt.py

Example 15: test_erfinvgpu

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Elemwise [as alias]
def test_erfinvgpu():
    """ Test that local_gpu_elemwise_0 replaces Erfinv with ErfinvGPU """
    x = tensor.fmatrix()
    f = theano.function([x], tensor.Elemwise(erfinv)(x), mode=mode_with_gpu)
    f2 = theano.function([x], tensor.Elemwise(erfinv)(x),
                         mode=mode_without_gpu)
    assert isinstance(f.maker.fgraph.toposort()[1].op, cuda.GpuElemwise)
    assert isinstance(f.maker.fgraph.toposort()[1].op.scalar_op,
                      cuda.elemwise.ErfinvGPU)
    xv = numpy.random.rand(7, 8).astype('float32')
    if imported_scipy_special:
        assert numpy.allclose(f(xv), f2(xv)) 
Developer: muhanzhang, Project: D-VAE, Lines: 14, Source: test_opt.py


Note: The theano.tensor.Elemwise examples above were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their original authors; copyright remains with those authors, and use or redistribution should follow each project's license. Do not reproduce without permission.