

Python tensor.join Method Code Examples

This article collects typical usage examples of the theano.tensor.join method in Python. If you are wondering what tensor.join does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the theano.tensor module.


The following presents 13 code examples of the tensor.join method, sorted by popularity by default.
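
Before looking at the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what tensor.join does: it concatenates tensors along a given axis, much like numpy.concatenate. The variable names are illustrative only.

import numpy
import theano
import theano.tensor as T

x = T.fmatrix('x')
y = T.fmatrix('y')
# join(axis, tensor_1, tensor_2, ...) concatenates its inputs along `axis`
z = T.join(0, x, y)
f = theano.function([x, y], z)

a = numpy.ones((2, 3), dtype='float32')
b = numpy.zeros((1, 3), dtype='float32')
print(f(a, b).shape)  # (3, 3), same result as numpy.concatenate([a, b], axis=0)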

Example 1: local_gpuaalloc2

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import join [as alias]
def local_gpuaalloc2(node):
    """
    Join(axis, {Alloc or HostFromGPU}, ...) -> Join(axis, GpuAlloc, Alloc, ...)

    Moves an alloc that is an input to join to the gpu.

    """
    try:
        get_context(None)
    except ContextNotDefined:
        # If there is no default context then we do not perform the move here.
        return
    if (isinstance(node.op, tensor.Alloc) and
        all(c != 'output' and
            c.op == tensor.join and
            all(i.owner and
                i.owner.op in [host_from_gpu, tensor.alloc]
                for i in c.inputs[1:])
            for c, idx in node.outputs[0].clients)):
        return [host_from_gpu(GpuAlloc(None)(*node.inputs))] 
Author: muhanzhang, Project: D-VAE, Lines: 22, Source: opt.py

Example 2: test_gpujoin_gpualloc

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import join [as alias]
def test_gpujoin_gpualloc():
    a = T.fmatrix('a')
    a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
    b = T.fmatrix('b')
    b_val = numpy.asarray(numpy.random.rand(3, 5), dtype='float32')

    f = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)) + 4,
                        mode=mode_without_gpu)
    f_gpu = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)),
                            mode=mode_with_gpu)
    f_gpu2 = theano.function([a, b], T.join(0, T.zeros_like(a),
                                            T.ones_like(b)) + 4,
                             mode=mode_with_gpu)
    assert sum([node.op == T.alloc for node in f.maker.fgraph.toposort()]) == 2
    assert sum([node.op == T.join for node in f.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, GpuAlloc)
                for node in f_gpu.maker.fgraph.toposort()]) == 2
    assert sum([node.op == gpu_join
                for node in f_gpu.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, GpuAlloc)
                for node in f_gpu2.maker.fgraph.toposort()]) == 2
    assert sum([node.op == gpu_join
                for node in f_gpu2.maker.fgraph.toposort()]) == 1
    assert numpy.allclose(f(a_val, b_val), f_gpu2(a_val, b_val)) 
Author: muhanzhang, Project: D-VAE, Lines: 26, Source: test_basic_ops.py

Example 3: test_opt_gpujoin_onlyajoin

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import join [as alias]
def test_opt_gpujoin_onlyajoin():
    # from a bug in normal sampling
    _a = numpy.asarray([[1, 2], [3, 4]], dtype='float32')
    _b = numpy.asarray([[5, 6, 7], [8, 9, 10]], dtype='float32')
    a = cuda.shared_constructor(_a)
    b = cuda.shared_constructor(_b)

    c = tensor.join(1, a, b)

    f = theano.function([], c, mode=mode_with_gpu)

    f()

    graph_nodes = f.maker.fgraph.toposort()

    assert isinstance(graph_nodes[-1].op, cuda.HostFromGpu)
    assert isinstance(graph_nodes[-2].op, cuda.GpuJoin)

    assert numpy.all(f() == numpy.concatenate([_a, _b], axis=1))

    # test mixed dtype
    _b = numpy.asarray([[5, 6, 7], [8, 9, 10]], dtype='float64')
    b = theano.tensor.constant(_b)

    c = tensor.join(1, a, b)

    f = theano.function([], c, mode=mode_with_gpu)

    f()

    graph_nodes = f.maker.fgraph.toposort()
    assert isinstance(graph_nodes[-1].op, theano.tensor.Join)

    assert numpy.all(f() == numpy.concatenate([_a, _b], axis=1)) 
Author: muhanzhang, Project: D-VAE, Lines: 36, Source: test_opt.py

Example 4: test_opt_gpujoin_joinvectors_elemwise_then_minusone

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import join [as alias]
def test_opt_gpujoin_joinvectors_elemwise_then_minusone():
    # from a bug in gpu normal sampling
    _a = numpy.asarray([1, 2, 3, 4], dtype='float32')
    _b = numpy.asarray([5, 6, 7, 8], dtype='float32')
    a = cuda.shared_constructor(_a)
    b = cuda.shared_constructor(_b)

    a_prime = tensor.cos(a)
    b_prime = tensor.sin(b)

    c = tensor.join(0, a_prime, b_prime)

    d = c[:-1]

    f = theano.function([], d, mode=mode_with_gpu)

    graph_nodes = f.maker.fgraph.toposort()

    assert isinstance(graph_nodes[-1].op, cuda.HostFromGpu)
    assert isinstance(graph_nodes[-2].op, cuda.GpuSubtensor)
    assert isinstance(graph_nodes[-3].op, cuda.GpuJoin)

    concat = numpy.concatenate([numpy.cos(_a), numpy.sin(_b)], axis=0)
    concat = concat[:-1]

    assert numpy.allclose(numpy.asarray(f()), concat) 
Author: muhanzhang, Project: D-VAE, Lines: 28, Source: test_opt.py

Example 5: test_gpujoin_no_rebroadcast

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import join [as alias]
def test_gpujoin_no_rebroadcast():
    _a = numpy.asarray([[1, 2], [3, 4]], dtype='float32')
    a = tcn.shared_constructor(_a)
    f = theano.function([], T.join(1, a))
    l = f.maker.fgraph.toposort()
    assert not any([isinstance(x.op, T.Rebroadcast) for x in l]) 
Author: muhanzhang, Project: D-VAE, Lines: 8, Source: test_basic_ops.py

Example 6: local_gpuajoin_1

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import join [as alias]
def local_gpuajoin_1(node):
    # join of a single element
    if (isinstance(node.op, GpuJoin) and
            len(node.inputs) == 2):
        return [node.inputs[1]] 
Author: muhanzhang, Project: D-VAE, Lines: 7, Source: opt.py

Example 7: setUp

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import join [as alias]
def setUp(self):
        super(G_Join_and_Split, self).setUp()
        self.mode = mode_with_gpu.excluding('constant_folding')
        self.join_op = GpuJoin()
        self.split_op_class = GpuSplit
        # Use join instead of MakeVector since there is no MakeVector on GPU
        self.make_vector_op = GpuJoin()
        # this is to avoid errors with limited devices
        self.floatX = 'float32'
        self.hide_error = theano.config.mode not in ['DebugMode', 'DEBUG_MODE']
        self.shared = gpuarray_shared_constructor 
Author: muhanzhang, Project: D-VAE, Lines: 13, Source: test_basic_ops.py

Example 8: test_join

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import join [as alias]
def test_join(self):
        tv = numpy.asarray(self.rng.uniform(size=(10,)),
                           theano.config.floatX)
        t = theano.shared(tv)
        out = tensor.join(0, self.x, t)
        self.check_rop_lop(out, (self.in_shape[0] + 10,)) 
Author: muhanzhang, Project: D-VAE, Lines: 8, Source: test_rop.py

Example 9: _lmul

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import join [as alias]
def _lmul(self, x, T):
            if T:
                if len(self.col_shape())>1:
                    x2 = x.flatten(2)
                else:
                    x2 = x
                n_rows = x2.shape[0]
                offset = 0
                xWlist = []
                assert len(self._col_sizes) == len(self._Wlist)
                for size, W in zip(self._col_sizes, self._Wlist):
                    # split the output rows into pieces
                    x_s = x2[:,offset:offset+size]
                    # multiply each piece by one transform
                    xWlist.append(
                            W.lmul(
                                x_s.reshape(
                                    (n_rows,)+W.col_shape()),
                                T))
                    offset += size
                # sum the results
                rval = tensor.add(*xWlist)
            else:
                # multiply the input by each transform
                xWlist = [W.lmul(x,T).flatten(2) for W in self._Wlist]
                # join the results
                rval = tensor.join(1, *xWlist)
            return rval 
Author: zchengquan, Project: TextDetector, Lines: 30, Source: linear.py

Example 10: multiple_l2_norm

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import join [as alias]
def multiple_l2_norm(tensors):
    """
    Get the L2 norm of multiple tensors.
    This function is taken from blocks.
    """
    # Another way for doing this, I don't know which one is fast
    # return T.sqrt(sum(T.sum(t ** 2) for t in tensors))
    flattened = [T.as_tensor_variable(t).flatten() for t in tensors]
    flattened = [(t if t.ndim > 0 else t.dimshuffle('x'))
                 for t in flattened]
    joined = T.join(0, *flattened)
    return T.sqrt(T.sqr(joined).sum()) 
Author: zomux, Project: deepy, Lines: 14, Source: util.py
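
As a hedged usage note (this snippet is not part of the deepy project): with multiple_l2_norm defined as above, it can compute the joint L2 norm over several shared parameters, e.g. for monitoring or clipping gradient norms. The parameter names, shapes, and values below are illustrative only.

import numpy
import theano

# Hypothetical parameters; any list of Theano tensors works.
W = theano.shared(numpy.ones((3, 4), dtype=theano.config.floatX), name='W')
b = theano.shared(numpy.full((4,), 2.0, dtype=theano.config.floatX), name='b')

norm = multiple_l2_norm([W, b])  # sqrt of the sum of all squared entries
f = theano.function([], norm)
print(f())  # sqrt(3*4*1 + 4*4) = sqrt(28) ~= 5.29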

Example 11: circle_gaussian_mixture

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import join [as alias]
def circle_gaussian_mixture(num_modes, num_samples, dimension, r=0.0, std=1.0, theano_rng=None):
    global default_rng
    if theano_rng is None:
        if default_rng is None:
            init_rng()
        theano_rng = default_rng
    samples = None
    if dimension == 1:
        for i in range(num_modes):
            num_samples_local = (num_samples + i) // num_modes
            avg = -r + 2 * r / max(1, num_modes - 1) * i
            samples_local = theano_rng.normal((num_samples_local, dimension), avg=avg, std=std)
            if samples is None:
                samples = samples_local
            else:
                samples = join(0, samples, samples_local)
    elif dimension >= 2:
        for i in range(num_modes):
            num_samples_local = (num_samples + i) // num_modes
            x = r * cos(2 * pi / num_modes * i)
            y = r * sin(2 * pi / num_modes * i)
            samples_local_x = theano_rng.normal((num_samples_local, 1), avg=0.0, std=std)
            samples_local_x += x
            samples_local_y = theano_rng.normal((num_samples_local, 1), avg=0.0, std=std)
            samples_local_y += y
            samples_local = join(1, samples_local_x, samples_local_y)
            if dimension > 2:
                samples_local_left = theano_rng.normal((num_samples_local, dimension - 2), avg=0.0, std=std)
                samples_local = join(1, samples_local, samples_local_left)
            
            if samples is None:
                samples = samples_local
            else:
                samples = join(0, samples, samples_local)
    return samples 
Author: fjxmlzn, Project: PacGAN, Lines: 37, Source: distributions.py

Example 12: local_gpu_join

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import join [as alias]
def local_gpu_join(node):
    """
    Inspired by the opt for convop.

    Very loose notation follows.

    Subgraphs concerned first look like
        [array of HostTensor] -> HostToGpu -> GpuToHost
        -> Join -> HostToGpu -> GpuToHost

    First we apply this Opt:

    join(host_from_gpu) -> host_from_gpu(gpu_join)

    then, as an intermediate result, there should be
    host_from_gpu(gpu_join) -> HostToGpu -> GpuToHost
    this unnecessary GpuToHost -> HostToGpu should be removed
    by other opts, leaving us with
    host_from_gpu(gpu_join)

    For intermediate places in the graph not covered by the first opt, the
    following could be useful:

    gpu_from_host(join) -> gpu_join(gpu_from_host)

    not implemented yet.

    """
    if isinstance(node.op, tensor.Join):
        # optimizing this case:
        # join(host_from_gpu) -> host_from_gpu(gpu_join)

        axis_and_tensors = node.inputs

        matches = [t.dtype == 'float32' and
                   ((t.owner is not None and
                     isinstance(t.owner.op, HostFromGpu)) or
                    isinstance(t, gof.Constant)) for t in axis_and_tensors[1:]]

        if all(matches):
            new_tensors = [as_cuda_ndarray_variable(t)
                           for t in axis_and_tensors[1:]]
            new_a_and_t = [axis_and_tensors[0]] + new_tensors

            replacement_node = host_from_gpu(gpu_join(*new_a_and_t))

            return [replacement_node]

# This is a copy of the same opt in tensor to make the tests happy,
# but I'm not convinced it is actually needed. 
Author: muhanzhang, Project: D-VAE, Lines: 52, Source: opt.py

Example 13: local_gpualloc

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import join [as alias]
def local_gpualloc(node):
    replace = False
    if node.op == tensor.alloc:
        if node.inputs[0].owner and \
           isinstance(node.inputs[0].owner.op, HostFromGpu):
            replace = True
        elif all([c != 'output' and c.op == gpu_from_host
                  for c, idx in node.outputs[0].clients]):
            # if all clients are on gpu
            replace = True
        elif all([c != 'output' and
                  c.op == tensor.join and
                  all(i.owner and
                      i.owner.op in [host_from_gpu, tensor.alloc]
                      for i in c.inputs[1:])
                  for c, idx in node.outputs[0].clients]):
            # if the client is on gpu or alloc
            replace = True
        if replace and node.inputs[0].dtype != 'float32':
            replace = False
    if replace:
        val = node.inputs[0]
        shp = node.inputs[1:]
        old_out = node.outputs[0]
        new_out = host_from_gpu(gpu_alloc(val, *shp))

        # Sigh. it's an annoying thing about theano
        # that you can't add information to the graph.
        # If for some reason it has come to light that
        # one of the dimensions is broadcastable, we have to hide that
        # or the optimization won't go through.
        if new_out.type != old_out.type:
            assert new_out.type.ndim == old_out.type.ndim
            assert new_out.type.dtype == old_out.type.dtype
            # it seems to have happened that new_out has some broadcastable
            # dimensions that old_out did not have
            for b_old, b_new in zip(old_out.type.broadcastable,
                                    new_out.type.broadcastable):
                assert b_new or (not b_old)
            new_out = tensor.patternbroadcast(new_out, old_out.broadcastable)

        return [new_out] 
Author: muhanzhang, Project: D-VAE, Lines: 44, Source: opt.py


Note: The theano.tensor.join method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the License of the corresponding project; do not reproduce without permission.