

Python sugartensor.concat Method Code Examples

This article collects typical usage examples of the Python sugartensor.concat method. If you are wondering what sugartensor.concat does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the sugartensor package.


The following presents 7 code examples of the sugartensor.concat method, sorted by popularity by default.

Example 1: sg_concat

# Required import: import sugartensor [as alias]
# Or: from sugartensor import concat [as alias]
def sg_concat(tensor, opt):
    r"""Concatenates tensors along a axis.

    See `tf.concat()` in tensorflow.

    Args:
      tensor: A `Tensor` (automatically given by chain).
      opt:
        target: A `Tensor` or a list of `Tensor`s. Must have the same rank as
          `tensor`, and all dimensions except `opt.axis` must be equal.
        axis: Target axis. Default is the last one.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    assert opt.target is not None, 'target is mandatory.'
    opt += tf.sg_opt(axis=tensor.get_shape().ndims-1)
    target = opt.target if isinstance(opt.target, (tuple, list)) else [opt.target]
    return tf.concat([tensor] + target, opt.axis, name=opt.name) 
Author: buriburisuri, Project: sugartensor, Lines: 22, Source: sg_transform.py
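
For context, a minimal usage sketch (untested), assuming sugartensor's usual `import sugartensor as tf` convention and that `sg_concat` is injected as a `Tensor` method by the library's sugar-function mechanism, as the chained calls in the other examples suggest:

import sugartensor as tf

x = tf.constant([[1., 2.], [3., 4.]])  # shape (2, 2)
y = tf.constant([[5., 6.], [7., 8.]])  # shape (2, 2)

z = x.sg_concat(target=y)           # along the last axis (default): (2, 4)
z0 = x.sg_concat(target=y, axis=0)  # along axis 0: (4, 2)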

Example 2: sg_emb

# Required import: import sugartensor [as alias]
# Or: from sugartensor import concat [as alias]
def sg_emb(**kwargs):
    r"""Returns a look-up table for embedding.
    
    kwargs:
      name: A name for the layer.
      emb: A 2-D array (optional).
        If given, it is used to initialize the look-up table.
        If None, the table is initialized internally with shape
        `[voca_size, dim]`.
        Note that its first row is filled with 0's, associated with padding.
      in_dim: A positive `integer`. The size of input dimension.
      dim: A positive `integer`. The size of output dimension.
      voca_size: A positive integer. The size of vocabulary.
      summary: If True, summaries are added. The default is True.

    Returns:
      A 2-D `Tensor` of float32.
    """
    opt = tf.sg_opt(kwargs)
    assert opt.name is not None, 'name is mandatory.'

    if opt.emb is None:
        # initialize embedding matrix
        assert opt.voca_size is not None, 'voca_size is mandatory.'
        assert opt.dim is not None, 'dim is mandatory.'
        w = tf.sg_initializer.he_uniform(opt.name, (opt.voca_size - 1, opt.dim), summary=opt.summary)
    else:
        # use given embedding matrix
        w = tf.sg_initializer.external(opt.name, value=opt.emb, summary=opt.summary)

    # The 1st row must be all zeros for padding; being a constant, it is never updated by backprop.
    emb = tf.concat([tf.zeros((1, opt.dim), dtype=tf.sg_floatx), w], 0)

    return emb


# layer normalization for rnn 
Author: buriburisuri, Project: sugartensor, Lines: 38, Source: sg_layer.py
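
As a hypothetical usage sketch (untested; the sizes are illustrative): build a small look-up table with sg_emb and fetch embeddings with the standard `tf.nn.embedding_lookup`, relying on row 0 being the all-zero padding vector:

import sugartensor as tf

emb = tf.sg_emb(name='emb', voca_size=100, dim=50)  # (100, 50); row 0 is zeros
ids = tf.constant([[3, 7, 0], [1, 2, 0]])           # token id 0 = padding
vec = tf.nn.embedding_lookup(emb, ids)              # (2, 3, 50)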

Example 3: sg_periodic_shuffle

# Required import: import sugartensor [as alias]
# Or: from sugartensor import concat [as alias]
def sg_periodic_shuffle(tensor, opt):
    r""" Periodic shuffle transformation for SubPixel CNN.
        (see [Shi et al. 2016](http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Shi_Real-Time_Single_Image_CVPR_2016_paper.pdf)
        
    Args:
      tensor: A tensor (automatically given by chain).
      opt:
        factor: The upsampling factor for each spatial dimension. Default is 2.
        name: If provided, it replaces current tensor's name.

    Returns:
        A tensor
    """
    # default factor
    opt += tf.sg_opt(factor=2)

    # get current shape
    batch, row, col, channel = tensor.get_shape().as_list()

    # get target channel num
    channel_target = channel // (opt.factor * opt.factor)
    channel_factor = channel // channel_target

    # intermediate shape for shuffling
    shape_1 = [batch, row, col, channel_factor // opt.factor, channel_factor // opt.factor]
    shape_2 = [batch, row * opt.factor, col * opt.factor, 1]

    # reshape and transpose for periodic shuffling for each channel
    out = []
    for i in range(channel_target):
        out.append((tensor[:, :, :, i*channel_factor:(i+1)*channel_factor])
                   .sg_reshape(shape=shape_1)
                   .sg_transpose(perm=(0, 1, 3, 2, 4))
                   .sg_reshape(shape=shape_2))

    # final output
    out = tf.concat(out, 3)

    return tf.identity(out, name=opt.name) 
Author: buriburisuri, Project: sugartensor, Lines: 41, Source: sg_transform.py
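
A shape-only sketch (untested) of the transformation with the default factor of 2: each output channel consumes factor * factor = 4 input channels, and both spatial dimensions grow by the factor:

import sugartensor as tf

x = tf.zeros((16, 8, 8, 12))         # (batch, row, col, channel)
y = x.sg_periodic_shuffle(factor=2)  # -> (16, 16, 16, 3)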

Example 4: sg_inverse_periodic_shuffle

# Required import: import sugartensor [as alias]
# Or: from sugartensor import concat [as alias]
def sg_inverse_periodic_shuffle(tensor, opt):
    r"""Inverse periodic shuffle transformation for SubPixel CNN.
        (see [Shi et al. 2016](http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Shi_Real-Time_Single_Image_CVPR_2016_paper.pdf)
        
    Args:
      tensor: A tensor (automatically given by chain).
      opt:
        factor: The downsampling factor for each spatial dimension. Default is 2.
        name: If provided, it replaces current tensor's name.

    Returns:
        A tensor
    """
    # default factor
    opt += tf.sg_opt(factor=2)

    # get current shape
    batch, row, col, channel = tensor.get_shape().as_list()

    # get the channel fan-out factor (factor * factor)
    channel_factor = opt.factor * opt.factor

    # intermediate shape for shuffling
    shape_1 = [batch, row // opt.factor, col // opt.factor, channel_factor // opt.factor, channel_factor // opt.factor]
    shape_2 = [batch, row // opt.factor, col // opt.factor, channel_factor]

    # reshape and transpose for periodic shuffling for each channel
    out = []
    for i in range(channel):
        out.append(tensor[:, :, :, i]
                   .sg_expand_dims()
                   .sg_reshape(shape=shape_1)
                   .sg_transpose(perm=(0, 1, 3, 2, 4))
                   .sg_reshape(shape=shape_2))

    # final output
    out = tf.concat(out, 3)

    return tf.identity(out, name=opt.name) 
Author: buriburisuri, Project: sugartensor, Lines: 41, Source: sg_transform.py
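
Correspondingly, a shape-only sketch (untested) of the inverse: both spatial dimensions shrink by the factor and each channel fans out into factor * factor = 4 channels, undoing the forward shuffle:

import sugartensor as tf

y = tf.zeros((16, 16, 16, 3))                # (batch, row, col, channel)
x = y.sg_inverse_periodic_shuffle(factor=2)  # -> (16, 8, 8, 12)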

Example 5: sg_quasi_conv1d

# Required import: import sugartensor [as alias]
# Or: from sugartensor import concat [as alias]
def sg_quasi_conv1d(tensor, opt):
    '''
    Args:
      tensor: A 3-D tensor of shape [batch size, time steps, embedding size]
          for the original X, or [batch size * 4, time steps, embedding size]
          for the stacked decoder inputs.

    Returns:
      A 3-D tensor of shape [batch size * 3, time steps, embedding size]:
      Z, F, and O concatenated along the batch axis.
    '''
    opt += tf.sg_opt(is_enc=False)
    
    # Split into H and H_zfo
    H = tensor[:Hp.batch_size]
    H_z = tensor[Hp.batch_size:2*Hp.batch_size]
    H_f = tensor[2*Hp.batch_size:3*Hp.batch_size]
    H_o = tensor[3*Hp.batch_size:]
    if opt.is_enc:
        H_z, H_f, H_o = 0, 0, 0
    
    # Convolution and merging
    with tf.sg_context(size=opt.size, act="linear", causal=(not opt.is_enc)):
        Z = H.sg_aconv1d() + H_z # (16, 150, 320)
        F = H.sg_aconv1d() + H_f # (16, 150, 320)
        O = H.sg_aconv1d() + H_o # (16, 150, 320)

    # Activation
    Z = Z.sg_bypass(act="tanh") # (16, 150, 320)
    F = F.sg_bypass(act="sigmoid") # (16, 150, 320)
    O = O.sg_bypass(act="sigmoid") # (16, 150, 320)
    
    # Masking
    #M = tf.sign(tf.abs(tf.reduce_sum(H, axis=-1, keep_dims=True))) # (16, 150, 1) float32. 0 or 1
    #Z *= M # broadcasting
    #F *= M # broadcasting
    #O *= M # broadcasting
    
    # Concat
    ZFO = tf.concat([Z, F, O], 0)
    
    return ZFO # (16*3, 150, 320)

# injection 
Author: Kyubyong, Project: quasi-rnn, Lines: 42, Source: train.py
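
The trailing `# injection` comment suggests the project registers this user-defined function as a `Tensor` method. A hedged sketch of how that is typically done in sugartensor (assuming `tf.sg_inject_func`, the library's hook for user-defined sugar functions):

tf.sg_inject_func(sg_quasi_conv1d)

# After injection, a stacked (batch * 4, time steps, embed) tensor can be chained:
# ZFO = stacked.sg_quasi_conv1d(is_enc=False, size=6)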

Example 6: sg_densenet_layer

# Required import: import sugartensor [as alias]
# Or: from sugartensor import concat [as alias]
def sg_densenet_layer(x, opt):
    r"""Applies basic architecture of densenet layer.

    Note that the fc layers in the original architecture
      are replaced with fully convolutional layers here.
      For convenience, we still call them fc layers, though.

    Args:
      x: A `Tensor`.
      opt:
          dim: An integer. Dimension for this densenet layer.
          num: An integer. Number of times to repeat the dense block.
          act: String. The activation function name. Default is 'relu'.
          trans: Boolean. If True (default), a transition layer is appended.
          reuse: Boolean (optional). If True, all variables are reused from the previous network.
          name: String (optional). Used as the convolution layer name prefix.

    Returns:
      A `Tensor`.
    """
    assert opt.dim is not None, 'dim is mandatory.'
    assert opt.num is not None, 'num is mandatory.'

    # default stride
    opt += tf.sg_opt(stride=1, act='relu', trans=True)

    # format convolutional layer name
    def cname(index):
        return opt.name if opt.name is None else opt.name + '_%d' % index

    # dense layer
    with tf.sg_context(bias=False, reuse=opt.reuse):
        out = x
        for i in range(opt.num):
            # dense block
            out_new = (out
                       .sg_bypass(act=opt.act, bn=True, name=cname(3 * i + 1))
                       .sg_conv(dim=opt.dim // 4, size=1, act=opt.act, bn=True, name=cname(3 * i + 2))
                       .sg_conv(dim=opt.dim, size=3, name=cname(3 * i + 3)))
            out = tf.concat([out_new, out], 3)

        # transition layer
        if opt.trans:
            out = (out
                   .sg_bypass(act=opt.act, bn=True, name=cname(3 * i + 4))
                   .sg_conv(size=1, name=cname(3 * i + 5))
                   .sg_pool(avg=True))

    return out


# construct dense network graphs 
Author: buriburisuri, Project: sugartensor, Lines: 54, Source: sg_net.py
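
A minimal call sketch (untested; the shapes and names are illustrative): since `sg_densenet_layer` takes a plain `(x, opt)` pair, it can be driven directly with a `tf.sg_opt` options bag, assuming the module path matches the sg_net.py source file noted above:

import sugartensor as tf
from sugartensor.sg_net import sg_densenet_layer

x = tf.zeros((8, 32, 32, 16))  # (batch, height, width, channels)
out = sg_densenet_layer(x, tf.sg_opt(dim=32, num=4, name='dense1'))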

Example 7: sg_quasi_rnn

# Required import: import sugartensor [as alias]
# Or: from sugartensor import concat [as alias]
def sg_quasi_rnn(tensor, opt):
    # Split
    if opt.att:
        H, Z, F, O = tf.split(tensor, 4, axis=0) # (16, 150, 320) for all
    else:
        Z, F, O = tf.split(tensor, 3, axis=0) # (16, 150, 320) for all
    
#     M = tf.sign(tf.abs(tf.reduce_sum(Z, axis=-1, keep_dims=True))) 
    # step func
    def step(z, f, o, c):
        '''
        Runs fo-pooling at each time step
        '''
        c = f * c + (1 - f) * z
        
        if opt.att: # attention
            a = tf.nn.softmax(tf.einsum("ijk,ik->ij", H, c)) # alpha. (16, 150) 
            k = (a.sg_expand_dims() * H).sg_sum(axis=1) # attentional sum. (16, 320) 
            h = o * (k.sg_dense(act="linear") + \
                     c.sg_dense(act="linear"))
        else:
            h = o * c
        
        return h, c # hidden states, (new) cell memories
    
    # Do rnn loop
    c, hs = 0, []
    timesteps = tensor.get_shape().as_list()[1]
    for t in range(timesteps):
        z = Z[:, t, :] # (16, 320)
        f = F[:, t, :] # (16, 320)
        o = O[:, t, :] # (16, 320)

        # apply step function
        h, c = step(z, f, o, c) # (16, 320), (16, 320)
        
        # save result
        hs.append(h.sg_expand_dims(axis=1))
    
    # Concat to return    
    H = tf.concat(hs, 1) # (16, 150, 320)
    #seqlen = tf.to_int32(tf.reduce_sum(tf.sign(tf.abs(tf.reduce_sum(H, axis=-1))), 1)) # (16,) float32
    #h = tf.reverse_sequence(input=H, seq_length=seqlen, seq_dim=1)[:, 0, :] # last hidden state vector
    
    if opt.is_enc: 
        H_z = tf.tile((h.sg_dense(act="linear").sg_expand_dims(axis=1)), [1, timesteps, 1])
        H_f = tf.tile((h.sg_dense(act="linear").sg_expand_dims(axis=1)), [1, timesteps, 1])
        H_o = tf.tile((h.sg_dense(act="linear").sg_expand_dims(axis=1)), [1, timesteps, 1])
        concatenated = tf.concat([H, H_z, H_f, H_o], 0) # (16*4, 150, 320)
        return concatenated
    else:
        return H # (16, 150, 320)
    
# injection 
Author: Kyubyong, Project: quasi-rnn, Lines: 56, Source: train.py
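
To make the fo-pooling recurrence inside `step` concrete, here is a plain NumPy sketch of the same arithmetic (c_t = f_t * c_{t-1} + (1 - f_t) * z_t, h_t = o_t * c_t), without the attention branch:

import numpy as np

T, D = 150, 320                       # time steps, embedding size
Z, F, O = (np.random.rand(T, D) for _ in range(3))

c, hs = np.zeros(D), []
for t in range(T):
    c = F[t] * c + (1 - F[t]) * Z[t]  # fo-pooling cell update
    hs.append(O[t] * c)               # gated hidden state
H = np.stack(hs)                      # (T, D), matching tf.concat(hs, 1) above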


Note: The sugartensor.concat method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Refer to the corresponding project's License for distribution and use; do not reproduce without permission.