

Python layers.linear Method Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.layers.python.layers.layers.linear. If you are wondering what layers.linear does, how to call it, or where to find usage examples, the curated code examples below may help. You can also explore further usage examples from the containing module, tensorflow.contrib.layers.python.layers.layers.


The following presents 8 code examples of the layers.linear method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
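
Before turning to the examples, it may help to recall what layers.linear itself computes: in TF 1.x contrib it is fully_connected with activation_fn=None, i.e. a plain affine projection y = xW + b. The following is a minimal sketch; the input shape and scope name are illustrative assumptions, not taken from any of the projects cited below.

import tensorflow as tf
from tensorflow.contrib.layers.python.layers import layers

# x: a [batch_size, 128] float input (shape chosen only for illustration)
x = tf.placeholder(tf.float32, shape=[None, 128])
# y = xW + b, projected to 64 units; layers.linear applies no activation
y = layers.linear(x, 64, scope="projection")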

Example 1: prepare_attention

# Required import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import linear [as alias]
def prepare_attention(attention_states,
                      kd_states,
                      attention_option,
                      num_units,
                      reuse=False):
    # Prepare attention keys / values from attention_states
    with variable_scope.variable_scope("attn_keys", reuse=reuse) as scope:
        attention_keys = layers.linear(attention_states, num_units,
                                       biases_initializer=None, scope=scope)
        if kd_states is not None:
            attention_values = (attention_states, kd_states)
        else:
            attention_values = attention_states
        # Attention scoring function
        attention_score_fn = _create_attention_score_fn("attn_score", num_units, attention_option, reuse)

    # Attention construction function
    attention_construct_fn = _create_attention_construct_fn("attn_construct",
                                                            num_units, attention_score_fn, reuse)

    return attention_keys, attention_values, attention_construct_fn 
Developer: siat-nlp, Project: TransDG, Lines: 23, Source: attention_decoder.py

Example 2: prepare_attention

# Required import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import linear [as alias]
def prepare_attention(attention_states,
                      attention_option,
                      num_units,
                      reuse=False):
  """Prepare keys/values/functions for attention.

  Args:
    attention_states: hidden states to attend over.
    attention_option: how to compute attention, either "luong" or "bahdanau".
    num_units: hidden state dimension.
    reuse: whether to reuse variable scope.

  Returns:
    attention_keys: to be compared with target states.
    attention_values: to be used to construct context vectors.
    attention_score_fn: to compute similarity between key and target states.
    attention_construct_fn: to build attention states.
  """

  # Prepare attention keys / values from attention_states
  with variable_scope.variable_scope("attention_keys", reuse=reuse) as scope:
    attention_keys = layers.linear(
        attention_states, num_units, biases_initializer=None, scope=scope)
  attention_values = attention_states

  # Attention score function
  attention_score_fn = _create_attention_score_fn("attention_score", num_units,
                                                  attention_option, reuse)

  # Attention construction function
  attention_construct_fn = _create_attention_construct_fn("attention_construct",
                                                          num_units,
                                                          attention_score_fn,
                                                          reuse)

  return (attention_keys, attention_values, attention_score_fn,
          attention_construct_fn) 
Developer: kepei1106, Project: SentenceFunction, Lines: 39, Source: my_attention_decoder_fn.py
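
For context, a hypothetical call sketch for the prepare_attention above. It assumes the function and its _create_attention_score_fn / _create_attention_construct_fn helpers are already defined in the same module; the encoder-output shape and variable names are illustrative assumptions, not taken from the original project.

import tensorflow as tf

# encoder outputs to attend over: [batch_size, max_time, num_units] (illustrative)
encoder_outputs = tf.placeholder(tf.float32, shape=[None, None, 512])
(attention_keys, attention_values,
 attention_score_fn, attention_construct_fn) = prepare_attention(
     encoder_outputs, attention_option="luong", num_units=512)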

Example 3: _create_attention_construct_fn

# Required import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import linear [as alias]
def _create_attention_construct_fn(name, num_units, attention_score_fn, reuse):
  """Function to compute attention vectors.

  Args:
    name: to label variables.
    num_units: hidden state dimension.
    attention_score_fn: to compute similarity between key and target states.
    reuse: whether to reuse variable scope.

  Returns:
    attention_construct_fn: to build attention states.
  """
  with variable_scope.variable_scope(name, reuse=reuse) as scope:

    def construct_fn(attention_query, attention_keys, attention_values):
      context = attention_score_fn(attention_query, attention_keys,
                                   attention_values)
      concat_input = array_ops.concat([attention_query, context], 1)
      attention = layers.linear(
          concat_input, num_units, biases_initializer=None, scope=scope)
      return attention

    return construct_fn


# keys: [batch_size, attention_length, attn_size]
# query: [batch_size, 1, attn_size]
# return weights [batch_size, attention_length] 
Developer: kepei1106, Project: SentenceFunction, Lines: 30, Source: my_attention_decoder_fn.py
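
The shape comments above describe the contract of the score function built by _create_attention_score_fn: given a query and a set of keys/values, it returns a context vector computed from softmax attention weights. As a point of reference, here is a hedged Luong-style (dot-product) sketch of a function with that contract; it is written for this article and is not the project's actual _create_attention_score_fn.

import tensorflow as tf

def luong_score_fn(query, keys, values):
    # query: [batch_size, num_units]; keys, values: [batch_size, attention_length, num_units]
    scores = tf.reduce_sum(keys * tf.expand_dims(query, 1), axis=2)     # [batch, length]
    alignments = tf.nn.softmax(scores)                                  # attention weights [batch, length]
    context = tf.reduce_sum(tf.expand_dims(alignments, 2) * values, 1)  # [batch, num_units]
    return context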

Example 4: create_output_fn

# Required import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import linear [as alias]
def create_output_fn(vocab_size):
    with variable_scope.variable_scope("output_fn") as scope:
        def output_fn(x):
            return layers.linear(x, vocab_size, scope=scope)

        return output_fn 
Developer: siat-nlp, Project: TransDG, Lines: 8, Source: attention_decoder.py

Example 5: create_hidden_fn

# Required import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import linear [as alias]
def create_hidden_fn(num_units):
    with variable_scope.variable_scope("hidden_fn") as scope:
        def hidden_fn(x):
            return layers.linear(x, num_units, scope=scope)

        return hidden_fn 
Developer: siat-nlp, Project: TransDG, Lines: 8, Source: attention_decoder.py

Example 6: prepare_multistep_attention

# Required import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import linear [as alias]
def prepare_multistep_attention(encoder_states,
                                decoder_reprs,
                                kd_states1,
                                kd_states2,
                                attention_option,
                                num_units,
                                reuse=False):
    # Prepare attention keys / values from attention_states
    with variable_scope.variable_scope("attn_keys", reuse=reuse) as scope:
        attention_keys1 = layers.linear(encoder_states, num_units, biases_initializer=None, scope=scope)
        attention_values1 = encoder_states
        # Attention scoring function
        attention_score_fn1 = _create_attention_score_fn("attn_score", num_units,
                                                         attention_option, reuse)

    with variable_scope.variable_scope("attn_reprs", reuse=reuse) as scope:
        if decoder_reprs is not None:
            attention_keys2 = layers.linear(decoder_reprs, num_units, biases_initializer=None, scope=scope)
        else:
            attention_keys2 = None
        attention_values2 = decoder_reprs
        # Attention scoring function
        attention_score_fn2 = _create_attention_score_fn("attn_score", num_units,
                                                         attention_option, reuse)

    attention_keys = (attention_keys1, attention_keys2)
    if kd_states1 is not None and kd_states2 is not None:
        attention_values = (attention_values1, attention_values2, kd_states1, kd_states2)
    else:
        attention_values = (attention_values1, attention_values2, None, None)
    attention_score_fn = (attention_score_fn1, attention_score_fn2)

    # Attention construction function
    attention_construct_fn = _create_attention_construct_fn("attn_construct_multi",
                                                            num_units, attention_score_fn, reuse)

    return attention_keys, attention_values, attention_construct_fn 
Developer: siat-nlp, Project: TransDG, Lines: 39, Source: attention_decoder.py

Example 7: _create_attention_construct_fn

# Required import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import linear [as alias]
def _create_attention_construct_fn(name, num_units, attention_score_fn, reuse):
    """Function to compute attention vectors.
    Args:
        name: to label variables.
        num_units: hidden state dimension.
        attention_score_fn: to compute similarity between key and target states.
        reuse: whether to reuse variable scope.
    Returns:
        attention_construct_fn: to build attention states.
    """
    with variable_scope.variable_scope(name, reuse=reuse) as scope:

        def construct_fn(attention_query, attention_keys, attention_values):
            if isinstance(attention_score_fn, tuple):  # multi-step decoding
                attention_score_fn1, attention_score_fn2 = attention_score_fn
                attention_keys1, attention_keys2 = attention_keys
                attention_values1, decoder_reprs, kd_states1, kd_states2 = attention_values
                context1 = attention_score_fn1(attention_query, attention_keys1, attention_values1)
                if kd_states1 is None or kd_states2 is None:
                    context2 = attention_score_fn2(attention_query, attention_keys2, decoder_reprs)
                    concat_input = array_ops.concat([attention_query, context1, context2], 1)
                else:
                    if decoder_reprs is None:
                        concat_input = array_ops.concat([attention_query, context1, kd_states1, kd_states2], 1)
                    else:
                        context2 = attention_score_fn2(attention_query, attention_keys2, decoder_reprs)
                        concat_input = array_ops.concat([attention_query, context1, context2, kd_states1, kd_states2], 1)
            else:  # only one step decoding
                if isinstance(attention_values, tuple):
                    attention_values1, kd_state = attention_values
                    context1 = attention_score_fn(attention_query, attention_keys, attention_values1)
                    concat_input = array_ops.concat([attention_query, context1, kd_state], 1)
                else:
                    context = attention_score_fn(attention_query, attention_keys, attention_values)
                    concat_input = array_ops.concat([attention_query, context], 1)

            attention = layers.linear(concat_input, num_units, biases_initializer=None, scope=scope)
            return attention

        return construct_fn 
Developer: siat-nlp, Project: TransDG, Lines: 42, Source: attention_decoder.py

Example 8: _create_attention_construct_fn

# Required import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import linear [as alias]
def _create_attention_construct_fn(name, num_units, attention_score_fn, reuse):
    """Function to compute attention vectors.
    Args:
        name: to label variables.
        num_units: hidden state dimension.
        attention_score_fn: to compute similarity between key and target states.
        reuse: whether to reuse variable scope.
    Returns:
        attention_construct_fn: to build attention states.
    """
    with variable_scope.variable_scope(name, reuse=reuse) as scope:

        def construct_fn(attention_query, attention_keys, attention_values):
            alignments = None
            if type(attention_score_fn) is tuple:
                context0 = attention_score_fn[0](attention_query, attention_keys[0],
                                                 attention_values[0])
                if len(attention_keys) == 2:
                    context1 = attention_score_fn[1](attention_query, attention_keys[1],
                                                     attention_values[1])
                elif len(attention_keys) == 3:
                    context1 = attention_score_fn[1](attention_query, attention_keys[1:],
                                                     attention_values[1:])
                if type(context1) is tuple:
                    if len(context1) == 2:
                        context1, alignments = context1
                        concat_input = array_ops.concat([attention_query, context0, context1], 1)
                    elif len(context1) == 3:
                        context1, context2, alignments = context1
                        concat_input = array_ops.concat([attention_query, context0, context1, context2], 1)
                else:
                    concat_input = array_ops.concat([attention_query, context0, context1], 1)
            else:
                context = attention_score_fn(attention_query, attention_keys,
                                             attention_values)
                concat_input = array_ops.concat([attention_query, context], 1)
            attention = layers.linear(
                    concat_input, num_units, biases_initializer=None, scope=scope)
            if alignments is None:
                return attention
            else:
                return attention, alignments

        return construct_fn


# keys: [batch_size, attention_length, attn_size]
# query: [batch_size, 1, attn_size]
# return weights [batch_size, attention_length] 
Developer: thu-coai, Project: ccm, Lines: 51, Source: attention_decoder.py


Note: The tensorflow.contrib.layers.python.layers.layers.linear method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.