

Python common_layers.conv1d Method Code Examples

This article compiles typical usage examples of the common_layers.conv1d method from Python's tensor2tensor.layers.common_layers module. If you are wondering what common_layers.conv1d does, how to call it, or what it looks like in practice, the curated examples below may help. You can also explore other usage examples from tensor2tensor.layers.common_layers.


The following presents 6 code examples of the common_layers.conv1d method, sorted by popularity by default.

Example 1: testConv1d

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import conv1d [as alias]
def testConv1d(self):
    x = np.random.rand(5, 7, 11)
    with self.test_session() as session:
      y = common_layers.conv1d(tf.constant(x, dtype=tf.float32), 13, 1)
      session.run(tf.global_variables_initializer())
      res = session.run(y)
    self.assertEqual(res.shape, (5, 7, 13)) 
Developer ID: akzaidi, Project: fine-lm, Lines of code: 9, Source: common_layers_test.py
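
For context outside the test harness, a minimal standalone sketch of the same call might look like this (assuming TF1 graph mode, as in the test above; the shapes are illustrative). With kernel size 1, conv1d acts as a position-wise projection, so only the channel dimension changes:

import numpy as np
import tensorflow as tf
from tensor2tensor.layers import common_layers

# A [batch, length, channels] input; conv1d with 13 filters and kernel size 1
# projects each position independently from 11 channels to 13.
x = tf.constant(np.random.rand(5, 7, 11), dtype=tf.float32)
y = common_layers.conv1d(x, 13, 1)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(y).shape)  # (5, 7, 13)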

Example 2: compute_attention_component

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import conv1d [as alias]
def compute_attention_component(antecedent,
                                total_depth,
                                filter_width=1,
                                padding="VALID",
                                name="c",
                                vars_3d_num_heads=0):
  """Computes attention compoenent (query, key or value).

  Args:
    antecedent: a Tensor with shape [batch, length, channels]
    total_depth: an integer
    filter_width: An integer specifying how wide you want the attention
      component to be.
    padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
    name: a string specifying scope name.
    vars_3d_num_heads: an optional integer (if we want to use 3d variables)

  Returns:
    c : [batch, length, depth] tensor
  """
  if vars_3d_num_heads > 0:
    assert filter_width == 1
    input_depth = antecedent.get_shape().as_list()[-1]
    depth_per_head = total_depth // vars_3d_num_heads
    initializer_stddev = input_depth ** -0.5
    if "q" in name:
      initializer_stddev *= depth_per_head ** -0.5
    var = tf.get_variable(
        name, [input_depth,
               vars_3d_num_heads,
               total_depth // vars_3d_num_heads],
        initializer=tf.random_normal_initializer(stddev=initializer_stddev))
    var = tf.cast(var, antecedent.dtype)
    var = tf.reshape(var, [input_depth, total_depth])
    return tf.tensordot(antecedent, var, axes=1)
  if filter_width == 1:
    return common_layers.dense(
        antecedent, total_depth, use_bias=False, name=name)
  else:
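    # Pass padding by keyword: conv1d's fourth positional parameter is
    # dilation_rate, so a positional padding argument would be misbound.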
    return common_layers.conv1d(
        antecedent, total_depth, filter_width, padding=padding, name=name)
Developer ID: akzaidi, Project: fine-lm, Lines of code: 43, Source: common_attention.py
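
As a hypothetical usage sketch (the shapes, total_depth value, and scope names below are illustrative, not taken from the original projects), the function is typically called once per attention component; the name argument sets the variable scope, and when vars_3d_num_heads > 0 a "q" in the name additionally rescales the query initializer:

x = tf.random_uniform([2, 16, 64])  # [batch, length, channels]
with tf.variable_scope("attention"):
  q = compute_attention_component(x, 128, name="q")
  k = compute_attention_component(x, 128, name="k")
  v = compute_attention_component(x, 128, name="v")
  # With filter_width > 1, the component is computed from a local window
  # of positions via the conv1d branch above:
  k_local = compute_attention_component(
      x, 128, filter_width=3, padding="SAME", name="k_local")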

Example 3: testConv1d

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import conv1d [as alias]
def testConv1d(self):
    x = np.random.rand(5, 7, 11)
    y = common_layers.conv1d(tf.constant(x, dtype=tf.float32), 13, 1)
    self.evaluate(tf.global_variables_initializer())
    res = self.evaluate(y)
    self.assertEqual(res.shape, (5, 7, 13)) 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 8, Source: common_layers_test.py

Example 4: compute_attention_component

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import conv1d [as alias]
def compute_attention_component(antecedent,
                                total_depth,
                                filter_width=1,
                                padding="VALID",
                                name="c",
                                vars_3d_num_heads=0):
  """Computes attention compoenent (query, key or value).

  Args:
    antecedent: a Tensor with shape [batch, length, channels]
    total_depth: an integer
    filter_width: An integer specifying how wide you want the attention
      component to be.
    padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
    name: a string specifying scope name.
    vars_3d_num_heads: an optional integer (if we want to use 3d variables)

  Returns:
    c : [batch, length, depth] tensor
  """
  if vars_3d_num_heads > 0:
    assert filter_width == 1
    input_depth = antecedent.get_shape().as_list()[-1]
    depth_per_head = total_depth // vars_3d_num_heads
    initializer_stddev = input_depth ** -0.5
    if "q" in name:
      initializer_stddev *= depth_per_head ** -0.5
    var = tf.get_variable(
        name, [input_depth,
               vars_3d_num_heads,
               total_depth // vars_3d_num_heads],
        initializer=tf.random_normal_initializer(stddev=initializer_stddev))
    var = tf.cast(var, antecedent.dtype)
    var = tf.reshape(var, [input_depth, total_depth])
    return tf.tensordot(antecedent, var, axes=1)
  if filter_width == 1:
    return common_layers.dense(
        antecedent, total_depth, use_bias=False, name=name)
  else:
    return common_layers.conv1d(
        antecedent, total_depth, filter_width, padding=padding, name=name) 
Developer ID: mlperf, Project: training_results_v0.5, Lines of code: 43, Source: common_attention.py
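
The vars_3d_num_heads branch above amounts to a bias-free dense projection whose weight is stored per head as a 3-D variable and flattened before the contraction. A quick shape walk-through (the concrete sizes are illustrative, not from the original projects):

import tensorflow as tf

# input_depth=64, vars_3d_num_heads=8, total_depth=128
x = tf.random_uniform([2, 16, 64])             # [batch, length, input_depth]
var = tf.get_variable("w", [64, 8, 128 // 8])  # [input_depth, heads, depth_per_head]
var = tf.reshape(var, [64, 128])               # flatten heads to [input_depth, total_depth]
y = tf.tensordot(x, var, axes=1)               # [2, 16, 128]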

Example 5: testRecompute

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import conv1d [as alias]
def testRecompute(self):

    def layer(x, name=None):
      with tf.variable_scope(name, default_name="layer"):
        x = tf.contrib.layers.layer_norm(x)
        x = tf.layers.conv1d(
            x,
            10,
            1,
            use_bias=False,
            kernel_initializer=tf.constant_initializer(42.42))
        x = tf.nn.relu(x)
        return x

    def fn(x):
      out = x
      for _ in range(3):
        out = layer(out)
      return out

    @common_layers.recompute_grad
    def fn_recompute(x):
      return fn(x)

    x = tf.random_uniform((3, 1, 3))
    recompute_vars = None
    with tf.variable_scope("recompute") as vs:
      out1 = tf.reduce_sum(fn_recompute(x))
      recompute_vars = vs.trainable_variables()
    reg_vars = None
    with tf.variable_scope("regular") as vs:
      out2 = tf.reduce_sum(fn(x))
      reg_vars = vs.trainable_variables()

    grad1 = tf.gradients(out1, recompute_vars)
    grad2 = tf.gradients(out2, reg_vars)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      outs = sess.run([out1, out2, grad1, grad2])
      self.assertAllClose(outs[0], outs[1])
      for g1, g2 in zip(outs[2], outs[3]):
        self.assertAllClose(g1, g2) 
Developer ID: akzaidi, Project: fine-lm, Lines of code: 45, Source: common_layers_test.py

Example 6: testRecompute

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import conv1d [as alias]
def testRecompute(self):

    def layer(x, name=None):
      with tf.variable_scope(name, default_name="layer"):
        x = common_layers.layer_norm(x)
        x = tf.layers.conv1d(
            x,
            10,
            1,
            use_bias=False,
            kernel_initializer=tf.constant_initializer(42.42))
        x = tf.nn.relu(x)
        return x

    def fn(x):
      out = x
      for _ in range(3):
        out = layer(out)
      return out

    @common_layers.recompute_grad
    def fn_recompute(x):
      return fn(x)

    x = tf.random_uniform((3, 1, 3))
    recompute_vars = None
    with tf.variable_scope("recompute") as vs:
      out1 = tf.reduce_sum(fn_recompute(x))
      recompute_vars = vs.trainable_variables()
    reg_vars = None
    with tf.variable_scope("regular") as vs:
      out2 = tf.reduce_sum(fn(x))
      reg_vars = vs.trainable_variables()

    grad1 = tf.gradients(out1, recompute_vars)
    grad2 = tf.gradients(out2, reg_vars)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      outs = sess.run([out1, out2, grad1, grad2])
      self.assertAllClose(outs[0], outs[1])
      for g1, g2 in zip(outs[2], outs[3]):
        self.assertAllClose(g1, g2) 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 45, Source: common_layers_test.py
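
Both testRecompute variants check that common_layers.recompute_grad leaves outputs and gradients unchanged while recomputing activations in the backward pass, trading extra compute for lower memory. A minimal usage sketch (assuming the same TF1 graph mode as the tests; the layer sizes and scope name are illustrative):

import tensorflow as tf
from tensor2tensor.layers import common_layers

@common_layers.recompute_grad
def block(x):
  # Activations created inside this block are recomputed when gradients
  # are taken, rather than kept in memory after the forward pass.
  x = tf.layers.dense(x, 10, activation=tf.nn.relu, name="dense1")
  return tf.layers.dense(x, 10, name="dense2")

with tf.variable_scope("model"):
  x = tf.random_uniform((8, 16))
  loss = tf.reduce_sum(block(x))
grads = tf.gradients(loss, tf.trainable_variables())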


Note: The tensor2tensor.layers.common_layers.conv1d examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors, and copyright remains with the original authors; consult the corresponding project's license before distributing or using the code. Do not reproduce without permission.