

Python common_attention.add_timing_signal_nd Method Code Examples

This article collects typical usage examples of the Python method tensor2tensor.layers.common_attention.add_timing_signal_nd. If you are unsure what common_attention.add_timing_signal_nd does, how to call it, or where it is typically used, the curated examples below should help. You can also explore the wider tensor2tensor.layers.common_attention module that the method belongs to.


Six code examples of common_attention.add_timing_signal_nd are shown below, sorted by popularity.
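Before diving into the examples, here is a minimal sketch of what the method does: add_timing_signal_nd adds a sinusoidal position ("timing") signal to an n-dimensional tensor of shape [batch, d1, ..., dn, channels] and returns a tensor of the same shape. The shapes below are illustrative assumptions, written against the TF1-style API used throughout these examples.

# Minimal usage sketch (illustrative shapes, not taken from any of the projects below).
import tensorflow as tf
from tensor2tensor.layers import common_attention

# A batch of 2 feature maps with 8x8 spatial positions and 64 channels.
x = tf.zeros([2, 8, 8, 64])
# Adds sinusoidal position information along each spatial dimension;
# the output shape equals the input shape.
y = common_attention.add_timing_signal_nd(x)
print(y.shape)  # (2, 8, 8, 64)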

Example 1: add_pos_signals

# Required import: from tensor2tensor.layers import common_attention
# Or, importing the method directly: from tensor2tensor.layers.common_attention import add_timing_signal_nd
def add_pos_signals(x, hparams, name="pos_emb"):
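  """Adds position information to x: a sinusoidal timing signal or a learned embedding."""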
  with tf.variable_scope(name, reuse=False):
    if hparams.pos == "timing":
      x = common_attention.add_timing_signal_nd(x)
    else:
      assert hparams.pos == "emb"
      x = common_attention.add_positional_embedding_nd(
          x, hparams.max_length, name)
  return x 
Developer: akzaidi, Project: fine-lm, Lines: 11, Source: common_image_attention.py
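A hedged usage sketch for the helper above: add_pos_signals only reads hparams.pos and hparams.max_length, so a simple stand-in object is enough for illustration. The values below are assumptions for demonstration, not taken from the fine-lm project.

# Illustrative only: a stand-in hparams with the two fields add_pos_signals reads.
import tensorflow as tf

class _HParams(object):
  pos = "timing"    # set to "emb" to use learned positional embeddings instead
  max_length = 256  # only consulted on the "emb" path

x = tf.zeros([1, 16, 16, 128])
x = add_pos_signals(x, _HParams(), name="pos_emb")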

Example 2: embed

# Required import: from tensor2tensor.layers import common_attention
# Or, importing the method directly: from tensor2tensor.layers.common_attention import add_timing_signal_nd
def embed(self, x, name="embedding"):
    """Input embedding with a non-zero bias for uniform inputs."""
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
      x_shape = common_layers.shape_list(x)
      # Merge channels and depth before embedding.
      x = tf.reshape(x, x_shape[:-2] + [x_shape[-2] * x_shape[-1]])
      x = tf.layers.dense(
          x,
          self.hparams.hidden_size,
          name="embed",
          activation=common_layers.belu,
          bias_initializer=tf.random_normal_initializer(stddev=0.01))
      x = common_layers.layer_norm(x, name="ln_embed")
      return common_attention.add_timing_signal_nd(x) 
Developer: tensorflow, Project: tensor2tensor, Lines: 16, Source: autoencoders.py

Example 3: encoder

# Required import: from tensor2tensor.layers import common_attention
# Or, importing the method directly: from tensor2tensor.layers.common_attention import add_timing_signal_nd
def encoder(self, x):
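    """Down-convolutional encoder: dense embedding plus timing signal, then strided convs with residual blocks."""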
    with tf.variable_scope("encoder"):
      hparams = self.hparams
      kernel, strides = self._get_kernel_and_strides()
      residual_kernel = (hparams.residual_kernel_height,
                         hparams.residual_kernel_width)
      residual_kernel1d = (hparams.residual_kernel_height, 1)
      residual_kernel = residual_kernel1d if self.is1d else residual_kernel
      residual_conv = tf.layers.conv2d
      if hparams.residual_use_separable_conv:
        residual_conv = tf.layers.separable_conv2d
      # Input embedding with a non-zero bias for uniform inputs.
      x = tf.layers.dense(
          x,
          hparams.hidden_size,
          name="embed",
          activation=common_layers.belu,
          bias_initializer=tf.random_normal_initializer(stddev=0.01))
      x = common_attention.add_timing_signal_nd(x)
      # Down-convolutions.
      for i in range(hparams.num_hidden_layers):
        with tf.variable_scope("layer_%d" % i):
          x = self.make_even_size(x)
          x = self.dropout(x)
          filters = hparams.hidden_size * 2**(i + 1)
          filters = min(filters, hparams.max_hidden_size)
          x = tf.layers.conv2d(
              x,
              filters,
              kernel,
              strides=strides,
              padding="SAME",
              activation=common_layers.belu,
              name="strided")
          y = x
          for r in range(hparams.num_residual_layers):
            residual_filters = filters
            if r < hparams.num_residual_layers - 1:
              residual_filters = int(
                  filters * hparams.residual_filter_multiplier)
            y = residual_conv(
                y,
                residual_filters,
                residual_kernel,
                padding="SAME",
                activation=common_layers.belu,
                name="residual_%d" % r)
          x += tf.nn.dropout(y, 1.0 - hparams.residual_dropout)
          x = common_layers.layer_norm(x)
      return x 
Developer: akzaidi, Project: fine-lm, Lines: 52, Source: autoencoders.py

Example 4: decoder

# Required import: from tensor2tensor.layers import common_attention
# Or, importing the method directly: from tensor2tensor.layers.common_attention import add_timing_signal_nd
def decoder(self, x):
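    """Up-convolutional decoder: transposed convs with residual blocks, re-adding the timing signal after every layer."""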
    with tf.variable_scope("decoder"):
      hparams = self.hparams
      kernel, strides = self._get_kernel_and_strides()
      residual_kernel = (hparams.residual_kernel_height,
                         hparams.residual_kernel_width)
      residual_kernel1d = (hparams.residual_kernel_height, 1)
      residual_kernel = residual_kernel1d if self.is1d else residual_kernel
      residual_conv = tf.layers.conv2d
      if hparams.residual_use_separable_conv:
        residual_conv = tf.layers.separable_conv2d
      # Up-convolutions.
      for i in range(hparams.num_hidden_layers):
        j = hparams.num_hidden_layers - i - 1
        filters = hparams.hidden_size * 2**j
        filters = min(filters, hparams.max_hidden_size)
        with tf.variable_scope("layer_%d" % i):
          x = tf.layers.conv2d_transpose(
              x,
              filters,
              kernel,
              strides=strides,
              padding="SAME",
              activation=common_layers.belu,
              name="strided")
          y = x
          for r in range(hparams.num_residual_layers):
            residual_filters = filters
            if r < hparams.num_residual_layers - 1:
              residual_filters = int(
                  filters * hparams.residual_filter_multiplier)
            y = residual_conv(
                y,
                residual_filters,
                residual_kernel,
                padding="SAME",
                activation=common_layers.belu,
                name="residual_%d" % r)
          x += tf.nn.dropout(y, 1.0 - hparams.residual_dropout)
          x = common_layers.layer_norm(x)
          x = common_attention.add_timing_signal_nd(x)
      return x 
Developer: akzaidi, Project: fine-lm, Lines: 46, Source: autoencoders.py

Example 5: encoder

# Required import: from tensor2tensor.layers import common_attention
# Or, importing the method directly: from tensor2tensor.layers.common_attention import add_timing_signal_nd
def encoder(self, x):
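    """Down-convolutional encoder that adds a timing signal in every layer and also returns the pre-downsampling activations."""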
    with tf.variable_scope("encoder"):
      hparams = self.hparams
      layers = []
      kernel, strides = self._get_kernel_and_strides()
      residual_kernel = (hparams.residual_kernel_height,
                         hparams.residual_kernel_width)
      residual_kernel1d = (hparams.residual_kernel_height, 1)
      residual_kernel = residual_kernel1d if self.is1d else residual_kernel
      residual_conv = tf.layers.conv2d
      if hparams.residual_use_separable_conv:
        residual_conv = tf.layers.separable_conv2d
      # Down-convolutions.
      for i in range(hparams.num_hidden_layers):
        with tf.variable_scope("layer_%d" % i):
          x = self.make_even_size(x)
          layers.append(x)
          x = self.dropout(x)
          filters = hparams.hidden_size * 2**(i + 1)
          filters = min(filters, hparams.max_hidden_size)
          x = common_attention.add_timing_signal_nd(x)
          x = tf.layers.conv2d(
              x,
              filters,
              kernel,
              strides=strides,
              padding="SAME",
              activation=common_layers.belu,
              name="strided")
          y = x
          y = tf.nn.dropout(y, 1.0 - hparams.residual_dropout)
          for r in range(hparams.num_residual_layers):
            residual_filters = filters
            if r < hparams.num_residual_layers - 1:
              residual_filters = int(
                  filters * hparams.residual_filter_multiplier)
            y = residual_conv(
                y,
                residual_filters,
                residual_kernel,
                padding="SAME",
                activation=common_layers.belu,
                name="residual_%d" % r)
          x += y
          x = common_layers.layer_norm(x, name="ln")
      return x, layers 
Developer: tensorflow, Project: tensor2tensor, Lines: 48, Source: autoencoders.py
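Note the difference from Example 3: this newer encoder applies add_timing_signal_nd inside every down-convolution layer rather than once after the input embedding, and it returns the list of pre-downsampling activations, which the matching decoder in Example 6 consumes as encoder_layers.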

Example 6: decoder

# Required import: from tensor2tensor.layers import common_attention
# Or, importing the method directly: from tensor2tensor.layers.common_attention import add_timing_signal_nd
def decoder(self, x, encoder_layers=None):
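    """Up-convolutional decoder that can mix encoder activations back in early in training."""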
    with tf.variable_scope("decoder"):
      hparams = self.hparams
      is_training = self.hparams.mode == tf.estimator.ModeKeys.TRAIN
      kernel, strides = self._get_kernel_and_strides()
      residual_kernel = (hparams.residual_kernel_height,
                         hparams.residual_kernel_width)
      residual_kernel1d = (hparams.residual_kernel_height, 1)
      residual_kernel = residual_kernel1d if self.is1d else residual_kernel
      residual_conv = tf.layers.conv2d
      if hparams.residual_use_separable_conv:
        residual_conv = tf.layers.separable_conv2d
      # Up-convolutions.
      for i in range(hparams.num_hidden_layers):
        j = hparams.num_hidden_layers - i - 1
        if is_training:
          nomix_p = common_layers.inverse_lin_decay(
              int(hparams.bottleneck_warmup_steps * 0.25 * 2**j)) + 0.01
          if common_layers.should_generate_summaries():
            tf.summary.scalar("nomix_p_%d" % j, nomix_p)
        filters = hparams.hidden_size * 2**j
        filters = min(filters, hparams.max_hidden_size)
        with tf.variable_scope("layer_%d" % i):
          x = tf.layers.conv2d_transpose(
              x,
              filters,
              kernel,
              strides=strides,
              padding="SAME",
              activation=common_layers.belu,
              name="strided")
          y = x
          for r in range(hparams.num_residual_layers):
            residual_filters = filters
            if r < hparams.num_residual_layers - 1:
              residual_filters = int(
                  filters * hparams.residual_filter_multiplier)
            y = residual_conv(
                y,
                residual_filters,
                residual_kernel,
                padding="SAME",
                activation=common_layers.belu,
                name="residual_%d" % r)
          x += tf.nn.dropout(y, 1.0 - hparams.residual_dropout)
          x = common_layers.layer_norm(x, name="ln")
          x = common_attention.add_timing_signal_nd(x)
          if encoder_layers is not None:
            enc_x = encoder_layers[j]
            enc_shape = common_layers.shape_list(enc_x)
            x_mix = x[:enc_shape[0], :enc_shape[1], :enc_shape[2], :]
            if is_training:  # Mix at the beginning of training.
              rand = tf.random_uniform(common_layers.shape_list(x_mix))
              x_mix = tf.where(tf.less(rand, nomix_p), x_mix, enc_x)
            if hparams.gan_loss_factor != 0:
              x_gan = x[enc_shape[0]:, :enc_shape[1], :enc_shape[2], :]
              x = tf.concat([x_mix, x_gan], axis=0)
            else:
              x = x_mix
      return x 
Developer: tensorflow, Project: tensor2tensor, Lines: 63, Source: autoencoders.py
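A note on the mixing schedule in Example 6: common_layers.inverse_lin_decay(max_step) ramps linearly from 0 to 1 over max_step global steps, so nomix_p starts near 0.01 (most positions are replaced by the encoder activation enc_x) and later exceeds 1.0 (the decoder keeps its own output everywhere). The standalone reimplementation below is an illustrative assumption, not the tensor2tensor code.

# Hedged sketch of the nomix_p schedule from Example 6 (pure Python, illustrative).
def inverse_lin_decay(step, max_step):
  return min(step / float(max_step), 1.0)

warmup = 1000  # stand-in for int(hparams.bottleneck_warmup_steps * 0.25 * 2**j)
for step in [0, 250, 500, 1000, 2000]:
  nomix_p = inverse_lin_decay(step, warmup) + 0.01
  print(step, round(nomix_p, 3))  # ramps from 0.01 up to 1.01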


Note: The tensor2tensor.layers.common_attention.add_timing_signal_nd examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Consult each project's license before distributing or using the code, and do not reproduce this article without permission.