當前位置: 首頁>>代碼示例>>Python>>正文


Python transformer.transformer_tiny方法代碼示例

本文整理匯總了Python中tensor2tensor.models.transformer.transformer_tiny方法的典型用法代碼示例。如果您正苦於以下問題:Python transformer.transformer_tiny方法的具體用法?Python transformer.transformer_tiny怎麽用?Python transformer.transformer_tiny使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在tensor2tensor.models.transformer的用法示例。


在下文中一共展示了transformer.transformer_tiny方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: _create_greedy_infer_model

# 需要導入模塊: from tensor2tensor.models import transformer [as 別名]
# 或者: from tensor2tensor.models.transformer import transformer_tiny [as 別名]
def _create_greedy_infer_model(self):
    """Builds a tiny transformer, trains it briefly, and switches to PREDICT.

    Returns:
      model: A t2t model, left in PREDICT mode.
      features: A map of string to tensor.
    """
    model, features = get_model(transformer.transformer_tiny())

    logits, _ = model(features)
    logits = tf.squeeze(logits, axis=[2, 3])
    xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=tf.reshape(logits, [-1, VOCAB_SIZE]),
        labels=tf.reshape(features["targets"], [-1]))
    # A handful of Adam steps is enough to get non-degenerate weights for
    # the inference comparisons that follow.
    train_op = tf.train.AdamOptimizer(0.001).minimize(tf.reduce_mean(xent))

    with self.test_session():
      tf.global_variables_initializer().run()
      for _ in range(10):
        train_op.run()

    model.set_mode(tf.estimator.ModeKeys.PREDICT)

    return model, features
開發者ID:tensorflow,項目名稱:tensor2tensor,代碼行數:27,代碼來源:evolved_transformer_test.py

示例2: universal_transformer_tiny

# 需要導入模塊: from tensor2tensor.models import transformer [as 別名]
# 或者: from tensor2tensor.models.transformer import transformer_tiny [as 別名]
def universal_transformer_tiny():
  """Tiny hparams: transformer_tiny adapted for the Universal Transformer."""
  hp = update_hparams_for_universal_transformer(transformer.transformer_tiny())
  hp.num_rec_steps = 8
  return hp
開發者ID:akzaidi,項目名稱:fine-lm,代碼行數:7,代碼來源:universal_transformer.py

示例3: get_model

# 需要導入模塊: from tensor2tensor.models import transformer [as 別名]
# 或者: from tensor2tensor.models.transformer import transformer_tiny [as 別名]
def get_model(hparams=None, mode=tf.estimator.ModeKeys.TRAIN,
              has_input=True, model_cls=transformer.Transformer):
  """Builds a tiny transformer model plus random test features.

  Args:
    hparams: HParams to use; defaults to transformer_tiny, then shrunk
      further (hidden_size=8, filter_size=32, one head, no dropout) to
      keep the test graph small.
    mode: tf.estimator.ModeKeys value to construct the model in.
    has_input: if False, the input modality is emptied and no "inputs"
      feature is produced, making the problem target-only.
    model_cls: model class to instantiate.

  Returns:
    A (model, features) tuple, where features maps feature names to
    int32 tensors of shape (batch, length, 1, 1).
  """
  if hparams is None:
    hparams = transformer.transformer_tiny()
  hparams.hidden_size = 8
  hparams.filter_size = 32
  hparams.num_heads = 1
  hparams.layer_prepostprocess_dropout = 0.0

  p_hparams = problem_hparams.test_problem_hparams(VOCAB_SIZE, VOCAB_SIZE)
  if not has_input:
    p_hparams.input_modality = {}
  hparams.problem_hparams = p_hparams

  # np.random.random_integers is deprecated in NumPy; randint(VOCAB_SIZE)
  # draws the same range [0, VOCAB_SIZE) that -1 + random_integers(VOCAB_SIZE)
  # did, and matches the newer variant of this helper.
  inputs = np.random.randint(
      VOCAB_SIZE, size=(BATCH_SIZE, INPUT_LENGTH, 1, 1))
  targets = np.random.randint(
      VOCAB_SIZE, size=(BATCH_SIZE, TARGET_LENGTH, 1, 1))
  features = {
      "targets": tf.constant(targets, dtype=tf.int32, name="targets"),
      "target_space_id": tf.constant(1, dtype=tf.int32)
  }
  if has_input:
    features["inputs"] = tf.constant(inputs, dtype=tf.int32, name="inputs")

  return model_cls(hparams, mode, p_hparams), features
開發者ID:akzaidi,項目名稱:fine-lm,代碼行數:28,代碼來源:transformer_test.py

示例4: transformer_aux_tiny

# 需要導入模塊: from tensor2tensor.models import transformer [as 別名]
# 或者: from tensor2tensor.models.transformer import transformer_tiny [as 別名]
def transformer_aux_tiny():
  """Tiny hparams for the auxiliary-shift transformer."""
  hp = transformer.transformer_tiny()
  hp.shared_embedding_and_softmax_weights = False
  hp.add_hparam("shift_values", "1,2")
  return hp
開發者ID:tensorflow,項目名稱:tensor2tensor,代碼行數:8,代碼來源:transformer_aux.py

示例5: transformer_tiny_bs1

# 需要導入模塊: from tensor2tensor.models import transformer [as 別名]
# 或者: from tensor2tensor.models.transformer import transformer_tiny [as 別名]
def transformer_tiny_bs1():
  """transformer_tiny with block-parallel decoding, block size 1."""
  hp = transformer.transformer_tiny()
  hp.add_hparam("block_size", 1)
  return hp
開發者ID:tensorflow,項目名稱:tensor2tensor,代碼行數:6,代碼來源:transformer_parallel.py

示例6: transformer_tiny_bs2

# 需要導入模塊: from tensor2tensor.models import transformer [as 別名]
# 或者: from tensor2tensor.models.transformer import transformer_tiny [as 別名]
def transformer_tiny_bs2():
  """transformer_tiny with block-parallel decoding, block size 2."""
  hp = transformer.transformer_tiny()
  hp.add_hparam("block_size", 2)
  return hp
開發者ID:tensorflow,項目名稱:tensor2tensor,代碼行數:6,代碼來源:transformer_parallel.py

示例7: transformer_tiny_bs3

# 需要導入模塊: from tensor2tensor.models import transformer [as 別名]
# 或者: from tensor2tensor.models.transformer import transformer_tiny [as 別名]
def transformer_tiny_bs3():
  """transformer_tiny with block-parallel decoding, block size 3."""
  hp = transformer.transformer_tiny()
  hp.add_hparam("block_size", 3)
  return hp
開發者ID:tensorflow,項目名稱:tensor2tensor,代碼行數:6,代碼來源:transformer_parallel.py

示例8: testEvolvedTransformer

# 需要導入模塊: from tensor2tensor.models import transformer [as 別名]
# 或者: from tensor2tensor.models.transformer import transformer_tiny [as 別名]
def testEvolvedTransformer(self):
    """A forward pass yields logits of the expected 5-D shape."""
    model, features = get_model(hparams=transformer.transformer_tiny())
    logits, _ = model(features)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      out = sess.run(logits)
    self.assertEqual(out.shape, (BATCH_SIZE, TARGET_LENGTH, 1, 1, VOCAB_SIZE))
開發者ID:tensorflow,項目名稱:tensor2tensor,代碼行數:9,代碼來源:evolved_transformer_test.py

示例9: testSlowVsFast

# 需要導入模塊: from tensor2tensor.models import transformer [as 別名]
# 或者: from tensor2tensor.models.transformer import transformer_tiny [as 別名]
def testSlowVsFast(self):
    """Slow and fast greedy decoding agree after brief training."""
    tf.set_random_seed(1234)
    model, features = get_model(transformer.transformer_tiny())

    decode_length = DECODE_LENGTH

    logits, _ = model(features)
    logits = tf.squeeze(logits, axis=[2, 3])
    xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=tf.reshape(logits, [-1, VOCAB_SIZE]),
        labels=tf.reshape(features["targets"], [-1]))
    train_op = tf.train.AdamOptimizer(0.001).minimize(tf.reduce_mean(xent))

    with self.test_session():
      tf.global_variables_initializer().run()
      for _ in range(10):
        train_op.run()

    model.set_mode(tf.estimator.ModeKeys.PREDICT)

    # Reuse the trained variables for both decoding paths.
    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
      slow_out = tf.squeeze(
          model._slow_greedy_infer(features, decode_length)["outputs"],
          axis=[2, 3])
      fast_out = model._greedy_infer(features, decode_length)["outputs"]

    with self.test_session():
      slow_vals = slow_out.eval()
      fast_vals = fast_out.eval()

    self.assertEqual(fast_vals.shape,
                     (BATCH_SIZE, INPUT_LENGTH + decode_length))
    self.assertAllClose(slow_vals, fast_vals)
開發者ID:tensorflow,項目名稱:tensor2tensor,代碼行數:36,代碼來源:evolved_transformer_test.py

示例10: testSlowVsFastNoInput

# 需要導入模塊: from tensor2tensor.models import transformer [as 別名]
# 或者: from tensor2tensor.models.transformer import transformer_tiny [as 別名]
def testSlowVsFastNoInput(self):
    """Slow and fast greedy decoding agree for a target-only problem."""
    model, features = get_model(transformer.transformer_tiny(), has_input=False)

    decode_length = DECODE_LENGTH

    logits, _ = model(features)
    logits = tf.squeeze(logits, axis=[2, 3])
    xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=tf.reshape(logits, [-1, VOCAB_SIZE]),
        labels=tf.reshape(features["targets"], [-1]))
    train_op = tf.train.AdamOptimizer(0.001).minimize(tf.reduce_mean(xent))

    with self.test_session():
      tf.global_variables_initializer().run()
      for _ in range(10):
        train_op.run()

    model.set_mode(tf.estimator.ModeKeys.PREDICT)

    # Reuse the trained variables for both decoding paths.
    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
      slow_out = tf.squeeze(
          model._slow_greedy_infer(features, decode_length)["outputs"],
          axis=[2, 3])
      fast_out = model._greedy_infer(features, decode_length)["outputs"]

    with self.test_session():
      slow_vals = slow_out.eval()
      fast_vals = fast_out.eval()

    # No inputs, so the decoded length is decode_length alone.
    self.assertEqual(slow_vals.shape, (BATCH_SIZE, decode_length))
    self.assertAllClose(slow_vals, fast_vals)
開發者ID:tensorflow,項目名稱:tensor2tensor,代碼行數:34,代碼來源:evolved_transformer_test.py

示例11: testBeamVsFast

# 需要導入模塊: from tensor2tensor.models import transformer [as 別名]
# 或者: from tensor2tensor.models.transformer import transformer_tiny [as 別名]
def testBeamVsFast(self):
    """Slow and fast beam decoding produce matching outputs."""
    model, features = get_model(transformer.transformer_tiny())

    decode_length = DECODE_LENGTH

    logits, _ = model(features)
    logits = tf.squeeze(logits, axis=[2, 3])
    xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=tf.reshape(logits, [-1, VOCAB_SIZE]),
        labels=tf.reshape(features["targets"], [-1]))
    train_op = tf.train.AdamOptimizer(0.001).minimize(tf.reduce_mean(xent))

    with self.test_session():
      tf.global_variables_initializer().run()
      for _ in range(10):
        train_op.run()

    model.set_mode(tf.estimator.ModeKeys.PREDICT)

    # Decode with both implementations under the same trained variables.
    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
      slow_beam = model._beam_decode_slow(
          features, decode_length, beam_size=4, top_beams=1,
          alpha=1.0)["outputs"]
      fast_beam = model._beam_decode(
          features, decode_length, beam_size=4, top_beams=1,
          alpha=1.0)["outputs"]

    with self.test_session():
      slow_vals = slow_beam.eval()
      fast_vals = fast_beam.eval()

    self.assertAllClose(slow_vals, fast_vals)
開發者ID:tensorflow,項目名稱:tensor2tensor,代碼行數:36,代碼來源:evolved_transformer_test.py

示例12: testSlowVsFast

# 需要導入模塊: from tensor2tensor.models import transformer [as 別名]
# 或者: from tensor2tensor.models.transformer import transformer_tiny [as 別名]
def testSlowVsFast(self):
    """Fast greedy inference must reproduce the slow reference decoder."""
    model, features = get_model(transformer.transformer_tiny())

    decode_length = DECODE_LENGTH

    logits, _ = model(features)
    logits = tf.squeeze(logits, axis=[2, 3])
    loss_t = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=tf.reshape(logits, [-1, VOCAB_SIZE]),
            labels=tf.reshape(features["targets"], [-1])))
    step_op = tf.train.AdamOptimizer(0.001).minimize(loss_t)

    with self.test_session():
      tf.global_variables_initializer().run()
      for _ in range(10):
        step_op.run()

    model.set_mode(tf.estimator.ModeKeys.PREDICT)

    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
      reference = tf.squeeze(
          model._slow_greedy_infer(features, decode_length)["outputs"],
          axis=[2, 3])
      candidate = model._greedy_infer(features, decode_length)["outputs"]

    with self.test_session():
      reference_vals = reference.eval()
      candidate_vals = candidate.eval()

    self.assertEqual(candidate_vals.shape,
                     (BATCH_SIZE, INPUT_LENGTH + decode_length))
    self.assertAllClose(reference_vals, candidate_vals)
開發者ID:yyht,項目名稱:BERT,代碼行數:35,代碼來源:evolved_transformer_test.py

示例13: testSlowVsFastNoInput

# 需要導入模塊: from tensor2tensor.models import transformer [as 別名]
# 或者: from tensor2tensor.models.transformer import transformer_tiny [as 別名]
def testSlowVsFastNoInput(self):
    """Fast greedy inference matches the slow decoder with no inputs."""
    model, features = get_model(
        transformer.transformer_tiny(), has_input=False)

    decode_length = DECODE_LENGTH

    logits, _ = model(features)
    logits = tf.squeeze(logits, axis=[2, 3])
    loss_t = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=tf.reshape(logits, [-1, VOCAB_SIZE]),
            labels=tf.reshape(features["targets"], [-1])))
    step_op = tf.train.AdamOptimizer(0.001).minimize(loss_t)

    with self.test_session():
      tf.global_variables_initializer().run()
      for _ in range(10):
        step_op.run()

    model.set_mode(tf.estimator.ModeKeys.PREDICT)

    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
      reference = tf.squeeze(
          model._slow_greedy_infer(features, decode_length)["outputs"],
          axis=[2, 3])
      candidate = model._greedy_infer(features, decode_length)["outputs"]

    with self.test_session():
      reference_vals = reference.eval()
      candidate_vals = candidate.eval()

    # Target-only problem: decoded length is decode_length alone.
    self.assertEqual(reference_vals.shape, (BATCH_SIZE, decode_length))
    self.assertAllClose(reference_vals, candidate_vals)
開發者ID:yyht,項目名稱:BERT,代碼行數:36,代碼來源:evolved_transformer_test.py

示例14: testBeamVsFast

# 需要導入模塊: from tensor2tensor.models import transformer [as 別名]
# 或者: from tensor2tensor.models.transformer import transformer_tiny [as 別名]
def testBeamVsFast(self):
    """Fast beam decoding must reproduce the slow beam decoder."""
    model, features = get_model(transformer.transformer_tiny())

    decode_length = DECODE_LENGTH

    logits, _ = model(features)
    logits = tf.squeeze(logits, axis=[2, 3])
    loss_t = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=tf.reshape(logits, [-1, VOCAB_SIZE]),
            labels=tf.reshape(features["targets"], [-1])))
    step_op = tf.train.AdamOptimizer(0.001).minimize(loss_t)

    with self.test_session():
      tf.global_variables_initializer().run()
      for _ in range(10):
        step_op.run()

    model.set_mode(tf.estimator.ModeKeys.PREDICT)

    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
      reference = model._beam_decode_slow(
          features,
          decode_length,
          beam_size=4,
          top_beams=1,
          alpha=1.0)["outputs"]
      candidate = model._beam_decode(
          features,
          decode_length,
          beam_size=4,
          top_beams=1,
          alpha=1.0)["outputs"]

    with self.test_session():
      reference_vals = reference.eval()
      candidate_vals = candidate.eval()

    self.assertAllClose(reference_vals, candidate_vals)
開發者ID:yyht,項目名稱:BERT,代碼行數:42,代碼來源:evolved_transformer_test.py

示例15: get_model

# 需要導入模塊: from tensor2tensor.models import transformer [as 別名]
# 或者: from tensor2tensor.models.transformer import transformer_tiny [as 別名]
def get_model(hparams=None, mode=tf.estimator.ModeKeys.TRAIN,
              has_input=True, model_cls=transformer.Transformer):
  """Builds a tiny transformer model plus random integer test features.

  Defaults to transformer_tiny hparams, shrunk further so the graph stays
  small. When has_input is False the "inputs" modality and feature are
  omitted, making the problem target-only.
  """
  if hparams is None:
    hparams = transformer.transformer_tiny()
  hparams.hidden_size = 8
  hparams.filter_size = 32
  hparams.num_heads = 1
  hparams.layer_prepostprocess_dropout = 0.0

  p_hparams = problem_hparams.test_problem_hparams(VOCAB_SIZE,
                                                   VOCAB_SIZE,
                                                   hparams)
  if not has_input:
    del p_hparams.modality["inputs"]
  hparams.problem_hparams = p_hparams

  # Draw inputs first, then targets, preserving the original RNG order.
  inputs = np.random.randint(
      0, VOCAB_SIZE, size=(BATCH_SIZE, INPUT_LENGTH, 1, 1))
  targets = np.random.randint(
      0, VOCAB_SIZE, size=(BATCH_SIZE, TARGET_LENGTH, 1, 1))
  features = {
      "targets": tf.constant(targets, dtype=tf.int32, name="targets"),
      "target_space_id": tf.constant(1, dtype=tf.int32)
  }
  if has_input:
    features["inputs"] = tf.constant(inputs, dtype=tf.int32, name="inputs")

  return model_cls(hparams, mode, p_hparams), features
開發者ID:yyht,項目名稱:BERT,代碼行數:30,代碼來源:transformer_test.py


注:本文中的tensor2tensor.models.transformer.transformer_tiny方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。