
Python variables.get_or_create_global_step Method Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.framework.python.ops.variables.get_or_create_global_step. If you are wondering what variables.get_or_create_global_step does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help. You can also browse further usage examples from its parent module, tensorflow.contrib.framework.python.ops.variables.


The following shows 6 code examples of variables.get_or_create_global_step, sorted by popularity by default.
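Before the examples, here is a minimal, self-contained sketch of the usual pattern: fetch (or create) the global step, let the optimizer increment it on every update, and read it back afterwards. This snippet is not taken from any of the projects below; it is an illustrative sketch that assumes a TensorFlow 1.x environment where tf.contrib is available.

# Minimal illustrative sketch (assumes TensorFlow 1.x with tf.contrib available);
# not taken from the projects below.
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import variables as variables_lib

with tf.Graph().as_default():
    # Returns the existing global_step variable, or creates one if it is absent.
    global_step = variables_lib.get_or_create_global_step()

    loss = tf.Variable(5.0, name="loss")
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    # Passing global_step makes minimize() increment it once per update.
    train_op = optimizer.minimize(loss, global_step=global_step)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(3):
            sess.run(train_op)
        print(sess.run(global_step))  # -> 3

The examples below show the same idea in real projects: Examples 1 and 2 use slim's learning.create_train_op with and without a global step, Examples 3 to 5 fetch the step while setting up evaluation hooks and tests, and Example 6 increments it manually with tf.assign.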

Example 1: testUseGlobalStep

# Required import: from tensorflow.contrib.framework.python.ops import variables [used here as variables_lib2]
# Or: from tensorflow.contrib.framework.python.ops.variables import get_or_create_global_step [under an alias]
def testUseGlobalStep(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = BatchNormClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      global_step = variables_lib2.get_or_create_global_step()

      with session.Session() as sess:
        # Initialize all variables
        sess.run(variables_lib.global_variables_initializer())

        for _ in range(10):
          sess.run([train_op])
        global_step = global_step.eval()
        # After 10 updates global_step should be 10.
        self.assertAllClose(global_step, 10) 
Author: abhisuri97 | Project: auto-alt-text-lambda-api | Lines of code: 26 | Source file: learning_test.py

Example 2: testNoneGlobalStep

# Required import: from tensorflow.contrib.framework.python.ops import variables [used here as variables_lib2]
# Or: from tensorflow.contrib.framework.python.ops.variables import get_or_create_global_step [under an alias]
def testNoneGlobalStep(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = BatchNormClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(
          total_loss, optimizer, global_step=None)

      global_step = variables_lib2.get_or_create_global_step()

      with session.Session() as sess:
        # Initialize all variables
        sess.run(variables_lib.global_variables_initializer())

        for _ in range(10):
          sess.run([train_op])
        global_step = global_step.eval()
        # Since train_op doesn't use global_step, it shouldn't change.
        self.assertAllClose(global_step, 0) 
Author: abhisuri97 | Project: auto-alt-text-lambda-api | Lines of code: 27 | Source file: learning_test.py

Example 3: begin

# Required import: from tensorflow.contrib.framework.python.ops import variables
# Or: from tensorflow.contrib.framework.python.ops.variables import get_or_create_global_step [under an alias]
def begin(self):
    if self._replace_summary_op:
      self._summary_op = summary.merge_all()
    self._global_step = variables.get_or_create_global_step() 
Author: ryfeus | Project: lambda-packs | Lines of code: 6 | Source file: evaluation.py

Example 4: setUp

# Required import: from tensorflow.contrib.framework.python.ops import variables [used here as variables_lib]
# Or: from tensorflow.contrib.framework.python.ops.variables import get_or_create_global_step [under an alias]
def setUp(self):
    super(EvaluationTest, self).setUp()

    num_classes = 8
    batch_size = 16
    inputs, labels = GenerateTestData(num_classes, batch_size)
    self._expected_accuracy = GroundTruthAccuracy(inputs, labels, batch_size)

    self._global_step = variables_lib.get_or_create_global_step()
    self._inputs = constant_op.constant(inputs, dtype=dtypes.float32)
    self._labels = constant_op.constant(labels, dtype=dtypes.int64)
    self._predictions, self._scale = TestModel(self._inputs) 
Author: abhisuri97 | Project: auto-alt-text-lambda-api | Lines of code: 14 | Source file: evaluation_test.py

Example 5: __init__

# Required import: from tensorflow.contrib.framework.python.ops import variables
# Or: from tensorflow.contrib.framework.python.ops.variables import get_or_create_global_step [under an alias]
def __init__(self, log_dir, summary_op=None, feed_dict=None):
    """Constructs the Summary Hook.

    Args:
      log_dir: The directory where the logs are saved.
      summary_op: The summary op to run. If left as `None`, then all summaries
        in the tf.GraphKeys.SUMMARIES collection are used.
      feed_dict: An optional feed dictionary to use when evaluating the
        summaries.
    """
    self._summary_op = summary_op
    self._feed_dict = feed_dict
    self._summary_writer = summary_io.SummaryWriter(log_dir)
    self._global_step = variables.get_or_create_global_step() 
Author: abhisuri97 | Project: auto-alt-text-lambda-api | Lines of code: 16 | Source file: evaluation.py

Example 6: __init__

# Required import: from tensorflow.contrib.framework.python.ops import variables [under an alias]
# Or: from tensorflow.contrib.framework.python.ops.variables import get_or_create_global_step [the form used here]
def __init__(self, corpus, **opts):
        self.corpus = corpus

        self.opts = opts

        self.global_step = get_or_create_global_step()
        self.increment_global_step_op = tf.assign(self.global_step, self.global_step + 1, name="increment_global_step")

        self.corpus_size = get_corpus_size(self.corpus["train"])
        self.corpus_size_valid = get_corpus_size(self.corpus["valid"])

        self.word2idx, self.idx2word = build_vocab(self.corpus["train"])
        self.vocab_size = len(self.word2idx)

        self.generator_template = tf.make_template(GENERATOR_PREFIX, generator)
        self.discriminator_template = tf.make_template(DISCRIMINATOR_PREFIX, discriminator)

        self.enqueue_data, _, source, target, sequence_length = \
            prepare_data(self.corpus["train"], self.word2idx, num_threads=7, **self.opts)

        # TODO: option to either do pretrain or just generate?
        self.g_tensors_pretrain = self.generator_template(
            source, target, sequence_length, self.vocab_size, **self.opts)

        self.enqueue_data_valid, self.input_ph, source_valid, target_valid, sequence_length_valid = \
            prepare_data(self.corpus["valid"], self.word2idx, num_threads=1, **self.opts)

        self.g_tensors_pretrain_valid = self.generator_template(
            source_valid, target_valid, sequence_length_valid, self.vocab_size, **self.opts)

        self.decoder_fn = prepare_custom_decoder(
            sequence_length, self.g_tensors_pretrain.embedding_matrix, self.g_tensors_pretrain.output_projections)

        self.g_tensors_fake = self.generator_template(
            source, target, sequence_length, self.vocab_size, decoder_fn=self.decoder_fn, **self.opts)

        self.g_tensors_fake_valid = self.generator_template(
            source_valid, target_valid, sequence_length_valid, self.vocab_size, decoder_fn=self.decoder_fn, **self.opts)

        # TODO: using the rnn outputs from pretraining as "real" instead of target embeddings (aka professor forcing)
        self.d_tensors_real = self.discriminator_template(
            self.g_tensors_pretrain.rnn_outputs, sequence_length, is_real=True, **self.opts)

        # TODO: check to see if sequence_length is correct
        self.d_tensors_fake = self.discriminator_template(
            self.g_tensors_fake.rnn_outputs, None, is_real=False, **self.opts)

        self.g_tvars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=GENERATOR_PREFIX)
        self.d_tvars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=DISCRIMINATOR_PREFIX) 
Author: tokestermw | Project: text-gan-tensorflow | Lines of code: 51 | Source file: model.py


Note: The tensorflow.contrib.framework.python.ops.variables.get_or_create_global_step examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by various developers, and copyright remains with their original authors; follow each project's license when distributing or reusing the code. Do not reproduce this article without permission.