

Python tf_logging.set_verbosity Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.platform.tf_logging.set_verbosity. If you are wondering what exactly tf_logging.set_verbosity does, how to call it, or what it looks like in practice, the curated code examples below may help. You can also explore further usage examples from the module it belongs to, tensorflow.python.platform.tf_logging.


The following presents 9 code examples of the tf_logging.set_verbosity method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
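Before walking through the full examples, here is a minimal sketch of the pattern they all share: import the module (conventionally aliased as logging), set the logging threshold with set_verbosity, then emit messages. The sketch assumes a TensorFlow 1.x installation in which tensorflow.python.platform.tf_logging is importable; its level constants (DEBUG, INFO, WARN, ERROR, FATAL) mirror the standard library logging module.

# Minimal sketch (assumes TensorFlow 1.x is installed).
from tensorflow.python.platform import tf_logging as logging

# After this call, only messages at INFO level or above are emitted.
logging.set_verbosity(logging.INFO)

logging.info('visible at INFO verbosity')
logging.debug('suppressed at INFO verbosity')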

Example 1: main

# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Or: from tensorflow.python.platform.tf_logging import set_verbosity [as alias]
def main(unused_argv):
  logging.set_verbosity(logging.INFO)
  with tf.Session() as sess:
    src = gen_parser_ops.document_source(batch_size=32,
                                         corpus_name=FLAGS.corpus_name,
                                         task_context=FLAGS.task_context)
    sentence = sentence_pb2.Sentence()
    while True:
      documents, finished = sess.run(src)
      logging.info('Read %d documents', len(documents))
      for d in documents:
        sentence.ParseFromString(d)
        tr = asciitree.LeftAligned()
        d = to_dict(sentence)
        print('Input: %s' % sentence.text)
        print('Parse:')
        tr_str = tr(d)
        pat = re.compile(r'\s*@\d+$')
        for tr_ln in tr_str.splitlines():
          print(pat.sub('', tr_ln))

      if finished:
        break 
Developer: ringringyi, Project: DOTA_models, Lines: 25, Source: conll2tree.py

Example 2: main

# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Or: from tensorflow.python.platform.tf_logging import set_verbosity [as alias]
def main(unused_argv):
  logging.set_verbosity(logging.INFO)
  with tf.Session() as sess:
    src = gen_parser_ops.document_source(batch_size=32,
                                         corpus_name=FLAGS.corpus_name,
                                         task_context=FLAGS.task_context)
    sentence = sentence_pb2.Sentence()
    while True:
      documents, finished = sess.run(src)
      logging.info('Read %d documents', len(documents))
      for d in documents:
        sentence.ParseFromString(d)
        tr = asciitree.LeftAligned()
        d = to_dict(sentence)
        print('Input: %s' % sentence.text)
        print('Parse:')
        tr_str = tr(d)
        pat = re.compile(r'\s*@\d+$')
        for tr_ln in tr_str.splitlines():
          print(pat.sub('', tr_ln))

      if finished:
        break 
Developer: itsamitgoel, Project: Gun-Detector, Lines: 25, Source: conll2tree.py

Example 3: main

# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Or: from tensorflow.python.platform.tf_logging import set_verbosity [as alias]
def main(unused_argv):
  logging.set_verbosity(logging.INFO)
  with tf.Session() as sess:
    src = gen_parser_ops.document_source(batch_size=32,
                                         corpus_name=FLAGS.corpus_name,
                                         task_context=FLAGS.task_context)
    sentence = sentence_pb2.Sentence()
    while True:
      documents, finished = sess.run(src)
      logging.info('Read %d documents', len(documents))
      for d in documents:
        sentence.ParseFromString(d)
        tr = asciitree.LeftAligned()
        d = to_dict(sentence)
        print('Input: %s' % sentence.text)
        print('Parse:')
        print(tr(d))

      if finished:
        break 
Developer: llSourcell, Project: AI_Reader, Lines: 22, Source: conll2tree.py

Example 4: main

# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Or: from tensorflow.python.platform.tf_logging import set_verbosity [as alias]
def main(unused_argv):
  logging.set_verbosity(logging.INFO)
  with tf.Session() as sess:
    Eval(sess) 
Developer: ringringyi, Project: DOTA_models, Lines: 6, Source: parser_eval.py

Example 5: load_model

# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Or: from tensorflow.python.platform.tf_logging import set_verbosity [as alias]
def load_model(base_dir, master_spec_name, checkpoint_name):
    """
    Function to load the syntaxnet models. Highly specific to the tutorial
    format right now.
    """
    # Read the master spec
    master_spec = spec_pb2.MasterSpec()
    with open(os.path.join(base_dir, master_spec_name), "r") as f:
        text_format.Merge(f.read(), master_spec)
    spec_builder.complete_master_spec(master_spec, None, base_dir)
    logging.set_verbosity(logging.WARN)  # Turn off TensorFlow spam.

    # Initialize a graph
    graph = tf.Graph()
    with graph.as_default():
        hyperparam_config = spec_pb2.GridPoint()
        builder = graph_builder.MasterBuilder(master_spec, hyperparam_config)
        # This is the component that will annotate test sentences.
        annotator = builder.add_annotation(enable_tracing=True)
        builder.add_saver()  # "Savers" can save and load models; here, we're only going to load.

    sess = tf.Session(graph=graph)
    with graph.as_default():
        #sess.run(tf.global_variables_initializer())
        #sess.run('save/restore_all', {'save/Const:0': os.path.join(base_dir, checkpoint_name)})
        builder.saver.restore(sess, os.path.join(base_dir, checkpoint_name))

    def annotate_sentence(sentence):
        with graph.as_default():
            return sess.run([annotator['annotations'], annotator['traces']],
                            feed_dict={annotator['input_batch']: [sentence]})
    return annotate_sentence 
Developer: hltcoe, Project: PredPatt, Lines: 34, Source: ParseyPredFace.py

Example 6: main

# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Or: from tensorflow.python.platform.tf_logging import set_verbosity [as alias]
def main(unused_argv):
  logging.set_verbosity(logging.INFO)
  with tf.Session() as sess:
    feature_sizes, domain_sizes, embedding_dims, num_actions = sess.run(
        gen_parser_ops.feature_size(task_context=FLAGS.task_context,
                                    arg_prefix=FLAGS.arg_prefix))

  with tf.Session() as sess:
    Eval(sess, num_actions, feature_sizes, domain_sizes, embedding_dims) 
Developer: llSourcell, Project: AI_Reader, Lines: 11, Source: parser_eval.py

Example 7: load_model

# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Or: from tensorflow.python.platform.tf_logging import set_verbosity [as alias]
def load_model(self, base_dir, master_spec_name, checkpoint_name):
        # Read the master spec
        master_spec = spec_pb2.MasterSpec()
        with open(os.path.join(base_dir, master_spec_name), "r") as f:
            text_format.Merge(f.read(), master_spec)
        spec_builder.complete_master_spec(master_spec, None, base_dir)
        logging.set_verbosity(logging.WARN)  # Turn off TensorFlow spam.

        # Initialize a graph
        graph = tf.Graph()
        with graph.as_default():
            hyperparam_config = spec_pb2.GridPoint()
            builder = graph_builder.MasterBuilder(master_spec, hyperparam_config)
            # This is the component that will annotate test sentences.
            annotator = builder.add_annotation(enable_tracing=True)
            builder.add_saver()  # "Savers" can save and load models; here, we're only going to load.

        sess = tf.Session(graph=graph)
        with graph.as_default():
            # sess.run(tf.global_variables_initializer())
            # sess.run('save/restore_all', {'save/Const:0': os.path.join(base_dir, checkpoint_name)})
            builder.saver.restore(sess, os.path.join(base_dir, checkpoint_name))

        def annotate_sentence(sentence):
            with graph.as_default():
                return sess.run([annotator['annotations'], annotator['traces']],
                                feed_dict={annotator['input_batch']: [sentence]})

        return annotate_sentence 
Developer: ljm625, Project: syntaxnet-rest-api, Lines: 31, Source: dragnn_parser.py

Example 8: main

# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Or: from tensorflow.python.platform.tf_logging import set_verbosity [as alias]
def main(unused_argv):
  logging.set_verbosity(logging.INFO)
  if not gfile.IsDirectory(OutputPath('')):
    gfile.MakeDirs(OutputPath(''))

  # Rewrite context.
  RewriteContext()

  # Creates necessary term maps.
  if FLAGS.compute_lexicon:
    logging.info('Computing lexicon...')
    with tf.Session(FLAGS.tf_master) as sess:
      gen_parser_ops.lexicon_builder(task_context=OutputPath('context'),
                                     corpus_name=FLAGS.training_corpus).run()
  with tf.Session(FLAGS.tf_master) as sess:
    feature_sizes, domain_sizes, embedding_dims, num_actions = sess.run(
        gen_parser_ops.feature_size(task_context=OutputPath('context'),
                                    arg_prefix=FLAGS.arg_prefix))

  # Well formed and projectivize.
  if FLAGS.projectivize_training_set:
    logging.info('Preprocessing...')
    with tf.Session(FLAGS.tf_master) as sess:
      source, last = gen_parser_ops.document_source(
          task_context=OutputPath('context'),
          batch_size=FLAGS.batch_size,
          corpus_name=FLAGS.training_corpus)
      sink = gen_parser_ops.document_sink(
          task_context=OutputPath('context'),
          corpus_name='projectivized-training-corpus',
          documents=gen_parser_ops.projectivize_filter(
              gen_parser_ops.well_formed_filter(source,
                                                task_context=OutputPath(
                                                    'context')),
              task_context=OutputPath('context')))
      while True:
        tf_last, _ = sess.run([last, sink])
        if tf_last:
          break

  logging.info('Training...')
  with tf.Session(FLAGS.tf_master) as sess:
    Train(sess, num_actions, feature_sizes, domain_sizes, embedding_dims) 
Developer: ringringyi, Project: DOTA_models, Lines: 45, Source: parser_trainer.py

Example 9: train

# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Or: from tensorflow.python.platform.tf_logging import set_verbosity [as alias]
def train(data_folder):

    g = tf.Graph()
    with g.as_default():
        # Load dataset.
        frames, audio, ground_truth, _ = data_provider.get_split(data_folder, True,
                                                                 'train', FLAGS.batch_size,
                                                                 seq_length=FLAGS.seq_length)

        # Define model graph.
        with slim.arg_scope([slim.batch_norm, slim.layers.dropout],
                            is_training=True):
            with slim.arg_scope(slim.nets.resnet_utils.resnet_arg_scope(is_training=True)):
                prediction = models.get_model(FLAGS.model)(frames, audio,
                                                           hidden_units=FLAGS.hidden_units)

        for i, name in enumerate(['arousal', 'valence']):
            pred_single = tf.reshape(prediction[:, :, i], (-1,))
            gt_single = tf.reshape(ground_truth[:, :, i], (-1,))

            loss = losses.concordance_cc(pred_single, gt_single)
            tf.summary.scalar('losses/{} loss'.format(name), loss)

            mse = tf.reduce_mean(tf.square(pred_single - gt_single))
            tf.summary.scalar('losses/mse {} loss'.format(name), mse)

            slim.losses.add_loss(loss / 2.)

        total_loss = slim.losses.get_total_loss()
        tf.summary.scalar('losses/total loss', total_loss)

        optimizer = tf.train.AdamOptimizer(FLAGS.initial_learning_rate)

        init_fn = None
        with tf.Session(graph=g) as sess:
            if FLAGS.pretrained_model_checkpoint_path:
                # Need to specify which variables to restore (use scope of models)
                variables_to_restore = slim.get_variables()
                init_fn = slim.assign_from_checkpoint_fn(
                        FLAGS.pretrained_model_checkpoint_path, variables_to_restore)

            train_op = slim.learning.create_train_op(total_loss,
                                                     optimizer,
                                                     summarize_gradients=True)


            logging.set_verbosity(1)
            slim.learning.train(train_op,
                                FLAGS.train_dir,
                                init_fn=init_fn,
                                save_summaries_secs=60,
                                save_interval_secs=300) 
Developer: tzirakis, Project: Multimodal-Emotion-Recognition, Lines: 54, Source: emotion_train.py
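Note that in TensorFlow 1.x the same module is also exposed publicly as tf.logging, so the examples above can usually be written without importing from tensorflow.python.platform directly; in TensorFlow 2.x the equivalent entry point is tf.compat.v1.logging. A minimal sketch, assuming the corresponding TensorFlow version is installed:

# TensorFlow 1.x: tf.logging is the public alias for the tf_logging module.
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)

# TensorFlow 2.x: the same 1.x API is kept under tf.compat.v1.
# tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)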


Note: The tensorflow.python.platform.tf_logging.set_verbosity method examples in this article were compiled by 纯净天空 from open-source code and documentation hosting platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.