

Python gen_parser_ops.projectivize_filter Method Code Examples

This article collects typical usage examples of the Python method syntaxnet.ops.gen_parser_ops.projectivize_filter. If you are unsure what gen_parser_ops.projectivize_filter does or how to call it, the curated code examples below should help. You can also explore other usage examples from the module it lives in, syntaxnet.ops.gen_parser_ops.


The following presents 3 code examples of gen_parser_ops.projectivize_filter, sorted by popularity by default.
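All three examples build variations of the same op chain, so here is a minimal standalone sketch of it first. This is an illustration rather than code from the projects below: the context path and corpus name are placeholders, and the keyword arguments mirror the calls visible in Example 3. document_source emits batches of serialized sentences together with a flag marking the last batch, well_formed_filter drops sentences whose dependency trees are malformed, and projectivize_filter rewrites non-projective trees into projective ones.

import tensorflow as tf
from syntaxnet.ops import gen_parser_ops

CONTEXT = '/path/to/context.pbtxt'  # placeholder task context file

with tf.Session() as sess:
  # Each run yields one batch of serialized Sentence protos; `last` marks
  # the final batch of the corpus.
  source, last = gen_parser_ops.document_source(
      task_context=CONTEXT, batch_size=32, corpus_name='training-corpus')
  # Keep only well-formed trees, then make the survivors projective.
  well_formed = gen_parser_ops.well_formed_filter(source,
                                                  task_context=CONTEXT)
  projective = gen_parser_ops.projectivize_filter(well_formed,
                                                  task_context=CONTEXT)
  while True:
    sentences, is_last = sess.run([projective, last])
    # ... consume `sentences` here ...
    if is_last:
      break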

Example 1: __init__

# Required import: from syntaxnet.ops import gen_parser_ops [as alias]
# Or: from syntaxnet.ops.gen_parser_ops import projectivize_filter [as alias]
# Also requires: import tensorflow as tf. This is the constructor of a
# reader class in sentence_io.py; only __init__ is shown.
def __init__(self, filepath, batch_size=32,
               projectivize=False, morph_to_pos=False):
    self._graph = tf.Graph()
    self._session = tf.Session(graph=self._graph)
    task_context_str = """
          input {
            name: 'documents'
            record_format: 'conll-sentence'
            Part {
             file_pattern: '%s'
            }
          }""" % filepath
    if morph_to_pos:
      task_context_str += """
          Parameter {
            name: "join_category_to_pos"
            value: "true"
          }
          Parameter {
            name: "add_pos_as_attribute"
            value: "true"
          }
          Parameter {
            name: "serialize_morph_to_pos"
            value: "true"
          }
          """
    with self._graph.as_default():
      self._source, self._is_last = gen_parser_ops.document_source(
          task_context_str=task_context_str, batch_size=batch_size)
      self._source = gen_parser_ops.well_formed_filter(self._source)
      if projectivize:
        self._source = gen_parser_ops.projectivize_filter(self._source) 
Author: ringringyi | Project: DOTA_models | Lines: 35 | Source file: sentence_io.py
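As a hypothetical usage sketch of the reader above: the class name ConllSentenceReader is inferred from the source file and is not visible in the snippet, and a real caller would normally go through a public read() method rather than the private attributes the constructor sets up.

# Hypothetical driver; the class name is an assumption, and the private
# attributes are used only because __init__ is all the snippet shows.
reader = ConllSentenceReader('/data/train.conll', batch_size=32,
                             projectivize=True)
while True:
  sentences, is_last = reader._session.run(
      [reader._source, reader._is_last])
  # ... process this batch of serialized Sentence protos ...
  if is_last:
    break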

Example 2: __init__

# Required import: from syntaxnet.ops import gen_parser_ops [as alias]
# Or: from syntaxnet.ops.gen_parser_ops import projectivize_filter [as alias]
# Also requires: import tensorflow as tf. This constructor generalizes
# Example 1 with a configurable record format; only __init__ is shown.
def __init__(self,
               filepath,
               record_format,
               batch_size=32,
               check_well_formed=False,
               projectivize=False,
               morph_to_pos=False):
    self._graph = tf.Graph()
    self._session = tf.Session(graph=self._graph)
    task_context_str = """
          input {
            name: 'documents'
            record_format: '%s'
            Part {
             file_pattern: '%s'
            }
          }""" % (record_format, filepath)
    if morph_to_pos:
      task_context_str += """
          Parameter {
            name: "join_category_to_pos"
            value: "true"
          }
          Parameter {
            name: "add_pos_as_attribute"
            value: "true"
          }
          Parameter {
            name: "serialize_morph_to_pos"
            value: "true"
          }
          """
    with self._graph.as_default():
      self._source, self._is_last = gen_parser_ops.document_source(
          task_context_str=task_context_str, batch_size=batch_size)
      if check_well_formed:
        self._source = gen_parser_ops.well_formed_filter(self._source)
      if projectivize:
        self._source = gen_parser_ops.projectivize_filter(self._source) 
Author: rky0930 | Project: yolo_v2 | Lines: 41 | Source file: sentence_io.py
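Compared with Example 1, this variant takes record_format as a parameter instead of hard-coding 'conll-sentence', and applies well_formed_filter only when check_well_formed is set. A hypothetical instantiation follows, with the class name assumed since only __init__ appears above:

# Class name and file path are placeholders for illustration.
reader = FormatSentenceReader('/data/train.conllu', 'conll-sentence',
                              batch_size=64, check_well_formed=True,
                              projectivize=True)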

Example 3: main

# Required import: from syntaxnet.ops import gen_parser_ops [as alias]
# Or: from syntaxnet.ops.gen_parser_ops import projectivize_filter [as alias]
# Also requires: import tensorflow as tf, gfile and logging from
# tensorflow.python.platform, and the surrounding module's FLAGS,
# OutputPath, RewriteContext, and Train helpers (not shown here).
def main(unused_argv):
  logging.set_verbosity(logging.INFO)
  if not gfile.IsDirectory(OutputPath('')):
    gfile.MakeDirs(OutputPath(''))

  # Rewrite context.
  RewriteContext()

  # Creates necessary term maps.
  if FLAGS.compute_lexicon:
    logging.info('Computing lexicon...')
    with tf.Session(FLAGS.tf_master) as sess:
      gen_parser_ops.lexicon_builder(task_context=OutputPath('context'),
                                     corpus_name=FLAGS.training_corpus).run()
  with tf.Session(FLAGS.tf_master) as sess:
    feature_sizes, domain_sizes, embedding_dims, num_actions = sess.run(
        gen_parser_ops.feature_size(task_context=OutputPath('context'),
                                    arg_prefix=FLAGS.arg_prefix))

  # Filter out malformed sentences and projectivize the training set.
  if FLAGS.projectivize_training_set:
    logging.info('Preprocessing...')
    with tf.Session(FLAGS.tf_master) as sess:
      source, last = gen_parser_ops.document_source(
          task_context=OutputPath('context'),
          batch_size=FLAGS.batch_size,
          corpus_name=FLAGS.training_corpus)
      sink = gen_parser_ops.document_sink(
          task_context=OutputPath('context'),
          corpus_name='projectivized-training-corpus',
          documents=gen_parser_ops.projectivize_filter(
              gen_parser_ops.well_formed_filter(source,
                                                task_context=OutputPath(
                                                    'context')),
              task_context=OutputPath('context')))
      while True:
        tf_last, _ = sess.run([last, sink])
        if tf_last:
          break

  logging.info('Training...')
  with tf.Session(FLAGS.tf_master) as sess:
    Train(sess, num_actions, feature_sizes, domain_sizes, embedding_dims) 
Author: ringringyi | Project: DOTA_models | Lines: 45 | Source file: parser_trainer.py
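Note the filter order in this last example: well_formed_filter wraps document_source and projectivize_filter wraps its output, so only structurally valid trees are projectivized. The result is written back out through document_sink as a 'projectivized-training-corpus', so the corpus is preprocessed once before training begins.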


Note: The syntaxnet.ops.gen_parser_ops.projectivize_filter examples on this page were collected from open-source projects hosted on platforms such as GitHub. Copyright in each snippet remains with its original authors, and use and redistribution are governed by the corresponding project's license.