

Python tf_record.tf_record_iterator Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.lib.io.tf_record.tf_record_iterator. If you are unsure what tf_record.tf_record_iterator does or how to use it, the curated code examples below should help. You can also explore other usage examples from the tensorflow.python.lib.io.tf_record module.


The following presents 9 code examples of tf_record.tf_record_iterator, sorted by popularity by default.
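All of the examples share one basic pattern: open a TFRecord file, iterate over its raw records, and deserialize each record into a protocol buffer. As a quick orientation, here is a minimal sketch of that pattern; the file path and the assumption that records hold serialized tf.train.Example protos are illustrative, not taken from any example below.

# Minimal sketch: iterate over a TFRecord file and parse each raw record
# as a tf.train.Example. 'data.tfrecords' is a placeholder path.
import tensorflow as tf
from tensorflow.python.lib.io import tf_record

for record in tf_record.tf_record_iterator('data.tfrecords'):
    example = tf.train.Example.FromString(record)
    print(sorted(example.features.feature.keys()))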

Example 1: list_events

# Required import: from tensorflow.python.lib.io import tf_record [as alias]
# Or: from tensorflow.python.lib.io.tf_record import tf_record_iterator [as alias]
def list_events(self):
    """List all scalar events in the directory.

    Returns:
      A dictionary. Key is the name of an event. Value is the set of directories that contain that event.
    """
    event_dir_dict = collections.defaultdict(set)

    for event_file in self._glob_events_files(self._paths, recursive=True):
      dir_name = os.path.dirname(event_file)  # Renamed from `dir` to avoid shadowing the builtin.
      try:
        for record in tf_record.tf_record_iterator(event_file):
          event = event_pb2.Event.FromString(record)
          if event.summary is None or event.summary.value is None:
            continue
          for value in event.summary.value:
            if value.simple_value is None or value.tag is None:
              continue
            event_dir_dict[value.tag].add(dir_name)
      except tf.errors.DataLossError:
        # DataLossError seems to happen sometimes for small logs.
        # We want to show good records regardless.
        continue
    return dict(event_dir_dict) 
Developer: googledatalab, Project: pydatalab, Lines of code: 26, Source: _summary.py

Example 2: read_patch_dimensions

# Required import: from tensorflow.python.lib.io import tf_record [as alias]
# Or: from tensorflow.python.lib.io.tf_record import tf_record_iterator [as alias]
def read_patch_dimensions():
  """Reads the dimensions of the input patches from disk.

  Parses the first example in the training set, which must have "height" and
  "width" features.

  Returns:
    Tuple of (height, width) read from disk, using the glob passed to
    --train_input_patches.
  """
  for filename in file_io.get_matching_files(FLAGS.train_input_patches):
    # If one matching file is empty, go on to the next file.
    for record in tf_record.tf_record_iterator(filename):
      example = tf.train.Example.FromString(record)
      # Convert long (int64) to int, necessary for use in feature columns in
      # Python 2.
      patch_height = int(example.features.feature['height'].int64_list.value[0])
      patch_width = int(example.features.feature['width'].int64_list.value[0])
      return patch_height, patch_width 
Developer: tensorflow, Project: moonlight, Lines of code: 21, Source: glyph_patches.py
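For context, read_patch_dimensions above assumes that each serialized Example carries scalar 'height' and 'width' int64 features. Here is a minimal, hypothetical sketch of producing such a record with the TF 1.x writer API; the output path and dimension values are placeholders.

# Hypothetical sketch: write one tf.train.Example carrying the 'height'
# and 'width' int64 features that read_patch_dimensions parses.
import tensorflow as tf

example = tf.train.Example(features=tf.train.Features(feature={
    'height': tf.train.Feature(int64_list=tf.train.Int64List(value=[96])),
    'width': tf.train.Feature(int64_list=tf.train.Int64List(value=[96])),
}))
with tf.python_io.TFRecordWriter('patches.tfrecords') as writer:
    writer.write(example.SerializeToString())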

Example 3: load_clusters

# Required import: from tensorflow.python.lib.io import tf_record [as alias]
# Or: from tensorflow.python.lib.io.tf_record import tf_record_iterator [as alias]
def load_clusters(input_path):
  """Loads TFRecords of Examples representing the k-means clusters.

  Examples are typically the output of `staffline_patches_kmeans_pipeline.py`.

  Args:
    input_path: Path to the TFRecords of Examples.

  Returns:
    A NumPy array of shape (num_clusters, patch_height, patch_width).
  """

  def parse_example(example_str):
    example = tf.train.Example()
    example.ParseFromString(example_str)
    height = example.features.feature['height'].int64_list.value[0]
    width = example.features.feature['width'].int64_list.value[0]
    return np.asarray(
        example.features.feature['features'].float_list.value).reshape(
            (height, width))

  return np.asarray([
      parse_example(example)
      for example in tf_record.tf_record_iterator(input_path)
  ]) 
Developer: tensorflow, Project: moonlight, Lines of code: 27, Source: kmeans_labeler.py

Example 4: local_predict

# Required import: from tensorflow.python.lib.io import tf_record [as alias]
# Or: from tensorflow.python.lib.io.tf_record import tf_record_iterator [as alias]
def local_predict(args):
  """Runs prediction locally."""

  sess = session.Session()
  _ = loader.load(sess, [tag_constants.SERVING], args.model_dir)

  # Get the mappings between aliases and tensor names
  # for both inputs and outputs.
  input_alias_map = json.loads(sess.graph.get_collection('inputs')[0])
  output_alias_map = json.loads(sess.graph.get_collection('outputs')[0])
  aliases, tensor_names = zip(*output_alias_map.items())

  for input_file in args.input:
    feed_dict = collections.defaultdict(list)
    for line in tf_record.tf_record_iterator(input_file):
      feed_dict[input_alias_map['examples_bytes']].append(line)

    if args.dry_run:
      print('Feed data dict %s to graph and fetch %s' % (
          feed_dict, tensor_names))
    else:
      result = sess.run(fetches=tensor_names, feed_dict=feed_dict)
      for row in zip(*result):
        print(json.dumps(
            {name: (value.tolist() if getattr(value, 'tolist', None) else value)
             for name, value in zip(aliases, row)})) 
Developer: GoogleCloudPlatform, Project: cloudml-samples, Lines of code: 28, Source: local_predict.py

Example 5: testWriteEvents

# Required import: from tensorflow.python.lib.io import tf_record [as alias]
# Or: from tensorflow.python.lib.io.tf_record import tf_record_iterator [as alias]
def testWriteEvents(self):
    file_prefix = os.path.join(self.get_temp_dir(), "events")
    writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(file_prefix))
    filename = compat.as_text(writer.FileName())
    event_written = event_pb2.Event(
        wall_time=123.45, step=67,
        summary=summary_pb2.Summary(
            value=[summary_pb2.Summary.Value(tag="foo", simple_value=89.0)]))
    writer.WriteEvent(event_written)
    writer.Flush()
    writer.Close()

    with self.assertRaises(errors.NotFoundError):
      for r in tf_record.tf_record_iterator(filename + "DOES_NOT_EXIST"):
        self.assertTrue(False)

    reader = tf_record.tf_record_iterator(filename)
    event_read = event_pb2.Event()

    event_read.ParseFromString(next(reader))
    self.assertTrue(event_read.HasField("file_version"))

    event_read.ParseFromString(next(reader))
    # Second event
    self.assertProtoEquals("""
    wall_time: 123.45 step: 67
    summary { value { tag: 'foo' simple_value: 89.0 } }
    """, event_read)

    with self.assertRaises(StopIteration):
      next(reader) 
Developer: tobegit3hub, Project: deep_image_model, Lines of code: 33, Source: events_writer_test.py

Example 6: testPipeline_corpusImage

# Required import: from tensorflow.python.lib.io import tf_record [as alias]
# Or: from tensorflow.python.lib.io.tf_record import tf_record_iterator [as alias]
def testPipeline_corpusImage(self):
    filename = os.path.join(tf.resource_loader.get_data_files_path(),
                            '../../testdata/IMSLP00747-000.png')
    with tempfile.NamedTemporaryFile() as output_examples:
      # Run the pipeline to get the staffline patches.
      with beam.Pipeline() as pipeline:
        dofn = staffline_patches_dofn.StafflinePatchesDoFn(
            PATCH_HEIGHT, PATCH_WIDTH, NUM_STAFFLINES, TIMEOUT_MS,
            MAX_PATCHES_PER_PAGE)
        # pylint: disable=expression-not-assigned
        (pipeline | beam.transforms.Create([filename])
         | beam.transforms.ParDo(dofn) | beam.io.WriteToTFRecord(
             output_examples.name,
             beam.coders.ProtoCoder(tf.train.Example),
             shard_name_template=''))
      # Get the staffline images from a local TensorFlow session.
      extractor = staffline_extractor.StafflinePatchExtractor(
          staffline_extractor.DEFAULT_NUM_SECTIONS, PATCH_HEIGHT, PATCH_WIDTH)
      with tf.Session(graph=extractor.graph):
        expected_patches = [
            tuple(patch.ravel())
            for unused_key, patch in extractor.page_patch_iterator(filename)
        ]
      for example_bytes in tf_record.tf_record_iterator(output_examples.name):
        example = tf.train.Example()
        example.ParseFromString(example_bytes)
        patch_pixels = tuple(
            example.features.feature['features'].float_list.value)
        if patch_pixels not in expected_patches:
          self.fail('Missing patch {}'.format(patch_pixels)) 
Developer: tensorflow, Project: moonlight, Lines of code: 32, Source: staffline_patches_dofn_test.py

Example 7: summary_iterator

# Required import: from tensorflow.python.lib.io import tf_record [as alias]
# Or: from tensorflow.python.lib.io.tf_record import tf_record_iterator [as alias]
def summary_iterator(path):
  # pylint: disable=line-too-long
  """An iterator for reading `Event` protocol buffers from an event file.

  You can use this function to read events written to an event file. It returns
  a Python iterator that yields `Event` protocol buffers.

  Example: Print the contents of an events file.

  ```python
  for e in tf.train.summary_iterator(path to events file):
      print(e)
  ```

  Example: Print selected summary values.

  ```python
  # This example supposes that the events file contains summaries with a
  # summary value tag 'loss'.  These could have been added by calling
  # `add_summary()`, passing the output of a scalar summary op created
  # with: `tf.summary.scalar('loss', loss_tensor)`.
  for e in tf.train.summary_iterator(path to events file):
      for v in e.summary.value:
          if v.tag == 'loss':
              print(v.simple_value)
  ```

  See the protocol buffer definitions of
  [Event](https://www.tensorflow.org/code/tensorflow/core/util/event.proto)
  and
  [Summary](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
  for more information about their attributes.

  Args:
    path: The path to an event file created by a `SummaryWriter`.

  Yields:
    `Event` protocol buffers.
  """
  # pylint: enable=line-too-long
  for r in tf_record.tf_record_iterator(path):
    yield event_pb2.Event.FromString(r) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 44, Source: summary_iterator.py

Example 8: get_events

# Required import: from tensorflow.python.lib.io import tf_record [as alias]
# Or: from tensorflow.python.lib.io.tf_record import tf_record_iterator [as alias]
def get_events(self, event_names):
    """Get all events as pandas DataFrames given a list of names.
    Args:
      event_names: A list of events to get.
    Returns:
      A list with the same length and order as event_names. Each element is a dictionary
          {dir1: DataFrame1, dir2: DataFrame2, ...}.
          Multiple directories may contain events with the same name, but they are different
          events (e.g. 'loss' under train_set/ and 'loss' under eval_set/).
    """

    if isinstance(event_names, six.string_types):
      event_names = [event_names]

    all_events = self.list_events()
    dirs_to_look = set()
    for event, dirs in six.iteritems(all_events):
      if event in event_names:
        dirs_to_look.update(dirs)

    ret_events = [collections.defaultdict(lambda: pd.DataFrame(columns=['time', 'step', 'value']))
                  for i in range(len(event_names))]
    for event_file in self._glob_events_files(dirs_to_look, recursive=False):
      try:
        for record in tf_record.tf_record_iterator(event_file):
          event = event_pb2.Event.FromString(record)
          if event.summary is None or event.wall_time is None or event.summary.value is None:
            continue

          event_time = datetime.datetime.fromtimestamp(event.wall_time)
          for value in event.summary.value:
            if value.tag not in event_names or value.simple_value is None:
              continue

            index = event_names.index(value.tag)
            dir_event_dict = ret_events[index]
            dir_name = os.path.dirname(event_file)  # Renamed from `dir` to avoid shadowing the builtin.
            # Append a row.
            df = dir_event_dict[dir_name]
            df.loc[len(df)] = [event_time, event.step, value.simple_value]
      except tf.errors.DataLossError:
        # DataLossError seems to happen sometimes for small logs.
        # We want to show good records regardless.
        continue

    for idx, dir_event_dict in enumerate(ret_events):
      for df in dir_event_dict.values():
        df.sort_values(by=['time'], inplace=True)
      ret_events[idx] = dict(dir_event_dict)

    return ret_events 
Developer: googledatalab, Project: pydatalab, Lines of code: 53, Source: _summary.py

Example 9: summary_iterator

# Required import: from tensorflow.python.lib.io import tf_record [as alias]
# Or: from tensorflow.python.lib.io.tf_record import tf_record_iterator [as alias]
def summary_iterator(path):
  # pylint: disable=line-too-long
  """An iterator for reading `Event` protocol buffers from an event file.

  You can use this function to read events written to an event file. It returns
  a Python iterator that yields `Event` protocol buffers.

  Example: Print the contents of an events file.

  ```python
  for e in tf.train.summary_iterator(path to events file):
      print(e)
  ```

  Example: Print selected summary values.

  ```python
  # This example supposes that the events file contains summaries with a
  # summary value tag 'loss'.  These could have been added by calling
  # `add_summary()`, passing the output of a scalar summary op created
  # with: `tf.scalar_summary(['loss'], loss_tensor)`.
  for e in tf.train.summary_iterator(path to events file):
      for v in e.summary.value:
          if v.tag == 'loss':
              print(v.simple_value)
  ```

  See the protocol buffer definitions of
  [Event](https://www.tensorflow.org/code/tensorflow/core/util/event.proto)
  and
  [Summary](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
  for more information about their attributes.

  Args:
    path: The path to an event file created by a `SummaryWriter`.

  Yields:
    `Event` protocol buffers.
  """
  # pylint: enable=line-too-long
  for r in tf_record.tf_record_iterator(path):
    yield event_pb2.Event.FromString(r) 
Developer: tobegit3hub, Project: deep_image_model, Lines of code: 44, Source: summary_iterator.py


Note: The tensorflow.python.lib.io.tf_record.tf_record_iterator method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code belongs to the original authors. For distribution and use, please refer to the License of the corresponding project; do not reproduce without permission.