

Python tf_record.tf_record_iterator Function Code Examples

This article collects typical usage examples of the Python function tensorflow.python.lib.io.tf_record.tf_record_iterator. If you are wondering what tf_record_iterator does, how to call it, or what it looks like in real code, the curated examples below should help.


The sections below present 15 code examples of tf_record_iterator, sorted by popularity by default.
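Before the individual examples, here is a minimal sketch of the typical write/read round trip (a sketch only, assuming TensorFlow 1.x; the temp-file path and record contents are illustrative):

import os
import tempfile

from tensorflow.python.lib.io import tf_record

path = os.path.join(tempfile.gettempdir(), "example.tfrecord")

# Write a few raw byte records.
with tf_record.TFRecordWriter(path) as writer:
  for record in [b"first", b"second", b"third"]:
    writer.write(record)

# tf_record_iterator yields the raw bytes of each record, in order.
for record in tf_record.tf_record_iterator(path):
  print(record)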

Example 1: testWriteEvents

  def testWriteEvents(self):
    file_prefix = os.path.join(self.get_temp_dir(), "events")
    writer = pywrap_tensorflow.EventsWriter(file_prefix)
    filename = writer.FileName()
    event_written = event_pb2.Event(
        wall_time=123.45, step=67,
        summary=summary_pb2.Summary(
            value=[summary_pb2.Summary.Value(tag="foo", simple_value=89.0)]))
    writer.WriteEvent(event_written)
    writer.Flush()
    writer.Close()

    with self.assertRaises(IOError):
      for r in tf_record.tf_record_iterator(filename + "DOES_NOT_EXIST"):
        self.assertTrue(False)

    reader = tf_record.tf_record_iterator(filename)
    event_read = event_pb2.Event()

    event_read.ParseFromString(next(reader))
    self.assertTrue(event_read.HasField("file_version"))

    event_read.ParseFromString(next(reader))
    # Second event
    self.assertProtoEquals("""
    wall_time: 123.45 step: 67
    summary { value { tag: 'foo' simple_value: 89.0 } }
    """, event_read)

    with self.assertRaises(StopIteration):
      next(reader)
Author: debaratidas1994, Project: tensorflow, Lines: 31, Source: events_writer_test.py

Example 2: _CompressionSizeDelta

  def _CompressionSizeDelta(self, records, options_a, options_b):
    """Validate compression with options_a and options_b and return size delta.

    Compress records with options_a and options_b. Uncompress both compressed
    files and assert that the contents match the original records. Finally
    return the size of the file compressed with options_a minus the size of the
    file compressed with options_b.

    Args:
      records: The records to compress
      options_a: First set of options to compress with, the baseline for size.
      options_b: Second set of options to compress with.

    Returns:
      The difference in file size when using options_a vs options_b. A negative
      value means options_a achieved better (smaller) compression than
      options_b; a positive value means options_b compressed better.

    """

    fn_a = self._WriteRecordsToFile(records, "tfrecord_a", options=options_a)
    test_a = list(tf_record.tf_record_iterator(fn_a, options=options_a))
    self.assertEqual(records, test_a, options_a)

    fn_b = self._WriteRecordsToFile(records, "tfrecord_b", options=options_b)
    test_b = list(tf_record.tf_record_iterator(fn_b, options=options_b))
    self.assertEqual(records, test_b, options_b)

    # Negative number => better compression.
    return os.path.getsize(fn_a) - os.path.getsize(fn_b)
Author: AnishShah, Project: tensorflow, Lines: 30, Source: tf_record_test.py
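For context, the same size comparison can be sketched outside the test harness using only the public tf_record API (a sketch assuming TF 1.x; compressed_size and the probe file name are illustrative helpers, not part of the original test):

import os
import tempfile

from tensorflow.python.lib.io import tf_record

def compressed_size(records, compression_type):
  """Write records with the given compression and return the file size."""
  path = os.path.join(tempfile.gettempdir(), "size_probe.tfrecord")
  options = tf_record.TFRecordOptions(compression_type)
  with tf_record.TFRecordWriter(path, options=options) as writer:
    for record in records:
      writer.write(record)
  # Round-trip check: the records must read back unchanged.
  assert list(tf_record.tf_record_iterator(path, options=options)) == records
  return os.path.getsize(path)

records = [b"x" * 1000] * 100
delta = (compressed_size(records, tf_record.TFRecordCompressionType.GZIP) -
         compressed_size(records, tf_record.TFRecordCompressionType.ZLIB))
print("GZIP minus ZLIB bytes:", delta)  # negative => GZIP gave the smaller file

As in _CompressionSizeDelta above, a negative delta means the first set of options compressed better.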

Example 3: testZLibFlushRecord

  def testZLibFlushRecord(self):
    original = [b"small record"]
    fn = self._WriteRecordsToFile(original, "small_record")
    with open(fn, "rb") as h:
      buff = h.read()

    # creating more blocks and trailing blocks shouldn't break reads
    compressor = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS)

    output = b""
    for c in buff:
      if isinstance(c, int):
        c = six.int2byte(c)
      output += compressor.compress(c)
      output += compressor.flush(zlib.Z_FULL_FLUSH)

    output += compressor.flush(zlib.Z_FULL_FLUSH)
    output += compressor.flush(zlib.Z_FULL_FLUSH)
    output += compressor.flush(zlib.Z_FINISH)

    # overwrite the original file with the compressed data
    with open(fn, "wb") as h:
      h.write(output)

    options = tf_record.TFRecordOptions(TFRecordCompressionType.ZLIB)
    actual = list(tf_record.tf_record_iterator(fn, options=options))
    self.assertEqual(actual, original)
Author: Huoxubeiyin, Project: tensorflow, Lines: 27, Source: tf_record_test.py

Example 4: testWriteGZIP

  def testWriteGZIP(self):
    options = tf_record.TFRecordOptions(tf_record.TFRecordCompressionType.GZIP)
    self.evaluate(
        self.writer_fn(self._createFile(options), compression_type="GZIP"))
    for i, r in enumerate(
        tf_record.tf_record_iterator(self._outputFilename(), options=options)):
      self.assertAllEqual(self._record(i), r)
Author: Wajih-O, Project: tensorflow, Lines: 7, Source: tf_record_writer_test.py

Example 5: list_events

  def list_events(self):
    """List all scalar events in the directory.

    Returns:
      A dictionary. Key is the name of an event; value is the set of dirs that contain that event.
    """
    event_dir_dict = {}
    for event_file in self._glob_events_files(self._paths):
      dir = os.path.dirname(event_file)
      try:
        for record in tf_record.tf_record_iterator(event_file):
          event = event_pb2.Event.FromString(record)
          if event.summary is None or event.summary.value is None:
            continue
          for value in event.summary.value:
            if value.simple_value is None or value.tag is None:
              continue
            if value.tag not in event_dir_dict:
              event_dir_dict[value.tag] = set()
            event_dir_dict[value.tag].add(dir)
      except Exception:
        # It seems current TF (1.0) has a bug when iterating events from a file near the end.
        # For now just catch and pass.
        # print('Error in iterating events from file ' + event_file)
        continue
    return event_dir_dict
Author: javiervicho, Project: pydatalab, Lines: 26, Source: _summary.py
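The same event-scanning pattern, reduced to a standalone sketch that prints every scalar summary in one event file (the event-file path below is hypothetical; point it at a real TensorBoard log):

from tensorflow.core.util import event_pb2
from tensorflow.python.lib.io import tf_record

event_file = "/tmp/logdir/events.out.tfevents.example"  # hypothetical path

for record in tf_record.tf_record_iterator(event_file):
  event = event_pb2.Event.FromString(record)
  for value in event.summary.value:
    if value.HasField("simple_value"):  # keep only scalar summaries
      print(event.step, value.tag, value.simple_value)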

Example 6: local_predict

def local_predict(args):
  """Runs prediction locally."""

  sess = session.Session()
  _ = loader.load(sess, [tag_constants.SERVING], args.model_dir)

  # get the mappings between aliases and tensor names
  # for both inputs and outputs
  input_alias_map = json.loads(sess.graph.get_collection('inputs')[0])
  output_alias_map = json.loads(sess.graph.get_collection('outputs')[0])
  aliases, tensor_names = zip(*output_alias_map.items())

  for input_file in args.input:
    feed_dict = collections.defaultdict(list)
    for line in tf_record.tf_record_iterator(input_file):
      feed_dict[input_alias_map['examples_bytes']].append(line)

    if args.dry_run:
      print('Feed data dict %s to graph and fetch %s' % (
          feed_dict, tensor_names))
    else:
      result = sess.run(fetches=tensor_names, feed_dict=feed_dict)
      for row in zip(*result):
        print(json.dumps(
            {name: (value.tolist() if getattr(value, 'tolist', None) else value)
             for name, value in zip(aliases, row)}))
Author: zhang01GA, Project: cloudml-samples, Lines: 26, Source: local_predict.py

Example 7: testWrite

  def testWrite(self):
    with self.cached_session() as sess:
      sess.run(
          self.writer, feed_dict={
              self.filename: self._createFile(),
          })
    for i, r in enumerate(tf_record.tf_record_iterator(self._outputFilename())):
      self.assertAllEqual(self._record(i), r)
Author: JonathanRaiman, Project: tensorflow, Lines: 8, Source: tf_record_writer_test.py

Example 8: testWriteAndRead

  def testWriteAndRead(self):
    records = list(map(self._Record, range(self._num_records)))
    for record in records:
      self._writer.write(record)
    self._writer.close()

    actual = list(tf_record.tf_record_iterator(self._fn, self._options))
    self.assertListEqual(actual, records)
Author: ZhangXinNan, Project: tensorflow, Lines: 8, Source: tf_record_test.py

Example 9: get_events

  def get_events(self, event_names):
    """Get all events as pandas DataFrames given a list of names.

    Args:
      event_names: A list of events to get.

    Returns:
      A list with the same length as event_names. Each element is a dictionary
          {dir1: DataFrame1, dir2: DataFrame2, ...}.
          Multiple directories may contain events with the same name, but they are
          different events (e.g. 'loss' under train_set/ and 'loss' under eval_set/).
    """

    if ((sys.version_info.major > 2 and isinstance(event_names, str)) or
       (sys.version_info.major <= 2 and isinstance(event_names, basestring))):
      event_names = [event_names]

    all_events = self.list_events()
    dirs_to_look = set()
    for event, dirs in all_events.items():
      if event in event_names:
        dirs_to_look.update(dirs)

    ret_events = [dict() for i in range(len(event_names))]
    for dir in dirs_to_look:
      for event_file in self._glob_events_files([dir]):
        try:
          for record in tf_record.tf_record_iterator(event_file):
            event = event_pb2.Event.FromString(record)
            if event.summary is None or event.wall_time is None or event.summary.value is None:
              continue

            event_time = datetime.datetime.fromtimestamp(event.wall_time)
            for value in event.summary.value:
              if value.tag not in event_names or value.simple_value is None:
                continue

              index = event_names.index(value.tag)
              dir_event_dict = ret_events[index]
              if dir not in dir_event_dict:
                dir_event_dict[dir] = pd.DataFrame(
                    [[event_time, event.step, value.simple_value]],
                    columns=['time', 'step', 'value'])
              else:
                df = dir_event_dict[dir]
                # Append a row.
                df.loc[len(df)] = [event_time, event.step, value.simple_value]
        except Exception:
          # It seems current TF (1.0) has a bug when iterating events from a file near the end.
          # For now just catch and pass.
          # print('Error in iterating events from file ' + event_file)
          continue

    for dir_event_dict in ret_events:
      for df in dir_event_dict.values():
        df.sort_values(by=['time'], inplace=True)

    return ret_events
Author: javiervicho, Project: pydatalab, Lines: 58, Source: _summary.py

Example 10: testWriteGzipRead

  def testWriteGzipRead(self):
    original = [b"foo", b"bar"]
    options = tf_record.TFRecordOptions(TFRecordCompressionType.GZIP)
    fn = self._WriteRecordsToFile(original, "write_gzip_read.tfrecord.gz",
                                  options)

    gzfn = self._GzipDecompressFile(fn, "write_gzip_read.tfrecord")
    actual = list(tf_record.tf_record_iterator(gzfn))
    self.assertEqual(actual, original)
Author: Huoxubeiyin, Project: tensorflow, Lines: 9, Source: tf_record_test.py

Example 11: testGzipReadWrite

  def testGzipReadWrite(self):
    """Verify that files produced are gzip compatible."""
    original = [b"foo", b"bar"]
    fn = self._WriteRecordsToFile(original, "gzip_read_write.tfrecord")
    gzfn = self._GzipCompressFile(fn, "tfrecord.gz")

    options = tf_record.TFRecordOptions(TFRecordCompressionType.GZIP)
    actual = list(tf_record.tf_record_iterator(gzfn, options=options))
    self.assertEqual(actual, original)
Author: Huoxubeiyin, Project: tensorflow, Lines: 9, Source: tf_record_test.py

Example 12: _readLastEvent

  def _readLastEvent(self, logdir=None):
    if not logdir:
      logdir = self._tmp_logdir
    files = [f for f in gfile.ListDirectory(logdir)
             if not gfile.IsDirectory(os.path.join(logdir, f))]
    file_path = os.path.join(logdir, files[0])
    records = list(tf_record.tf_record_iterator(file_path))
    event = event_pb2.Event()
    event.ParseFromString(records[-1])
    return event
Author: Crazyonxh, Project: tensorflow, Lines: 10, Source: summary_writer_test.py

Example 13: testZlibReadWrite

  def testZlibReadWrite(self):
    """Verify that files produced are zlib compatible."""
    original = [b"foo", b"bar"]
    fn = self._WriteRecordsToFile(original, "zlib_read_write.tfrecord")
    zfn = self._ZlibCompressFile(fn, "zlib_read_write.tfrecord.z")

    # read the compressed contents and verify.
    options = tf_record.TFRecordOptions(TFRecordCompressionType.ZLIB)
    actual = list(tf_record.tf_record_iterator(zfn, options=options))
    self.assertEqual(actual, original)
Author: Huoxubeiyin, Project: tensorflow, Lines: 10, Source: tf_record_test.py

Example 14: testWriteZlibRead

  def testWriteZlibRead(self):
    """Verify compression with TFRecordWriter is zlib library compatible."""
    original = [b"foo", b"bar"]
    fn = self._WriteCompressedRecordsToFile(original,
                                            "write_zlib_read.tfrecord.z")
    zfn = self._ZlibDecompressFile(fn, "write_zlib_read.tfrecord")
    actual = []
    for r in tf_record.tf_record_iterator(zfn):
      actual.append(r)
    self.assertEqual(actual, original)
Author: AndrewTwinz, Project: tensorflow, Lines: 10, Source: reader_ops_test.py

Example 15: testWriteZlibRead

  def testWriteZlibRead(self):
    """Verify compression with TFRecordWriter is zlib library compatible."""
    original = [b"foo", b"bar"]
    options = tf_record.TFRecordOptions(TFRecordCompressionType.ZLIB)
    fn = self._WriteRecordsToFile(original, "write_zlib_read.tfrecord.z",
                                  options)

    zfn = self._ZlibDecompressFile(fn, "write_zlib_read.tfrecord")
    actual = list(tf_record.tf_record_iterator(zfn))
    self.assertEqual(actual, original)
Author: Huoxubeiyin, Project: tensorflow, Lines: 10, Source: tf_record_test.py


Note: The tensorflow.python.lib.io.tf_record.tf_record_iterator examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective authors, and copyright remains with those authors; consult each project's license before distributing or reusing the code. Do not reproduce this article without permission.