

Python gfile.Open Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.platform.gfile.Open. If you are wondering what gfile.Open does, how to call it, or what real-world uses look like, the curated examples below should help. You can also browse further usage examples from the tensorflow.python.platform.gfile module.


Below are 15 code examples of gfile.Open, sorted by popularity by default.
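Before the examples, a minimal sketch of what gfile.Open does may help: it opens a file through TensorFlow's file-system abstraction, so the same call works for local paths and, where support is compiled in, remote ones such as gs:// buckets. The path and contents below are illustrative assumptions, not taken from the examples:

from tensorflow.python.platform import gfile

# Write, then read back, a small text file through the gfile layer.
with gfile.Open('/tmp/gfile_demo.txt', 'w') as f:
  f.write('hello gfile')

with gfile.Open('/tmp/gfile_demo.txt', 'r') as f:
  print(f.read())  # -> 'hello gfile'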

Example 1: generate_tfprof_profile

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Open [as alias]
def generate_tfprof_profile(profiler, tfprof_file):
  """Generates a tfprof profile, writing it to a file and printing top ops.

  Args:
    profiler: A tf.profiler.Profiler. `profiler.add_step` must have already been
      called.
    tfprof_file: The filename to write the ProfileProto to.
  """
  profile_proto = profiler.serialize_to_string()
  log_fn('Dumping ProfileProto to %s' % tfprof_file)
  with gfile.Open(tfprof_file, 'wb') as f:
    f.write(profile_proto)

  # Print out the execution times of the top operations. Note that this
  # information can also be obtained from the dumped ProfileProto, but
  # printing it here means tfprof doesn't have to be used if all the user
  # wants is the top ops.
  options = tf.profiler.ProfileOptionBuilder.time_and_memory()
  options['max_depth'] = _NUM_OPS_TO_PRINT
  options['order_by'] = 'accelerator_micros'
  profiler.profile_operations(options) 
Developer: tensorflow | Project: benchmarks | Lines: 23 | Source: benchmark_cnn.py
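For context, a hedged calling sketch for generate_tfprof_profile (the session, training op, step number, and output path are assumptions; only the function above comes from the source):

# Assumes a TF 1.x session `sess` and a training op `train_op`.
profiler = tf.profiler.Profiler(sess.graph)
run_meta = tf.RunMetadata()
sess.run(train_op,
         options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
         run_metadata=run_meta)
profiler.add_step(0, run_meta)  # must happen before profiling
generate_tfprof_profile(profiler, '/tmp/profile.tfprof')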

Example 2: load_tensor_from_event_file

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Open [as alias]
def load_tensor_from_event_file(event_file_path):
  """Load a tensor from an event file.

  Assumes that the event file contains an `Event` protobuf and the `Event`
  protobuf contains a `Tensor` value.

  Args:
    event_file_path: (`str`) path to the event file.

  Returns:
    The tensor value loaded from the event file, as a `numpy.ndarray`. For
    uninitialized Tensors, returns `None`. For Tensors of data types that
    cannot be converted to `numpy.ndarray` (e.g., `tf.resource`), returns
    `None`.
  """

  event = event_pb2.Event()
  with gfile.Open(event_file_path, "rb") as f:
    event.ParseFromString(f.read())
    return load_tensor_from_event(event) 
Developer: ryfeus | Project: lambda-packs | Lines: 22 | Source: debug_data.py
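A hedged usage sketch (the dump path is hypothetical; Event files of this kind are written by tfdbg debug-wrapped sessions):

tensor_value = load_tensor_from_event_file(
    '/tmp/tfdbg_dump/node_0_DebugIdentity_0.event')
if tensor_value is not None:
  print(tensor_value.shape, tensor_value.dtype)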

Example 3: RetrieveAsset

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Open [as alias]
def RetrieveAsset(logdir, plugin_name, asset_name):
  """Retrieve a particular plugin asset from a logdir.

  Args:
    logdir: A directory that was created by a TensorFlow summary.FileWriter.
    plugin_name: The plugin we want an asset from.
    asset_name: The name of the requested asset.

  Returns:
    string contents of the plugin asset.

  Raises:
    KeyError: if the asset does not exist.
  """

  asset_path = os.path.join(PluginDirectory(logdir, plugin_name), asset_name)
  try:
    with gfile.Open(asset_path, "r") as f:
      return f.read()
  except errors_impl.NotFoundError:
    raise KeyError("Asset path %s not found" % asset_path)
  except errors_impl.OpError as e:
    raise KeyError("Couldn't read asset path: %s, OpError %s" % (asset_path, e)) 
Developer: ryfeus | Project: lambda-packs | Lines: 25 | Source: plugin_asset_util.py
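A hedged usage sketch (the logdir, plugin name, and asset name are assumptions; TensorBoard's projector plugin, for example, stores its config as a plugin asset):

try:
  config_text = RetrieveAsset('/tmp/logdir',
                              'org_tensorflow_tensorboard_projector',
                              'projector_config.pbtxt')
except KeyError:
  config_text = None  # asset missing or unreadable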

Example 4: write_op_log

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Open [as alias]
def write_op_log(graph, log_dir, op_log=None, run_meta=None, add_trace=True):
  """Log provided 'op_log', and add additional model information below.

    The API also assigns ops in tf.trainable_variables() an op type called
    '_trainable_variables'.
    The API also logs 'flops' statistics for ops with op.RegisterStatistics()
    defined. flops calculation depends on Tensor shapes defined in 'graph',
    which might not be complete. 'run_meta', if provided, completes the shape
    information with best effort.

  Args:
    graph: tf.Graph.
    log_dir: directory to write the log file.
    op_log: (Optional) OpLog proto to be written. If not provided, a new
        one is created.
    run_meta: (Optional) RunMetadata proto that helps flops computation using
        run time shape information.
    add_trace: Whether to add op trace information. Used to support "code" view.
  """
  op_log = _merge_default_with_oplog(graph, op_log, run_meta, add_trace)

  with gfile.Open(os.path.join(log_dir, 'tfprof_log'), 'wb') as log:  # serialized proto is bytes
    log.write(op_log.SerializeToString()) 
Developer: ryfeus | Project: lambda-packs | Lines: 25 | Source: tfprof_logger.py
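A hedged calling sketch (the graph contents and log directory are assumptions, and the directory is assumed to exist); the resulting 'tfprof_log' file is the op log that the tfprof tooling consumes:

import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
  _ = tf.Variable(tf.zeros([10, 10]), name='w')
write_op_log(graph, '/tmp/tfprof_logdir')  # writes /tmp/tfprof_logdir/tfprof_log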

Example 5: load_csv_with_header

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Open [as alias]
def load_csv_with_header(filename,
                         target_dtype,
                         features_dtype,
                         target_column=-1):
  """Load dataset from CSV file with a header row."""
  with gfile.Open(filename) as csv_file:
    data_file = csv.reader(csv_file)
    header = next(data_file)
    n_samples = int(header[0])
    n_features = int(header[1])
    data = np.zeros((n_samples, n_features), dtype=features_dtype)
    target = np.zeros((n_samples,), dtype=target_dtype)
    for i, row in enumerate(data_file):
      target[i] = np.asarray(row.pop(target_column), dtype=target_dtype)
      data[i] = np.asarray(row, dtype=features_dtype)

  return Dataset(data=data, target=target) 
Developer: ryfeus | Project: lambda-packs | Lines: 19 | Source: base.py
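A hedged usage sketch, assuming a CSV whose header row starts with the sample and feature counts (the file name and layout follow the iris convention from the same module, but are assumptions here; numpy is assumed imported as np):

# iris.csv header row: '150,4,setosa,versicolor,virginica'
# Each data row: 4 float feature columns, then an integer label.
iris = load_csv_with_header('iris.csv',
                            target_dtype=np.int32,
                            features_dtype=np.float32)
print(iris.data.shape)    # (150, 4)
print(iris.target.shape)  # (150,)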

Example 6: testWriteScreenOutputToFileWorks

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Open [as alias]
def testWriteScreenOutputToFileWorks(self):
    output_path = tempfile.mktemp()

    ui = MockCursesUI(
        40,
        80,
        command_sequence=[
            string_to_codes("babble -n 2>%s\n" % output_path),
            self._EXIT
        ])

    ui.register_command_handler("babble", self._babble, "")
    ui.run_ui()

    self.assertEqual(1, len(ui.unwrapped_outputs))

    with gfile.Open(output_path, "r") as f:
      self.assertEqual(b"bar\nbar\n", f.read())

    # Clean up output file.
    gfile.Remove(output_path) 
Developer: abhisuri97 | Project: auto-alt-text-lambda-api | Lines: 23 | Source: curses_ui_test.py

Example 7: write_op_log

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Open [as alias]
def write_op_log(graph, log_dir, op_log=None, run_meta=None):
  """Log provided 'op_log', and add additional model information below.

    The API also assigns ops in tf.trainable_variables() an op type called
    '_trainable_variables'.
    The API also logs 'flops' statistics for ops with op.RegisterStatistics()
    defined. flops calculation depends on Tensor shapes defined in 'graph',
    which might not be complete. 'run_meta', if provided, completes the shape
    information with best effort.

  Args:
    graph: tf.Graph.
    log_dir: directory to write the log file.
    op_log: (Optional) OpLog proto to be written. If not provided, a new
        one is created.
    run_meta: (Optional) RunMetadata proto that helps flops computation using
        run time shape information.
  """
  op_log = _merge_default_with_oplog(graph, op_log, run_meta)

  with gfile.Open(os.path.join(log_dir, 'tfprof_log'), 'wb') as log:  # serialized proto is bytes
    log.write(op_log.SerializeToString()) 
Developer: abhisuri97 | Project: auto-alt-text-lambda-api | Lines: 24 | Source: tfprof_logger.py

Example 8: read_data_files

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Open [as alias]
def read_data_files(self, subset='train'):
    """Reads from data file and returns images and labels in a numpy array."""
    assert self.data_dir, ('Cannot call `read_data_files` when using synthetic '
                           'data')
    if subset == 'train':
      filenames = [os.path.join(self.data_dir, 'data_batch_%d' % i)
                   for i in xrange(1, 6)]
    elif subset == 'validation':
      filenames = [os.path.join(self.data_dir, 'test_batch')]
    else:
      raise ValueError('Invalid data subset "%s"' % subset)

    inputs = []
    for filename in filenames:
      with gfile.Open(filename, 'rb') as f:  # pickle files are binary
        inputs.append(cPickle.load(f))
    # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
    # input format.
    all_images = np.concatenate(
        [each_input['data'] for each_input in inputs]).astype(np.float32)
    all_labels = np.concatenate(
        [each_input['labels'] for each_input in inputs])
    return all_images, all_labels 
Developer: IntelAI | Project: models | Lines: 25 | Source: datasets.py
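The rows of all_images above are flat 3072-element vectors. Per the CIFAR-10 format page linked in the code, a typical follow-up reshape (an assumption, not part of the original example; `dataset` is a hypothetical instance of the class this method belongs to) looks like:

images, labels = dataset.read_data_files('train')
# Each row stores 3 channels x 32 x 32 pixels, channel-major.
images = images.reshape(-1, 3, 32, 32)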

Example 9: read_data_files

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Open [as alias]
def read_data_files(self, subset='train'):
        """Reads from data file and returns images and labels in a numpy
        array."""
        assert self.data_dir, (
            'Cannot call `read_data_files` when using synthetic '
            'data')
        if subset == 'train':
            filenames = [os.path.join(self.data_dir, 'data_batch_%d' % i)
                         for i in xrange(1, 6)]
        elif subset == 'validation':
            filenames = [os.path.join(self.data_dir, 'test_batch')]
        else:
            raise ValueError('Invalid data subset "%s"' % subset)

        inputs = []
        for filename in filenames:
            with gfile.Open(filename, 'rb') as f:  # pickle files are binary
                inputs.append(cPickle.load(f))
        # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
        # input format.
        all_images = np.concatenate(
            [each_input['data'] for each_input in inputs]).astype(np.float32)
        all_labels = np.concatenate(
            [each_input['labels'] for each_input in inputs])
        return all_images, all_labels 
Developer: snuspl | Project: parallax | Lines: 27 | Source: datasets.py

Example 10: read_data_files

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Open [as alias]
def read_data_files(self, subset='train'):
    """Reads from data file and return images and labels in a numpy array."""
    if subset == 'train':
      filenames = [os.path.join(self.data_dir, 'data_batch_%d' % i)
                   for i in xrange(1, 6)]
    elif subset == 'validation':
      filenames = [os.path.join(self.data_dir, 'test_batch')]
    else:
      raise ValueError('Invalid data subset "%s"' % subset)

    inputs = []
    for filename in filenames:
      with gfile.Open(filename, 'rb') as f:  # pickle files are binary
        inputs.append(cPickle.load(f))
    # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
    # input format.
    all_images = np.concatenate(
        [each_input['data'] for each_input in inputs]).astype(np.float32)
    all_labels = np.concatenate(
        [each_input['labels'] for each_input in inputs])
    return all_images, all_labels 
Developer: awslabs | Project: deeplearning-benchmark | Lines: 23 | Source: datasets.py

Example 11: read_data_files

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Open [as alias]
def read_data_files(self, subset='train'):
        """Reads from data file and return images and labels in a numpy array."""
        if subset == 'train':
            filenames = [os.path.join(self.data_dir, 'data_batch_%d' % i)
                         for i in xrange(1, 6)]
        elif subset == 'validation':
            filenames = [os.path.join(self.data_dir, 'test_batch')]
        else:
            raise ValueError('Invalid data subset "%s"' % subset)

        inputs = []
        for filename in filenames:
            with gfile.Open(filename, 'rb') as f:  # pickle files are binary
                inputs.append(cPickle.load(f))
        # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
        # input format.
        all_images = np.concatenate(
            [each_input['data'] for each_input in inputs]).astype(np.float32)
        all_labels = np.concatenate(
            [each_input['labels'] for each_input in inputs])
        return all_images, all_labels 
Developer: balancap | Project: tf-imagenet | Lines: 23 | Source: cifar10.py

Example 12: write_op_log

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Open [as alias]
def write_op_log(graph, log_dir, op_log=None, run_meta=None, add_trace=True):
  """Log provided 'op_log', and add additional model information below.

    The API also assigns ops in tf.trainable_variables() an op type called
    '_trainable_variables'.
    The API also logs 'flops' statistics for ops with op.RegisterStatistics()
    defined. flops calculation depends on Tensor shapes defined in 'graph',
    which might not be complete. 'run_meta', if provided, completes the shape
    information with best effort.

  Args:
    graph: tf.Graph.
    log_dir: directory to write the log file.
    op_log: (Optional) OpLogProto proto to be written. If not provided, a new
        one is created.
    run_meta: (Optional) RunMetadata proto that helps flops computation using
        run time shape information.
    add_trace: Whether to add python code trace information.
        Used to support "code" view.
  """
  op_log = _merge_default_with_oplog(graph, op_log, run_meta, add_trace)

  with gfile.Open(os.path.join(log_dir, 'tfprof_log'), 'wb') as log:  # serialized proto is bytes
    log.write(op_log.SerializeToString()) 
Developer: PacktPublishing | Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda | Lines: 26 | Source: tfprof_logger.py

Example 13: read_data_files

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Open [as alias]
def read_data_files(self, subset='train'):
    """Reads from data file and returns images and labels in a numpy array."""
    assert self.data_dir, ('Cannot call `read_data_files` when using synthetic '
                           'data')
    if subset == 'train':
      filenames = [
          os.path.join(self.data_dir, 'data_batch_%d' % i)
          for i in xrange(1, 6)
      ]
    elif subset == 'validation':
      filenames = [os.path.join(self.data_dir, 'test_batch')]
    else:
      raise ValueError('Invalid data subset "%s"' % subset)

    inputs = []
    for filename in filenames:
      with gfile.Open(filename, 'rb') as f:
        # python2 does not have the encoding parameter
        encoding = {} if six.PY2 else {'encoding': 'bytes'}
        inputs.append(cPickle.load(f, **encoding))
    # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
    # input format.
    all_images = np.concatenate(
        [each_input[b'data'] for each_input in inputs]).astype(np.float32)
    all_labels = np.concatenate(
        [each_input[b'labels'] for each_input in inputs])
    return all_images, all_labels 
Developer: tensorflow | Project: benchmarks | Lines: 29 | Source: datasets.py

Example 14: _load_graph_def_from_event_file

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Open [as alias]
def _load_graph_def_from_event_file(event_file_path):
  event = event_pb2.Event()
  with gfile.Open(event_file_path, "rb") as f:
    event.ParseFromString(f.read())

  return graph_pb2.GraphDef.FromString(event.graph_def) 
Developer: ryfeus | Project: lambda-packs | Lines: 8 | Source: debug_data.py

Example 15: _load_log_message_from_event_file

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Open [as alias]
def _load_log_message_from_event_file(event_file_path):
  event = event_pb2.Event()
  with gfile.Open(event_file_path, "rb") as f:
    event.ParseFromString(f.read())

  return event.log_message.message 
Developer: ryfeus | Project: lambda-packs | Lines: 8 | Source: debug_data.py


Note: The tensorflow.python.platform.gfile.Open examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Consult each project's license before redistributing or reusing the code, and do not republish without permission.