

Python convert.optional_param_to_tensor Function Code Examples

This article collects and summarizes typical usage examples of the tensorflow.python.data.util.convert.optional_param_to_tensor function in Python. If you have been wondering what optional_param_to_tensor does, how to call it, or where it is used, the curated code examples below may help.


A total of 12 code examples of optional_param_to_tensor are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
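Before the examples, here is a minimal sketch of the behavior this function appears to have, inferred from the call sites and tests collected below. It is not the actual TensorFlow implementation, and the name optional_param_to_tensor_sketch is made up for illustration: when the optional argument is None, the default is returned as a constant tensor; otherwise the value is converted to a tensor of the requested dtype (int64 unless specified).

  from tensorflow.python.framework import constant_op
  from tensorflow.python.framework import dtypes
  from tensorflow.python.framework import ops


  def optional_param_to_tensor_sketch(argument_name,
                                      argument_value,
                                      argument_default=0,
                                      argument_dtype=dtypes.int64):
    """Returns `argument_value` as a tensor, or the default if it is None."""
    if argument_value is not None:
      return ops.convert_to_tensor(
          argument_value, dtype=argument_dtype, name=argument_name)
    return constant_op.constant(
        argument_default, dtype=argument_dtype, name=argument_name)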

Example 1: __init__

  def __init__(self,
               filenames,
               record_bytes,
               header_bytes=None,
               footer_bytes=None,
               buffer_size=None):
    """Creates a `FixedLengthRecordDataset`.

    Args:
      filenames: A `tf.string` tensor containing one or more filenames.
      record_bytes: A `tf.int64` scalar representing the number of bytes in
        each record.
      header_bytes: (Optional.) A `tf.int64` scalar representing the number of
        bytes to skip at the start of a file.
      footer_bytes: (Optional.) A `tf.int64` scalar representing the number of
        bytes to ignore at the end of a file.
      buffer_size: (Optional.) A `tf.int64` scalar representing the number of
        bytes to buffer when reading.
    """
    super(FixedLengthRecordDataset, self).__init__()
    self._filenames = ops.convert_to_tensor(
        filenames, dtype=dtypes.string, name="filenames")
    self._record_bytes = ops.convert_to_tensor(
        record_bytes, dtype=dtypes.int64, name="record_bytes")

    self._header_bytes = convert.optional_param_to_tensor(
        "header_bytes", header_bytes)
    self._footer_bytes = convert.optional_param_to_tensor(
        "footer_bytes", footer_bytes)
    self._buffer_size = convert.optional_param_to_tensor(
        "buffer_size", buffer_size, _DEFAULT_READER_BUFFER_SIZE_BYTES)
Developer: AndrewTwinz, Project: tensorflow, Lines of code: 31, Source file: readers.py
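A hypothetical construction call for the class above; the file name and byte counts are illustrative assumptions, not part of the original example:

  dataset = FixedLengthRecordDataset(
      filenames=["images.bin"],  # assumed file name
      record_bytes=784,          # fixed number of bytes per record
      header_bytes=16,           # skip a 16-byte header at the start of each file
      footer_bytes=None,         # nothing to ignore at the end of each file
      buffer_size=None)          # falls back to _DEFAULT_READER_BUFFER_SIZE_BYTES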

Example 2: __init__

  def __init__(self, filenames, compression_type=None, buffer_size=None):
    """Creates a `TFRecordDataset`.

    Args:
      filenames: A `tf.string` tensor containing one or more filenames.
      compression_type: (Optional.) A `tf.string` scalar evaluating to one of
        `""` (no compression), `"ZLIB"`, or `"GZIP"`.
      buffer_size: (Optional.) A `tf.int64` scalar representing the number of
        bytes in the read buffer. 0 means no buffering.
    """
    # Force the type to string even if filenames is an empty list.
    self._filenames = ops.convert_to_tensor(
        filenames, dtypes.string, name="filenames")
    self._compression_type = convert.optional_param_to_tensor(
        "compression_type",
        compression_type,
        argument_default="",
        argument_dtype=dtypes.string)
    self._buffer_size = convert.optional_param_to_tensor(
        "buffer_size",
        buffer_size,
        argument_default=_DEFAULT_READER_BUFFER_SIZE_BYTES)
    variant_tensor = gen_dataset_ops.tf_record_dataset(
        self._filenames, self._compression_type, self._buffer_size)
    super(_TFRecordDataset, self).__init__(variant_tensor)
Developer: kylin9872, Project: tensorflow, Lines of code: 25, Source file: readers.py
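A hypothetical construction call for the class above; the file name is an illustrative assumption:

  dataset = _TFRecordDataset(
      filenames=["events.tfrecord"],  # assumed file name
      compression_type="GZIP",        # one of "" (no compression), "ZLIB", "GZIP"
      buffer_size=None)               # falls back to _DEFAULT_READER_BUFFER_SIZE_BYTES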

Example 3: __init__

  def __init__(self, input_dataset, map_func, cycle_length, block_length,
               sloppy, buffer_output_elements, prefetch_input_elements):
    """See `tf.contrib.data.parallel_interleave()` for details."""
    super(ParallelInterleaveDataset, self).__init__()
    self._input_dataset = input_dataset

    @function.Defun(*nest.flatten(
        sparse.as_dense_types(input_dataset.output_types,
                              input_dataset.output_classes)))
    def tf_map_func(*args):
      """A wrapper for Defun that facilitates shape inference."""
      # Pass in shape information from the input_dataset.
      dense_shapes = sparse.as_dense_shapes(input_dataset.output_shapes,
                                            input_dataset.output_classes)
      for arg, shape in zip(args, nest.flatten(dense_shapes)):
        arg.set_shape(shape)

      nested_args = nest.pack_sequence_as(input_dataset.output_types, args)
      nested_args = sparse.deserialize_sparse_tensors(
          nested_args, input_dataset.output_types, input_dataset.output_shapes,
          input_dataset.output_classes)
      if dataset_ops._should_unpack_args(nested_args):  # pylint: disable=protected-access
        dataset = map_func(*nested_args)
      else:
        dataset = map_func(nested_args)

      if not isinstance(dataset, dataset_ops.Dataset):
        raise TypeError("`map_func` must return a `Dataset` object.")

      self._output_classes = dataset.output_classes
      self._output_types = dataset.output_types
      self._output_shapes = dataset.output_shapes

      return dataset._as_variant_tensor()  # pylint: disable=protected-access

    self._map_func = tf_map_func
    self._map_func.add_to_graph(ops.get_default_graph())

    self._cycle_length = ops.convert_to_tensor(
        cycle_length, dtype=dtypes.int64, name="cycle_length")
    self._block_length = ops.convert_to_tensor(
        block_length, dtype=dtypes.int64, name="block_length")
    self._sloppy = ops.convert_to_tensor(
        sloppy, dtype=dtypes.bool, name="sloppy")
    self._buffer_output_elements = convert.optional_param_to_tensor(
        "buffer_output_elements",
        buffer_output_elements,
        argument_default=2 * block_length)
    self._prefetch_input_elements = convert.optional_param_to_tensor(
        "prefetch_input_elements",
        prefetch_input_elements,
        argument_default=2 * cycle_length)
Developer: AnddyWang, Project: tensorflow, Lines of code: 52, Source file: interleave_ops.py
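The class above is normally reached through the public tf.contrib.data.parallel_interleave() transformation rather than constructed directly. A hedged usage sketch for the TF 1.x contrib API follows; filenames_dataset (a dataset of tf.string file names) is an assumption of this sketch:

  # Assumes TF 1.x with tf.contrib available.
  dataset = filenames_dataset.apply(
      tf.contrib.data.parallel_interleave(
          lambda filename: tf.data.TFRecordDataset(filename),
          cycle_length=4,    # number of input elements processed concurrently
          block_length=16,   # consecutive elements pulled from each nested dataset
          sloppy=True))      # allow non-deterministic ordering for throughput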

Example 4: __init__

 def __init__(self, filename, compression_type=None):
   self._filename = ops.convert_to_tensor(
       filename, dtypes.string, name="filename")
   self._compression_type = convert.optional_param_to_tensor(
       "compression_type",
       compression_type,
       argument_default="",
       argument_dtype=dtypes.string)
Developer: adit-chandra, Project: tensorflow, Lines of code: 8, Source file: writers.py

Example 5: __init__

  def __init__(self,
               filenames,
               record_defaults,
               buffer_size=None,
               header=False,
               field_delim=",",
               use_quote_delim=True,
               na_value="",
               select_cols=None):
    """Creates a `CsvDataset` by reading and decoding CSV files.

    The elements of this dataset correspond to records from the file(s).
    RFC 4180 format is expected for CSV files
    (https://tools.ietf.org/html/rfc4180)
    Note that we allow leading and trailing spaces in int and float fields.


    For example, suppose we have a file 'my_file0.csv' with four CSV columns of
    different data types:
    ```
    abcdefg,4.28E10,5.55E6,12
    hijklmn,-5.3E14,,2
    ```

    We can construct a CsvDataset from it as follows:
    ```python
    dataset = tf.contrib.data.CsvDataset(
      "my_file*.csv",
      [tf.float32,  # Required field, use dtype or empty tensor
       tf.constant([0.0], dtype=tf.float32),  # Optional field, default to 0.0
       tf.int32,  # Required field, use dtype or empty tensor
       ],
      select_cols=[1,2,3]  # Only parse last three columns
    )
    ```

    The expected output of its iterations is:
    ```python
    nxt = dataset.make_one_shot_iterator().get_next()
    with tf.Session() as sess:
      while True:
        try:
          print(sess.run(nxt))
        except tf.errors.OutOfRangeError:
          break

    >> (4.28e10, 5.55e6, 12)
    >> (-5.3e14, 0.0, 2)
    ```

    Args:
      filenames: A `tf.string` tensor containing one or more filenames.
      record_defaults: A list of default values for the CSV fields. Each item
        in the list is either a valid CSV `DType` (float32, float64, int32,
        int64, string) or a `Tensor` object with one of those types. Provide
        one item per selected column of CSV data: a scalar `Tensor` default
        value if the column is optional, or a `DType` or empty `Tensor` if it
        is required. If both this and `select_cols` are specified, they must
        have the same length, and `record_defaults` is assumed to be sorted in
        order of increasing column index.
      buffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes
        to buffer while reading files. Defaults to 4MB.
      header: (Optional.) A `tf.bool` scalar indicating whether the CSV file(s)
        have header line(s) that should be skipped when parsing. Defaults to
        `False`.
      field_delim: (Optional.) A `tf.string` scalar containing the delimiter
        character that separates fields in a record. Defaults to `","`.
      use_quote_delim: (Optional.) A `tf.bool` scalar. If `False`, treats
        double quotation marks as regular characters inside of string fields
        (ignoring RFC 4180, Section 2, Bullet 5). Defaults to `True`.
      na_value: (Optional.) A `tf.string` scalar indicating a value that will
        be treated as NA/NaN.
      select_cols: (Optional.) A sorted list of column indices to select from
        the input data. If specified, only this subset of columns will be
        parsed. Defaults to parsing all columns.
    """
    super(CsvDataset, self).__init__()
    self._filenames = ops.convert_to_tensor(
        filenames, dtype=dtypes.string, name="filenames")
    record_defaults = [
        constant_op.constant([], dtype=x) if x in _ACCEPTABLE_CSV_TYPES else x
        for x in record_defaults
    ]
    self._record_defaults = ops.convert_n_to_tensor(
        record_defaults, name="record_defaults")
    self._buffer_size = convert.optional_param_to_tensor(
        "buffer_size", buffer_size, _DEFAULT_READER_BUFFER_SIZE_BYTES)
    self._header = ops.convert_to_tensor(
        header, dtype=dtypes.bool, name="header")
    self._field_delim = ops.convert_to_tensor(
        field_delim, dtype=dtypes.string, name="field_delim")
    self._use_quote_delim = ops.convert_to_tensor(
        use_quote_delim, dtype=dtypes.bool, name="use_quote_delim")
    self._na_value = ops.convert_to_tensor(
        na_value, dtype=dtypes.string, name="na_value")
    self._select_cols = convert.optional_param_to_tensor(
        "select_cols",
        select_cols,
        argument_default=[],
        argument_dtype=dtypes.int64,
#......... part of the code is omitted here .........
Developer: jfreedman0, Project: tensorflow, Lines of code: 101, Source file: readers.py

Example 6: testIntegerDefault

 def testIntegerDefault(self):
   resp = convert.optional_param_to_tensor("foo", None)
   self.assertEqual(0, self.evaluate(resp))
Developer: Wajih-O, Project: tensorflow, Lines of code: 3, Source file: convert_test.py

Example 7: testString

 def testString(self):
   resp = convert.optional_param_to_tensor("bar", "value", "default",
                                           dtypes.string)
   self.assertEqual(compat.as_bytes("value"), self.evaluate(resp))
Developer: Wajih-O, Project: tensorflow, Lines of code: 4, Source file: convert_test.py

Example 8: testInteger

 def testInteger(self):
   resp = convert.optional_param_to_tensor("foo", 3)
   self.assertEqual(3, self.evaluate(resp))
Developer: Wajih-O, Project: tensorflow, Lines of code: 3, Source file: convert_test.py

Example 9: testString

 def testString(self):
   resp = convert.optional_param_to_tensor("bar", "value", "default",
                                           dtypes.string)
   with self.test_session() as sess:
     self.assertEqual(compat.as_bytes("value"), sess.run(resp))
Developer: BhaskarNallani, Project: tensorflow, Lines of code: 5, Source file: convert_test.py

Example 10: testIntegerDefault

 def testIntegerDefault(self):
   resp = convert.optional_param_to_tensor("foo", None)
   with self.test_session() as sess:
     self.assertEqual(0, sess.run(resp))
Developer: BhaskarNallani, Project: tensorflow, Lines of code: 4, Source file: convert_test.py

Example 11: testStringDefault

 def testStringDefault(self):
   resp = convert.optional_param_to_tensor("bar", None, "default",
                                           dtypes.string)
   with self.cached_session() as sess:
     self.assertEqual(compat.as_bytes("default"), sess.run(resp))
Developer: AnishShah, Project: tensorflow, Lines of code: 5, Source file: convert_test.py

Example 12: testInteger

 def testInteger(self):
   resp = convert.optional_param_to_tensor("foo", 3)
   with self.cached_session() as sess:
     self.assertEqual(3, sess.run(resp))
Developer: AnishShah, Project: tensorflow, Lines of code: 4, Source file: convert_test.py


Note: The tensorflow.python.data.util.convert.optional_param_to_tensor examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Please follow each project's license when distributing or using the code; do not reproduce this article without permission.