

Python string_ops.as_string Function Code Examples

This article collects typical usage examples of the as_string function from tensorflow.python.ops.string_ops. If you are unsure what as_string does, how to call it, or what idiomatic uses look like, the curated examples below should help.


The following presents 15 code examples of the as_string function, ordered roughly by popularity.
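Before the collected examples, here is a minimal sketch of the typical call pattern. It is not taken from any of the examples below and assumes a TF1-style graph session, as the tests do; the point is that the as_string arguments map onto printf-style formatting (shortest roughly corresponds to %g, scientific to %e, and width/fill/precision control padding and decimal places).

from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import string_ops

# Format floats with a fixed precision, padded to a minimum width with zeros.
values = constant_op.constant([0.5, 0.25, 3.0])
formatted = string_ops.as_string(values, precision=2, width=6, fill="0")

with session.Session() as sess:
  # Expected on a typical build: [b'000.50' b'000.25' b'003.00']
  print(sess.run(formatted))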

Example 1: testFloat

  def testFloat(self):
    float_inputs_ = [
        0, 1, -1, 0.5, 0.25, 0.125, float("INF"), float("NAN"), float("-INF")
    ]

    with self.test_session():
      for dtype in (dtypes.float32, dtypes.float64):
        input_ = array_ops.placeholder(dtype)

        output = string_ops.as_string(input_, shortest=True)
        result = output.eval(feed_dict={input_: float_inputs_})
        s = lambda strs: [x.decode("ascii") for x in strs]
        self.assertAllEqual(s(result), ["%g" % x for x in float_inputs_])

        output = string_ops.as_string(input_, scientific=True)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%e" % x for x in float_inputs_])

        output = string_ops.as_string(input_)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%f" % x for x in float_inputs_])

        output = string_ops.as_string(input_, width=3)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%3f" % x for x in float_inputs_])

        output = string_ops.as_string(input_, width=3, fill="0")
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%03f" % x for x in float_inputs_])

        output = string_ops.as_string(input_, width=3, fill="0", shortest=True)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%03g" % x for x in float_inputs_])

        output = string_ops.as_string(input_, precision=10, width=3)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%03.10f" % x for x in float_inputs_])

        output = string_ops.as_string(
            input_, precision=10, width=3, fill="0", shortest=True)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%03.10g" % x for x in float_inputs_])

      with self.assertRaisesOpError("Cannot select both"):
        output = string_ops.as_string(input_, scientific=True, shortest=True)
        output.eval(feed_dict={input_: float_inputs_})

      with self.assertRaisesOpError("Fill string must be one or fewer"):
        output = string_ops.as_string(input_, fill="ab")
        output.eval(feed_dict={input_: float_inputs_})
Author: Huoxubeiyin | Project: tensorflow | Lines: 50 | Source: as_string_op_test.py

Example 2: testComplex

  def testComplex(self):
    float_inputs_ = [
        0, 1, -1, 0.5, 0.25, 0.125, complex("INF"), complex("NAN"),
        complex("-INF")
    ]
    complex_inputs_ = [(x + (x + 1) * 1j) for x in float_inputs_]

    with self.test_session():
      for dtype in (dtypes.complex64,):
        input_ = array_ops.placeholder(dtype)

        def clean_nans(s_l):
          return [s.decode("ascii").replace("-nan", "nan") for s in s_l]

        output = string_ops.as_string(input_, shortest=True)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%g,%g)" % (x.real, x.imag) for x in complex_inputs_])

        output = string_ops.as_string(input_, scientific=True)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%e,%e)" % (x.real, x.imag) for x in complex_inputs_])

        output = string_ops.as_string(input_)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%f,%f)" % (x.real, x.imag) for x in complex_inputs_])

        output = string_ops.as_string(input_, width=3)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%03f,%03f)" % (x.real, x.imag) for x in complex_inputs_])

        output = string_ops.as_string(input_, width=3, fill="0", shortest=True)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%03g,%03g)" % (x.real, x.imag) for x in complex_inputs_])

        output = string_ops.as_string(input_, precision=10, width=3)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%03.10f,%03.10f)" % (x.real, x.imag) for x in complex_inputs_])

        output = string_ops.as_string(
            input_, precision=10, width=3, fill="0", shortest=True)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%03.10g,%03.10g)" % (x.real, x.imag) for x in complex_inputs_])

      with self.assertRaisesOpError("Cannot select both"):
        output = string_ops.as_string(input_, scientific=True, shortest=True)
        output.eval(feed_dict={input_: complex_inputs_})
Author: Huoxubeiyin | Project: tensorflow | Lines: 60 | Source: as_string_op_test.py

Example 3: input_fn

 def input_fn():
   start = random_ops.random_uniform(
       (), minval=0, maxval=sequence_length, dtype=dtypes.int32, seed=seed)
   # Concatenate lyrics_list so inputs and labels wrap when start > 0.
   lyrics_list_concat = lyrics_list + lyrics_list
   inputs_dense = array_ops.slice(lyrics_list_concat, [start],
                                  [sequence_length])
   indices = array_ops.constant(
       [[i, 0] for i in range(sequence_length)], dtype=dtypes.int64)
   dense_shape = [sequence_length, 1]
   inputs = sparse_tensor.SparseTensor(
       indices=indices, values=inputs_dense, dense_shape=dense_shape)
   table = lookup.string_to_index_table_from_tensor(
       mapping=list(vocab), default_value=-1, name='lookup')
   labels = table.lookup(
       array_ops.slice(lyrics_list_concat, [start + 1], [sequence_length]))
   input_key = string_ops.string_join([
       'key_', string_ops.as_string(
           random_ops.random_uniform(
               (),
               minval=0,
               maxval=10000000,
               dtype=dtypes.int32,
               seed=seed))
   ])
   return {'lyrics': inputs, input_key_column_name: input_key}, labels
Author: Jackhuang945 | Project: tensorflow | Lines: 26 | Source: state_saving_rnn_estimator_test.py
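The string_join pattern above (also used in Examples 5 and 10) relies on as_string only to turn a random integer scalar into text so it can be concatenated into a unique batch key. A hedged, standalone sketch of that pattern follows; the maxval and the printed key are illustrative only.

from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import random_ops, string_ops

# Build a pseudo-random string key such as "key_4821".
key = string_ops.string_join([
    "key_",
    string_ops.as_string(
        random_ops.random_uniform((), maxval=10000, dtype=dtypes.int32)),
])

with session.Session() as sess:
  print(sess.run(key))  # e.g. b'key_4821'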

Example 4: _testDistribution

  def _testDistribution(self, initial_known):
    classes = np.random.randint(5, size=(20000,))  # Uniformly sampled
    target_dist = [0.9, 0.05, 0.05, 0.0, 0.0]
    initial_dist = [0.2] * 5 if initial_known else None
    iterator = (dataset_ops.Dataset.from_tensor_slices(classes).shuffle(
        200, seed=21).map(lambda c: (c, string_ops.as_string(c))).apply(
            resampling.rejection_resample(
                target_dist=target_dist,
                initial_dist=initial_dist,
                class_func=lambda c, _: c,
                seed=27)).make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
      sess.run(init_op)
      returned = []
      with self.assertRaises(errors.OutOfRangeError):
        while True:
          returned.append(sess.run(get_next))

    returned_classes, returned_classes_and_data = zip(*returned)
    _, returned_data = zip(*returned_classes_and_data)
    self.assertAllEqual([compat.as_bytes(str(c))
                         for c in returned_classes], returned_data)
    total_returned = len(returned_classes)
    # Subsampling rejects a large percentage of the initial data in
    # this case.
    self.assertGreater(total_returned, 20000 * 0.2)
    class_counts = np.array([
        len([True for v in returned_classes if v == c])
        for c in range(5)])
    returned_dist = class_counts / total_returned
    self.assertAllClose(target_dist, returned_dist, atol=1e-2)
Author: DILASSS | Project: tensorflow | Lines: 34 | Source: resample_test.py

Example 5: testStateSaverScopeNames

 def testStateSaverScopeNames(self):
   batch_size = constant_op.constant(2)
   sqss_scope_name = "unique_scope_name_for_sqss"
   num_unroll = 2
   length = 3
   key = string_ops.string_join([
       "key_", string_ops.as_string(
           math_ops.cast(10000 * random_ops.random_uniform(()), dtypes.int32))
   ])
   padded_length = 4
   sequences = {
       "seq1": np.random.rand(padded_length, 5),
       "seq2": np.random.rand(padded_length, 4, 2)
   }
   context = {"context1": [3, 4]}
   initial_states = {
       "state1": np.random.rand(6, 7),
       "state2": np.random.rand(8)
   }
   state_saver = sqss.SequenceQueueingStateSaver(
       batch_size=batch_size,
       num_unroll=num_unroll,
       input_length=length,
       input_key=key,
       input_sequences=sequences,
       input_context=context,
       initial_states=initial_states,
       name=sqss_scope_name)
   prefetch_op = state_saver.prefetch_op
   next_batch = state_saver.next_batch
   self.assertTrue(
       state_saver.barrier.barrier_ref.name.startswith("%s/" %
                                                       sqss_scope_name))
   self.assertTrue(prefetch_op.name.startswith("%s/" % sqss_scope_name))
   self.assertTrue(next_batch.key.name.startswith("%s/" % sqss_scope_name))
Author: 1000sprites | Project: tensorflow | Lines: 35 | Source: sequence_queueing_state_saver_test.py

Example 6: _transform_feature

  def _transform_feature(self, inputs):
    input_tensor = inputs.get(self.key)
    if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
      raise ValueError('SparseColumn input must be a SparseTensor.')

    if (input_tensor.dtype != dtypes.string and
        not input_tensor.dtype.is_integer):
      raise ValueError('input tensors dtype must be string or integer. '
                       'dtype: {}, column_name: {}'.format(
                           input_tensor.dtype, self.key))

    if self.dtype.is_integer != input_tensor.dtype.is_integer:
      raise ValueError(
          'Column dtype and SparseTensors dtype must be compatible. '
          'key: {}, column dtype: {}, tensor dtype: {}'.format(
              self.key, self.dtype, input_tensor.dtype))

    if self.dtype == dtypes.string:
      sparse_values = input_tensor.values
    else:
      sparse_values = string_ops.as_string(input_tensor.values)

    sparse_id_values = string_ops.string_to_hash_bucket_fast(
        sparse_values, self.hash_bucket_size, name='lookup')
    return sparse_tensor_lib.SparseTensor(
        input_tensor.indices, sparse_id_values, input_tensor.dense_shape)
Author: finardi | Project: tensorflow | Lines: 26 | Source: feature_column.py

Example 7: testDistribution

  def testDistribution(self, initial_known):
    classes = np.random.randint(5, size=(20000,))  # Uniformly sampled
    target_dist = [0.9, 0.05, 0.05, 0.0, 0.0]
    initial_dist = [0.2] * 5 if initial_known else None
    classes = math_ops.to_int64(classes)  # needed for Windows build.
    dataset = dataset_ops.Dataset.from_tensor_slices(classes).shuffle(
        200, seed=21).map(lambda c: (c, string_ops.as_string(c))).repeat()

    get_next = dataset.apply(
        resampling.rejection_resample(
            target_dist=target_dist,
            initial_dist=initial_dist,
            class_func=lambda c, _: c,
            seed=27)).make_one_shot_iterator().get_next()

    with self.cached_session() as sess:
      returned = []
      while len(returned) < 4000:
        returned.append(sess.run(get_next))

    returned_classes, returned_classes_and_data = zip(*returned)
    _, returned_data = zip(*returned_classes_and_data)
    self.assertAllEqual([compat.as_bytes(str(c))
                         for c in returned_classes], returned_data)
    total_returned = len(returned_classes)
    class_counts = np.array([
        len([True for v in returned_classes if v == c])
        for c in range(5)])
    returned_dist = class_counts / total_returned
    self.assertAllClose(target_dist, returned_dist, atol=1e-2)
Author: AnishShah | Project: tensorflow | Lines: 30 | Source: resample_test.py

Example 8: testLargeInt

  def testLargeInt(self):
    # Exercise the extreme (min/max) values of int32 and int64; these do not
    # fit in the narrower integer types covered by testInt below.
    s = lambda strs: [x.decode("ascii") for x in strs]

    with self.test_session():
      input_ = array_ops.placeholder(dtypes.int32)
      int_inputs_ = [np.iinfo(np.int32).min, np.iinfo(np.int32).max]
      output = string_ops.as_string(input_)
      result = output.eval(feed_dict={input_: int_inputs_})
      self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])

      input_ = array_ops.placeholder(dtypes.int64)
      int_inputs_ = [np.iinfo(np.int64).min, np.iinfo(np.int64).max]
      output = string_ops.as_string(input_)
      result = output.eval(feed_dict={input_: int_inputs_})
      self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])
Author: Huoxubeiyin | Project: tensorflow | Lines: 17 | Source: as_string_op_test.py

Example 9: testHalfInt

  def testHalfInt(self):
    s = lambda strs: [x.decode("ascii") for x in strs]

    with self.test_session():
      input_ = array_ops.placeholder(dtypes.int16)
      int_inputs_ = [np.iinfo(np.int16).min, np.iinfo(np.int16).max]
      output = string_ops.as_string(input_)
      result = output.eval(feed_dict={input_: int_inputs_})
      self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])
Author: Huoxubeiyin | Project: tensorflow | Lines: 9 | Source: as_string_op_test.py

Example 10: setUp

 def setUp(self):
   super(BatchSequencesWithStatesTest, self).setUp()
   self.value_length = 4
   ind1 = np.array([
       [0, 0],
       [1, 0], [1, 3], [1, 4],
       [3, 2], [3, 3]])
   val1 = np.array([0, 10, 13, 14, 32, 33])
   shape1 = np.array([self.value_length, 6])
   sp_tensor1 = sparse_tensor.SparseTensor(
       array_ops.constant(ind1, dtypes.int64),
       array_ops.constant(val1, dtypes.int64),
       array_ops.constant(shape1, dtypes.int64))
   ind2 = np.array([
       [0, 0, 1],
       [0, 1, 0],
       [0, 1, 2],
       [1, 0, 3],
       [1, 1, 0],
       [1, 1, 1],
       [1, 1, 2],
       [1, 2, 2]])
   val2 = np.array([1, 10, 12, 103, 150, 149, 150, 122])
   shape2 = np.array([self.value_length, 3, 4])
   sp_tensor2 = sparse_tensor.SparseTensor(
       array_ops.constant(ind2, dtypes.int64),
       array_ops.constant(val2, dtypes.int64),
       array_ops.constant(shape2, dtypes.int64))
   sp_tensor3 = sparse_tensor.SparseTensor(
       array_ops.constant([[1, 9], [2, 2], [2, 10]], dtypes.int64),
       array_ops.constant([7, 15, 2], dtypes.int64),
       array_ops.constant([5, 12], dtypes.int64)
   )
   self.sp_tensor3_expected = sparse_tensor.SparseTensorValue(
       [[0, 1, 9], [0, 2, 2], [0, 2, 10], [1, 1, 9], [1, 2, 2], [1, 2, 10]],
       [7, 15, 2, 7, 15, 2],
       [2, 5, 12]
   )
   self.batch_size = 2
   self.key = string_ops.string_join([
       "key_", string_ops.as_string(
           math_ops.cast(10000 * random_ops.random_uniform(()), dtypes.int32))
   ])
   self.sequences = {
       "seq1": np.random.rand(self.value_length, 5),
       "seq2": np.random.rand(self.value_length, 4, 2),
       "seq3": sp_tensor1,
       "seq4": sp_tensor2}
   self.context = {
       "context1": [3, 4],
       "sp_context": sp_tensor3}
   self.initial_states = {
       "state1": np.random.rand(6, 7),
       "state2": np.random.rand(8)
   }
Author: AbhinavJain13 | Project: tensorflow | Lines: 55 | Source: batch_sequences_with_states_test.py

Example 11: testBool

  def testBool(self):
    bool_inputs_ = [False, True]
    s = lambda strs: [x.decode("ascii") for x in strs]

    with self.test_session():
      for dtype in (dtypes.bool,):
        input_ = array_ops.placeholder(dtype)

        output = string_ops.as_string(input_)
        result = output.eval(feed_dict={input_: bool_inputs_})
        self.assertAllEqual(s(result), ["false", "true"])
Author: Huoxubeiyin | Project: tensorflow | Lines: 11 | Source: as_string_op_test.py

Example 12: testUnbatchDatasetWithStrings

  def testUnbatchDatasetWithStrings(self):
    data = tuple([math_ops.range(10) for _ in range(3)])
    data = dataset_ops.Dataset.from_tensor_slices(data)
    data = data.map(lambda x, y, z: (x, string_ops.as_string(y), z))
    expected_types = (dtypes.int32, dtypes.string, dtypes.int32)
    data = data.batch(2)
    self.assertEqual(expected_types, data.output_types)
    data = data.apply(batching.unbatch())
    self.assertEqual(expected_types, data.output_types)

    self.assertDatasetProduces(
        data, [(i, compat.as_bytes(str(i)), i) for i in range(10)])
Author: Wajih-O | Project: tensorflow | Lines: 12 | Source: unbatch_test.py

Example 13: testInt

  def testInt(self):
    # Cannot use values outside -128..127 for test, because we're also
    # testing int8
    int_inputs_ = [0, -1, 1, -128, 127, -101, 101, -0]
    s = lambda strs: [x.decode("ascii") for x in strs]

    with self.test_session():
      for dtype in (dtypes.int32, dtypes.int64, dtypes.int8):
        input_ = array_ops.placeholder(dtype)

        output = string_ops.as_string(input_)
        result = output.eval(feed_dict={input_: int_inputs_})
        self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])

        output = string_ops.as_string(input_, width=3)
        result = output.eval(feed_dict={input_: int_inputs_})
        self.assertAllEqual(s(result), ["%3d" % x for x in int_inputs_])

        output = string_ops.as_string(input_, width=3, fill="0")
        result = output.eval(feed_dict={input_: int_inputs_})
        self.assertAllEqual(s(result), ["%03d" % x for x in int_inputs_])

      with self.assertRaisesOpError("scientific and shortest"):
        output = string_ops.as_string(input_, scientific=True)
        output.eval(feed_dict={input_: int_inputs_})

      with self.assertRaisesOpError("scientific and shortest"):
        output = string_ops.as_string(input_, shortest=True)
        output.eval(feed_dict={input_: int_inputs_})

      with self.assertRaisesOpError("precision not supported"):
        output = string_ops.as_string(input_, precision=0)
        output.eval(feed_dict={input_: int_inputs_})
Author: Huoxubeiyin | Project: tensorflow | Lines: 33 | Source: as_string_op_test.py

Example 14: _classification_output

def _classification_output(scores, n_classes, label_vocabulary=None):
  batch_size = array_ops.shape(scores)[0]
  if label_vocabulary:
    export_class_list = label_vocabulary
  else:
    export_class_list = string_ops.as_string(math_ops.range(n_classes))
  export_output_classes = array_ops.tile(
      input=array_ops.expand_dims(input=export_class_list, axis=0),
      multiples=[batch_size, 1])
  return export_output.ClassificationOutput(
      scores=scores,
      # `ClassificationOutput` requires string classes.
      classes=export_output_classes)
Author: AbhinavJain13 | Project: tensorflow | Lines: 13 | Source: head.py
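As a hedged aside (not part of head.py itself): when no label_vocabulary is supplied, the exported classes are simply the stringified class indices, which is easy to verify directly with as_string.

from tensorflow.python.client import session
from tensorflow.python.ops import math_ops, string_ops

with session.Session() as sess:
  # Expected: [b'0' b'1' b'2']
  print(sess.run(string_ops.as_string(math_ops.range(3))))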

Example 15: testVariableDevicePlacement

  def testVariableDevicePlacement(self):
    classes = np.random.randint(5, size=(20000,))  # Uniformly sampled
    target_dist = [0.9, 0.05, 0.05, 0.0, 0.0]
    with ops.device(
        device_setter.replica_device_setter(ps_tasks=1, ps_device="/cpu:0")):
      dataset = (dataset_ops.Dataset.from_tensor_slices(classes)
                 .shuffle(200, seed=21)
                 .map(lambda c: (c, string_ops.as_string(c))))
      dataset = dataset_ops.rejection_resample(
          dataset, target_dist=target_dist, initial_dist=None,
          class_func=lambda c, _: c, seed=27)

      self.assertEqual(1, len(variables.local_variables()))
      self.assertEqual(b"",
                       compat.as_bytes(variables.local_variables()[0].device))
Author: 1000sprites | Project: tensorflow | Lines: 15 | Source: resample_test.py


Note: The tensorflow.python.ops.string_ops.as_string examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets are taken from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution or reuse should follow the corresponding project's License. Please do not repost without permission.