

Python tensorflow.as_string Method Code Examples

This article collects typical code examples of the tensorflow.as_string method in Python. If you are unsure what tensorflow.as_string does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples from the tensorflow package, to which this method belongs.


The sections below present 15 code examples of the tensorflow.as_string method, sorted by popularity by default.
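For orientation, here is a minimal standalone sketch of what tf.as_string does (assuming TensorFlow 2.x with eager execution; the TF 1.x snippets below run the same op inside a session instead). The formatting arguments width, fill, precision, and scientific mirror those exercised in the test cases further down; expected outputs are shown as comments.

import tensorflow as tf

# Integers -> strings, optionally zero-padded to a fixed width.
ints = tf.constant([7, 42, 123])
print(tf.as_string(ints).numpy())                     # [b'7' b'42' b'123']
print(tf.as_string(ints, width=5, fill='0').numpy())  # [b'00007' b'00042' b'00123']

# Floats -> strings with a fixed precision or in scientific notation.
floats = tf.constant([3.14159, 0.5])
print(tf.as_string(floats, precision=2).numpy())      # [b'3.14' b'0.50']
print(tf.as_string(floats, scientific=True).numpy())  # [b'3.141590e+00' b'5.000000e-01']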

Example 1: testLargeInt

# Required import: import tensorflow [as alias]
# Or: from tensorflow import as_string [as alias]
def testLargeInt(self):
    # Unlike testInt, which is limited to -128..127 because it also covers
    # int8, this test exercises the int32 and int64 extremes.
    s = lambda strs: [x.decode("ascii") for x in strs]

    with self.test_session():
      input_ = tf.placeholder(tf.int32)
      int_inputs_ = [np.iinfo(np.int32).min, np.iinfo(np.int32).max]
      output = tf.as_string(input_)
      result = output.eval(feed_dict={input_: int_inputs_})
      self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])

      input_ = tf.placeholder(tf.int64)
      int_inputs_ = [np.iinfo(np.int64).min, np.iinfo(np.int64).max]
      output = tf.as_string(input_)
      result = output.eval(feed_dict={input_: int_inputs_})
      self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_]) 
Author: tobegit3hub, Project: deep_image_model, Lines: 19, Source: as_string_op_test.py

Example 2: _create_graph_with_table_initialized_by_table_output

# Required import: import tensorflow [as alias]
# Or: from tensorflow import as_string [as alias]
def _create_graph_with_table_initialized_by_table_output():
  filename = tf.compat.v1.placeholder(tf.string, ())
  table1 = _create_lookup_table_from_file(filename)

  # Use output from the first table to initialize the second table.
  keys = ['a', 'b', 'c']
  tensor_keys = tf.as_string(
      table1.lookup(tf.constant(keys, tf.string)))
  initializer2 = tf.lookup.KeyValueTensorInitializer(
      keys=tensor_keys,
      values=tf.range(len(keys), dtype=tf.int64),
      key_dtype=tf.string,
      value_dtype=tf.int64)
  table2 = tf.lookup.StaticHashTable(initializer2, default_value=-1)
  x = tf.compat.v1.placeholder(tf.string, (None,))
  y = table2.lookup(x)
  return {'filename': filename, 'x': x, 'y': y} 
Author: tensorflow, Project: transform, Lines: 19, Source: graph_tools_test.py

Example 3: testEstimatedProbabilityDensityMissingKey

# Required import: import tensorflow [as alias]
# Or: from tensorflow import as_string [as alias]
def testEstimatedProbabilityDensityMissingKey(self):
    input_size = 5

    with tf.compat.v1.Graph().as_default():
      input_data = tf.constant([[str(x + 1)] for x in range(input_size)])

      count = tf.constant([3] * input_size, tf.int64)
      boundaries = tf.as_string(tf.range(input_size))
      with mock.patch.object(
          mappers.analyzers, 'histogram', side_effect=[(count, boundaries)]):

        result = mappers.estimated_probability_density(
            input_data, categorical=True)

      expected = np.array([[0.2], [0.2], [0.2], [0.2], [0.]], np.float32)
      with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.tables_initializer())
        self.assertAllEqual(expected, sess.run(result)) 
Author: tensorflow, Project: transform, Lines: 20, Source: mappers_test.py

Example 4: markdown_table

# Required import: import tensorflow [as alias]
# Or: from tensorflow import as_string [as alias]
def markdown_table(step):
    # The text summary can also contain Markdown, including Markdown
    # tables. Markdown tables look like this:
    #
    #     | hello | there |
    #     |-------|-------|
    #     | this  | is    |
    #     | a     | table |
    #
    # The leading and trailing pipes in each row are optional, and the text
    # doesn't actually have to be neatly aligned, so we can create these
    # pretty easily. Let's do so.
    header_row = "Pounds of chocolate | Happiness"
    chocolate = tf.range(step)
    happiness = tf.square(chocolate + 1)
    chocolate_column = tf.as_string(chocolate)
    happiness_column = tf.as_string(happiness)
    table_rows = tf.strings.join([chocolate_column, " | ", happiness_column])
    table_body = tf.strings.reduce_join(inputs=table_rows, separator="\n")
    table = tf.strings.join([header_row, "---|---", table_body], separator="\n")
    preamble = "We conducted an experiment and found the following data:\n\n"
    result = tf.strings.join([preamble, table])
    tf.compat.v1.summary.text("chocolate_study", result) 
Author: tensorflow, Project: tensorboard, Lines: 25, Source: text_demo.py

Example 5: _parse_csv

# Required import: import tensorflow [as alias]
# Or: from tensorflow import as_string [as alias]
def _parse_csv(rows_string_tensor):
  """Takes the string input tensor and returns a dict of rank-2 tensors."""
  example_count = tf.io.decode_csv(
      records=rows_string_tensor,
      record_defaults=[tf.constant([0], dtype=tf.int32, shape=None)])[0]

  input_index, intra_input_index = _indices_from_example_count(example_count)
  annotation = tf.strings.join([
      'raw_input: ',
      tf.gather(rows_string_tensor, input_index), '; index: ',
      tf.as_string(intra_input_index)
  ])

  return {
      'example_count': tf.gather(example_count, input_index),
      'input_index': input_index,
      'intra_input_index': intra_input_index,
      'annotation': annotation,
  } 
Author: tensorflow, Project: model-analysis, Lines: 21, Source: fake_multi_examples_per_input_estimator.py

Example 6: _replace_empty_string_with_random_number

# Required import: import tensorflow [as alias]
# Or: from tensorflow import as_string [as alias]
def _replace_empty_string_with_random_number(string_tensor):
  """Returns string unchanged if non-empty, and random string tensor otherwise.

  The random string is an integer between 0 and 2**63 - 1, cast as a string.

  Args:
    string_tensor: A tf.tensor of dtype string.

  Returns:
    out_string: A tf.tensor of dtype string. If string_tensor contains the empty
      string, out_string will contain a random integer cast to a string.
      Otherwise string_tensor is returned unchanged.

  """

  empty_string = tf.constant('', dtype=tf.string, name='EmptyString')

  random_source_id = tf.as_string(
      tf.random_uniform(shape=[], maxval=2**63 - 1, dtype=tf.int64))

  out_string = tf.cond(
      tf.equal(string_tensor, empty_string),
      true_fn=lambda: random_source_id,
      false_fn=lambda: string_tensor)

  return out_string 
Author: ahmetozlu, Project: vehicle_counting_tensorflow, Lines: 29, Source: inputs.py

Example 7: get_dataset

# Required import: import tensorflow [as alias]
# Or: from tensorflow import as_string [as alias]
def get_dataset(
    path: str,
    train_fraction: float = 0.7,
    split: str = "train"
) -> tf.data.Dataset:
    def split_label(*row):
        return dict(zip(FEATURES, row)), row[-1]

    def in_training_set(*row):
        num_buckets = 1000
        key = tf.strings.join(list(map(tf.as_string, row)))
        bucket_id = tf.strings.to_hash_bucket_fast(key, num_buckets)
        return bucket_id < int(train_fraction * num_buckets)

    def in_test_set(*row):
        return ~in_training_set(*row)

    data = tf.data.experimental.CsvDataset(
        path,
        [tf.float32] * len(FEATURES) + [tf.int32],
        header=True,
        field_delim=";")

    if split == "train":
        return data.filter(in_training_set).map(split_label)
    elif split == "test":
        return data.filter(in_test_set).map(split_label)
    else:
        raise ValueError("Unknown option split, must be 'train' or 'test'") 
Author: criteo, Project: tf-yarn, Lines: 31, Source: winequality.py

Example 8: build_metagraph_list

# Required import: import tensorflow [as alias]
# Or: from tensorflow import as_string [as alias]
def build_metagraph_list(self):
        """
        Convert MetaParams into TF Summary Format and create summary_op

        Args:
            None

        Returns:
            Merged TF Op for TEXT summary elements, should only be executed once to reduce data duplication

        """
        ops = []

        self.ignore_unknown_dtypes = True
        for key in sorted(self.meta_params):
            value = self.convert_data_to_string(self.meta_params[key])

            if len(value) == 0:
                continue
            if isinstance(value, str):
                ops.append(tf.summary.text(key, tf.convert_to_tensor(str(value))))
            else:
                ops.append(tf.summary.text(key, tf.as_string(tf.convert_to_tensor(value))))

        with tf.control_dependencies(tf.tuple(ops)):
            self.summary_merged = tf.summary.merge_all()

        return self.summary_merged 
Author: rec-agent, Project: rec-rl, Lines: 30, Source: meta_parameter_recorder.py

Example 9: version_9

# Required import: import tensorflow [as alias]
# Or: from tensorflow import as_string [as alias]
def version_9(cls, node, **kwargs):
    inp = kwargs["tensor_dict"][node.inputs[0]]
    to_type = node.attrs.get("to")

    if to_type == tf.string:
      return [tf.as_string(inp)]

    if inp.dtype == tf.string:
      if to_type not in [tf.float32, tf.float64, tf.int32, tf.int64]:
        raise RuntimeError(
            "Cast string to type {} is not supported in Tensorflow.".format(
                to_type))
      return [tf.strings.to_number(inp, to_type)]

    return [cls.make_tensor_from_onnx_node(node, **kwargs)] 
Author: onnx, Project: onnx-tensorflow, Lines: 17, Source: cast.py

Example 10: build_row_key_dataset

# Required import: import tensorflow [as alias]
# Or: from tensorflow import as_string [as alias]
def build_row_key_dataset(num_records, row_prefix):
  if num_records is not None:
    ds = tf.data.Dataset.range(num_records)
  else:
    ds = tf.contrib.data.Counter()
  if num_records is None:
    width = 10
  else:
    width = pad_width(num_records)
  ds = ds.map(lambda idx: tf.as_string(idx, width=width, fill='0'))
  if row_prefix is not None:
    ds = ds.map(lambda idx: tf.string_join([row_prefix, idx]))
  return ds 
Author: mlperf, Project: training_results_v0.5, Lines: 15, Source: tfrecords_to_bigtable.py

Example 11: testInt

# Required import: import tensorflow [as alias]
# Or: from tensorflow import as_string [as alias]
def testInt(self):
    # Cannot use values outside -128..127 for test, because we're also
    # testing int8
    int_inputs_ = [0, -1, 1, -128, 127, -101, 101, -0]
    s = lambda strs: [x.decode("ascii") for x in strs]

    with self.test_session():
      for dtype in (tf.int32, tf.int64, tf.int8):
        input_ = tf.placeholder(dtype)

        output = tf.as_string(input_)
        result = output.eval(feed_dict={input_: int_inputs_})
        self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])

        output = tf.as_string(input_, width=3)
        result = output.eval(feed_dict={input_: int_inputs_})
        self.assertAllEqual(s(result), ["%3d" % x for x in int_inputs_])

        output = tf.as_string(input_, width=3, fill="0")
        result = output.eval(feed_dict={input_: int_inputs_})
        self.assertAllEqual(s(result), ["%03d" % x for x in int_inputs_])

      with self.assertRaisesOpError("scientific and shortest"):
        output = tf.as_string(input_, scientific=True)
        output.eval(feed_dict={input_: int_inputs_})

      with self.assertRaisesOpError("scientific and shortest"):
        output = tf.as_string(input_, shortest=True)
        output.eval(feed_dict={input_: int_inputs_})

      with self.assertRaisesOpError("precision not supported"):
        output = tf.as_string(input_, precision=0)
        output.eval(feed_dict={input_: int_inputs_}) 
Author: tobegit3hub, Project: deep_image_model, Lines: 35, Source: as_string_op_test.py

Example 12: testBool

# Required import: import tensorflow [as alias]
# Or: from tensorflow import as_string [as alias]
def testBool(self):
    bool_inputs_ = [False, True]
    s = lambda strs: [x.decode("ascii") for x in strs]

    with self.test_session():
      for dtype in (tf.bool,):
        input_ = tf.placeholder(dtype)

        output = tf.as_string(input_)
        result = output.eval(feed_dict={input_: bool_inputs_})
        self.assertAllEqual(s(result), ["false", "true"]) 
Author: tobegit3hub, Project: deep_image_model, Lines: 13, Source: as_string_op_test.py

Example 13: setUp

# Required import: import tensorflow [as alias]
# Or: from tensorflow import as_string [as alias]
def setUp(self):
    super(BatchSequencesWithStatesTest, self).setUp()
    self.value_length = 4
    self.batch_size = 2
    self.key = tf.string_join(["key_", tf.as_string(tf.cast(
        10000 * tf.random_uniform(()), tf.int32))])
    self.sequences = {"seq1": np.random.rand(self.value_length, 5),
                      "seq2": np.random.rand(self.value_length, 4, 2)}
    self.context = {"context1": [3, 4]}
    self.initial_states = {"state1": np.random.rand(6, 7),
                           "state2": np.random.rand(8)} 
Author: tobegit3hub, Project: deep_image_model, Lines: 13, Source: batch_sequences_with_states_test.py

Example 14: _build_pred

# Required import: import tensorflow [as alias]
# Or: from tensorflow import as_string [as alias]
def _build_pred(self):
    decoded, log_prob = tf.nn.ctc_greedy_decoder(self.logits, self.sequence_length)
    self.decoded = tf.identity(decoded[0], name='decoded')
    self.log_prob = tf.identity(log_prob, name='log_prob')
    if self.is_training:
      pred_str_labels = tf.as_string(self.decoded.values)
      pred_tensor = tf.SparseTensor(indices=self.decoded.indices, values=pred_str_labels, dense_shape=self.decoded.dense_shape)
      true_str_labels = tf.as_string(self.labels.values)
      true_tensor = tf.SparseTensor(indices=self.labels.indices, values=true_str_labels, dense_shape=self.labels.dense_shape)
      self.edit_distance = tf.reduce_mean(tf.edit_distance(pred_tensor, true_tensor, normalize=True), name='distance') 
Author: rockyzhengwu, Project: document-ocr, Lines: 12, Source: model.py

Example 15: preprocess

# Required import: import tensorflow [as alias]
# Or: from tensorflow import as_string [as alias]
def preprocess(inputs):
    """tf.transform's callback function for preprocessing inputs.
    Args:
      inputs: map from feature keys to raw not-yet-transformed features.
    Returns:
      Map from string feature key to transformed feature operations.
    """
    outputs = {}
    for key in DENSE_FLOAT_FEATURE_KEYS:
        # Preserve this feature as a dense float, setting nan's to the mean.
        outputs[key] = transform.scale_to_z_score(inputs[key])

    for key in VOCAB_FEATURE_KEYS:
        # Build a vocabulary for this feature.
        if inputs[key].dtype == tf.string:
            vocab_tensor = inputs[key]
        else:
            vocab_tensor = tf.as_string(inputs[key])
        outputs[key] = transform.string_to_int(
            vocab_tensor, vocab_filename='vocab_' + key,
            top_k=VOCAB_SIZE, num_oov_buckets=OOV_SIZE)

    for key in BUCKET_FEATURE_KEYS:
        outputs[key] = transform.bucketize(inputs[key], FEATURE_BUCKET_COUNT)

    for key in CATEGORICAL_FEATURE_KEYS:
        outputs[key] = tf.to_int64(inputs[key])

    taxi_fare = inputs[FARE_KEY]
    taxi_tip = inputs[LABEL_KEY]
    # Test if the tip was > 20% of the fare.
    tip_threshold = tf.multiply(taxi_fare, tf.constant(0.2))
    outputs[LABEL_KEY] = tf.logical_and(
        tf.logical_not(tf.is_nan(taxi_fare)),
        tf.greater(taxi_tip, tip_threshold))

    return outputs 
Author: kubeflow-kale, Project: kale, Lines: 39, Source: preprocessing.py


Note: The tensorflow.as_string examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their original authors, and copyright remains with those authors; consult the corresponding project's license before distributing or reusing the code. Do not reproduce without permission.