

Python tensorflow.string_join Method Code Examples

This article collects typical usage examples of the tensorflow.string_join method in Python. If you are wondering how to use tensorflow.string_join, how to call it in practice, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the tensorflow module in which the method lives.


The sections below present 15 code examples of the tensorflow.string_join method, ordered by popularity by default.
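Before the collected examples, here is a minimal standalone sketch of the op itself (TF 1.x graph mode, to match the examples below; in TF 2.x the same functionality is exposed as tf.strings.join). tf.string_join concatenates a list of string tensors element-wise, broadcasting scalar pieces and optionally inserting a separator:

# Minimal sketch of tf.string_join (TF 1.x graph mode, as in the examples below).
import tensorflow as tf

names = tf.constant(["img_0001", "img_0002"])
# Scalar pieces broadcast against the vector, giving one joined string per element.
paths = tf.string_join(["data/", names, ".png"])
key = tf.string_join(["run", tf.as_string(7)], separator="_")

with tf.Session() as sess:
    print(sess.run(paths))  # [b'data/img_0001.png' b'data/img_0002.png']
    print(sess.run(key))    # b'run_7'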

Example 1: testStringJoin

# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_join [as alias]
def testStringJoin(self):
    input0 = ["a", "b"]
    input1 = "a"
    input2 = [["b"], ["c"]]

    with self.test_session():
      output = tf.string_join([input0, input1])
      self.assertAllEqual(output.eval(), [b"aa", b"ba"])

      output = tf.string_join([input0, input1], separator="--")
      self.assertAllEqual(output.eval(), [b"a--a", b"b--a"])

      output = tf.string_join([input0, input1, input0], separator="--")
      self.assertAllEqual(output.eval(), [b"a--a--a", b"b--a--b"])

      output = tf.string_join([input1] * 4, separator="!")
      self.assertEqual(output.eval(), b"a!a!a!a")

      output = tf.string_join([input2] * 2, separator="")
      self.assertAllEqual(output.eval(), [[b"bb"], [b"cc"]])

      with self.assertRaises(ValueError):  # Inconsistent shapes
        tf.string_join([input0, input2]).eval() 
Developer ID: tobegit3hub, Project: deep_image_model, Lines of code: 25, Source file: string_join_op_test.py

Example 2: read_and_decode_ppm

# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_join [as alias]
def read_and_decode_ppm(self, filename_queue):
        # Note: reading the PPM files relies on scipy (from scipy import misc).
        def read_ppm(filename):  # called through tf.py_func, so it takes only the filename
            img = misc.imread(filename).astype('float32')
            return img
        
        flying_h = 384
        flying_w = 512
        img1_name = tf.string_join([self.img_dir, '/', filename_queue[0]])
        img2_name = tf.string_join([self.img_dir, '/', filename_queue[1]])

        img1 = tf.py_func(read_ppm, [img1_name], tf.float32)
        img2 = tf.py_func(read_ppm, [img2_name], tf.float32)

        img1 = tf.reshape(img1, [flying_h, flying_w, 3])
        img2 = tf.reshape(img2, [flying_h, flying_w, 3])
        return img1, img2 
Developer ID: ppliuboy, Project: DDFlow, Lines of code: 18, Source file: datasets.py

Example 3: read_and_decode_distillation

# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_join [as alias]
def read_and_decode_distillation(self, filename_queue):
        img1_name = tf.string_join([self.img_dir, '/', filename_queue[0]])
        img2_name = tf.string_join([self.img_dir, '/', filename_queue[1]])     
        img1 = tf.image.decode_png(tf.read_file(img1_name), channels=3)
        img1 = tf.cast(img1, tf.float32)
        img2 = tf.image.decode_png(tf.read_file(img2_name), channels=3)
        img2 = tf.cast(img2, tf.float32)    
        
        flow_occ_fw_name = tf.string_join([self.fake_flow_occ_dir, '/flow_occ_fw_', filename_queue[2], '.png'])
        flow_occ_bw_name = tf.string_join([self.fake_flow_occ_dir, '/flow_occ_bw_', filename_queue[2], '.png'])
        flow_occ_fw = tf.image.decode_png(tf.read_file(flow_occ_fw_name), dtype=tf.uint16, channels=3)
        flow_occ_fw = tf.cast(flow_occ_fw, tf.float32)   
        flow_occ_bw = tf.image.decode_png(tf.read_file(flow_occ_bw_name), dtype=tf.uint16, channels=3)
        flow_occ_bw = tf.cast(flow_occ_bw, tf.float32)             
        flow_fw, occ_fw = self.extract_flow_and_mask(flow_occ_fw)
        flow_bw, occ_bw = self.extract_flow_and_mask(flow_occ_bw)
        return img1, img2, flow_fw, flow_bw, occ_fw, occ_bw 
Developer ID: ppliuboy, Project: DDFlow, Lines of code: 19, Source file: datasets.py

Example 4: expand_path

# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_join [as alias]
def expand_path(self, sample):
        """ Expands audio paths for the given sample. """
        return dict(sample, **{f'{instrument}_path': tf.string_join(
            (self._audio_path, sample[f'{instrument}_path']), SEPARATOR)
            for instrument in self._instruments}) 
Developer ID: deezer, Project: spleeter, Lines of code: 7, Source file: dataset.py

Example 5: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_join [as alias]
def __init__(self, config, batch_size, one_hot=False):
        self.lookup = None
        reader = tf.TextLineReader()
        filename_queue = tf.train.string_input_producer(["chargan.txt"])
        key, x = reader.read(filename_queue)
        vocabulary = self.get_vocabulary()

        table = tf.contrib.lookup.string_to_index_table_from_tensor(
            mapping = vocabulary, default_value = 0)

        x = tf.string_join([x, tf.constant(" " * 64)]) 
        x = tf.substr(x, [0], [64])
        x = tf.string_split(x,delimiter='')
        x = tf.sparse_tensor_to_dense(x, default_value=' ')
        x = tf.reshape(x, [64])
        x = table.lookup(x)
        self.one_hot = one_hot
        if one_hot:
            x = tf.one_hot(x, len(vocabulary))
            x = tf.cast(x, dtype=tf.float32)
            x = tf.reshape(x, [1, int(x.get_shape()[0]), int(x.get_shape()[1]), 1])
        else:
            x = tf.cast(x, dtype=tf.float32)
            x -= len(vocabulary)/2.0
            x /= len(vocabulary)/2.0
            x = tf.reshape(x, [1,1, 64, 1])

        num_preprocess_threads = 8

        x = tf.train.shuffle_batch(
          [x],
          batch_size=batch_size,
          num_threads=num_preprocess_threads,
          capacity= 5000,
          min_after_dequeue=500,
          enqueue_many=True)

        self.x = x
        self.table = table 
Developer ID: HyperGAN, Project: HyperGAN, Lines of code: 41, Source file: common.py

Example 6: build_row_key_dataset

# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_join [as alias]
def build_row_key_dataset(num_records, row_prefix):
  if num_records is not None:
    ds = tf.data.Dataset.range(num_records)
  else:
    ds = tf.contrib.data.Counter()
  if num_records is None:
    width = 10
  else:
    width = pad_width(num_records)
  ds = ds.map(lambda idx: tf.as_string(idx, width=width, fill='0'))
  if row_prefix is not None:
    ds = ds.map(lambda idx: tf.string_join([row_prefix, idx]))
  return ds 
Developer ID: mlperf, Project: training_results_v0.5, Lines of code: 15, Source file: tfrecords_to_bigtable.py

Example 7: testStateSaverScopeNames

# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_join [as alias]
def testStateSaverScopeNames(self):
    batch_size = tf.constant(2)
    sqss_scope_name = "unique_scope_name_for_sqss"
    num_unroll = 2
    length = 3
    key = tf.string_join(["key_", tf.as_string(tf.cast(
        10000 * tf.random_uniform(()), tf.int32))])
    padded_length = 4
    sequences = {"seq1": np.random.rand(padded_length, 5),
                 "seq2": np.random.rand(padded_length, 4, 2)}
    context = {"context1": [3, 4]}
    initial_states = {"state1": np.random.rand(6, 7),
                      "state2": np.random.rand(8)}
    state_saver = tf.contrib.training.SequenceQueueingStateSaver(
        batch_size=batch_size,
        num_unroll=num_unroll,
        input_length=length,
        input_key=key,
        input_sequences=sequences,
        input_context=context,
        initial_states=initial_states,
        name=sqss_scope_name)
    prefetch_op = state_saver.prefetch_op
    next_batch = state_saver.next_batch
    self.assertTrue(state_saver.barrier.barrier_ref.name.startswith(
        "%s/" % sqss_scope_name))
    self.assertTrue(prefetch_op.name.startswith("%s/" % sqss_scope_name))
    self.assertTrue(next_batch.key.name.startswith("%s/" % sqss_scope_name)) 
Developer ID: tobegit3hub, Project: deep_image_model, Lines of code: 30, Source file: sequence_queueing_state_saver_test.py

Example 8: setUp

# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_join [as alias]
def setUp(self):
    super(BatchSequencesWithStatesTest, self).setUp()
    self.value_length = 4
    self.batch_size = 2
    self.key = tf.string_join(["key_", tf.as_string(tf.cast(
        10000 * tf.random_uniform(()), tf.int32))])
    self.sequences = {"seq1": np.random.rand(self.value_length, 5),
                      "seq2": np.random.rand(self.value_length, 4, 2)}
    self.context = {"context1": [3, 4]}
    self.initial_states = {"state1": np.random.rand(6, 7),
                           "state2": np.random.rand(8)} 
Developer ID: tobegit3hub, Project: deep_image_model, Lines of code: 13, Source file: batch_sequences_with_states_test.py

Example 9: clip_to_waveform

# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_join [as alias]
def clip_to_waveform(clip, clip_dir=None):
  """Decodes a WAV clip into a waveform tensor."""
  # Decode the WAV-format clip into a waveform tensor where
  # the values lie in [-1, +1].
  clip_path = tf.string_join([clip_dir, clip], separator=os.sep)
  clip_data = tf.read_file(clip_path)
  waveform, sr = tf_audio.decode_wav(clip_data)
  #waveform = tf.Print(waveform, [tf.shape(waveform), waveform], message='Waveform:', summarize=100)
  # Assert that the clip has the expected sample rate.
  check_sr = tf.assert_equal(sr, SAMPLE_RATE)
  # and that it is mono.
  check_channels = tf.assert_equal(tf.shape(waveform)[1], 1)
  with tf.control_dependencies([tf.group(check_sr, check_channels)]):
    return tf.squeeze(waveform) 
Developer ID: DCASE-REPO, Project: dcase2019_task2_baseline, Lines of code: 16, Source file: inputs.py

Example 10: read_and_decode

# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_join [as alias]
def read_and_decode(self, filename_queue):
        img0_name = tf.string_join([self.img_dir, '/', filename_queue[0]])
        img1_name = tf.string_join([self.img_dir, '/', filename_queue[1]])
        img2_name = tf.string_join([self.img_dir, '/', filename_queue[2]])
 
        img0 = tf.image.decode_png(tf.read_file(img0_name), channels=3)
        img0 = tf.cast(img0, tf.float32)        
        img1 = tf.image.decode_png(tf.read_file(img1_name), channels=3)
        img1 = tf.cast(img1, tf.float32)
        img2 = tf.image.decode_png(tf.read_file(img2_name), channels=3)
        img2 = tf.cast(img2, tf.float32)    
 
        return img0, img1, img2    
    
    # For Validation or Testing 
Developer ID: ppliuboy, Project: SelFlow, Lines of code: 17, Source file: datasets.py

Example 11: _parse_string_line

# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_join [as alias]
def _parse_string_line(string_line, root_path):
        """
        解析文本中的一行字符串行,得到圖片路徑(拚接圖片根目錄)和標簽
        :param string_line: 文本中的一行字符串,image_name label0 label1 label2 label3 ...
        :param root_path: 圖片根目錄
        :return: DatasetV1Adapter<(圖片路徑Tensor(shape=(), dtype=string),標簽Tensor(shape=(?,), dtype=float32))>
        """
        strings = tf.string_split([string_line], delimiter=' ').values
        image_path = tf.string_join([root_path, strings[0]], separator=os.sep)
        labels = tf.string_to_number(strings[1:])
        return image_path, labels 
Developer ID: zheng-yuwei, Project: multi-label-classification, Lines of code: 13, Source file: file_util.py
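As a rough usage sketch (not part of the original project), a parser like this would typically be mapped over a tf.data.TextLineDataset; the list-file name and image root below are hypothetical, and _parse_string_line is assumed to be in scope (e.g. as a module-level function or static method):

# Hedged usage sketch; "train_list.txt" and "/data/images" are hypothetical paths.
import tensorflow as tf

lines = tf.data.TextLineDataset("train_list.txt")
dataset = lines.map(lambda line: _parse_string_line(line, "/data/images"))
# Each element becomes (image_path: scalar tf.string, labels: 1-D tf.float32).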

Example 12: read_image

# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_join [as alias]
def read_image(im):
  """Reads an image."""
  filename = tf.string_join([FLAGS.data_dir, im])
  image = tf.read_file(filename)
  image = tf.image.decode_jpeg(image, 3)
  image = tf.image.convert_image_dtype(image, tf.float32)
  image = tf.image.resize_images(image, [346, 346])
  image = image[23:-24, 23:-24]
  image = image * 2 - 1
  return image 
Developer ID: fengyang0317, Project: unsupervised_captioning, Lines of code: 12, Source file: caption_infer.py

Example 13: add_distance_transform

# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_join [as alias]
def add_distance_transform(tensors, labels, distance_transform_fn):
  args_list = [tensors["unnormalized_img"], tensors["label"],
               tensors["raw_label"], labels[Constants.STRATEGY], labels[Constants.IGNORE_CLASSES]]

  if "old_label" in tensors:
    args_list.append(tensors["old_label"])

  u0, u1, num_clicks = tf.py_func(distance_transform_fn,
                                  args_list,
                                  [tf.float32, tf.float32, tf.int64],
                                  name="create_distance_transform")

  u0 = tf.expand_dims(u0, axis=2)
  u0.set_shape(tensors["unnormalized_img"].get_shape().as_list()[:-1] + [1])

  u1 = tf.expand_dims(u1, axis=2)
  u1.set_shape(tensors["unnormalized_img"].get_shape().as_list()[:-1] + [1])

  shape = tensors["tag"].get_shape()
  im_path = tf.string_join([tensors["tag"], tf.as_string(num_clicks)], separator=":", name="JoinPath")
  im_path.set_shape(shape)

  tensors[Constants.DT_NEG] = u0
  tensors[Constants.DT_POS] = u1
  tensors["tag"] = im_path

  return tensors 
Developer ID: JonathonLuiten, Project: PReMVOS, Lines of code: 29, Source file: Reader.py

Example 14: make_status_message

# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_join [as alias]
def make_status_message(model):
  """Makes a string `Tensor` of training status."""
  return tf.string_join(
      [
          'Starting train step: current_image_id: ',
          tf.as_string(model.current_image_id), ', progress: ',
          tf.as_string(model.progress), ', num_blocks: {}'.format(
              model.num_blocks), ', batch_size: {}'.format(model.batch_size)
      ],
      name='status_message') 
Developer ID: generalized-iou, Project: g-tensorflow-models, Lines of code: 12, Source file: train.py

Example 15: body

# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_join [as alias]
def body(self, pc, tape, cur, jumps, output):
        token = self.tokens[pc]
        inc_pc = tf.add(pc, 1)

        def stdin(c):
            #return tf.assign(self.tape[c], input(''))
            return self.tape

        return tf.cond(tf.equal(token, '+'),
            lambda: (inc_pc, tf.assign(self.tape[cur], self.tape[cur]+1), cur, jumps, output),
            lambda: tf.cond(tf.equal(token, '-'),
                lambda: (inc_pc, tf.assign(self.tape[cur], self.tape[cur]-1), cur, jumps, output),
                lambda: tf.cond(tf.equal(token, '>'),
                    lambda: (inc_pc, tape, tf.add(cur, 1), jumps, output),
                    lambda: tf.cond(tf.equal(token, '<'),
                        lambda: (inc_pc, tape, tf.subtract(cur, 1), jumps, output),
                        lambda: tf.cond(tf.equal(token, '.'),
                            lambda: (inc_pc, tape, cur, jumps, tf.string_join([output, ascii2char(tape[cur])])),
                            lambda: tf.cond(tf.equal(token, ','),
                                lambda: (inc_pc, stdin(cur), cur, jumps, output),
                                lambda: tf.cond(tf.equal(token, '['),
                                    lambda: tf.cond(tf.equal(self.tape[cur], 0),
                                        lambda: (jumps[pc], tape, cur, jumps, output),
                                        lambda: (inc_pc, tape, cur, jumps, output)),
                                    lambda: tf.cond(tf.equal(token, ']'),
                                        lambda: tf.cond(tf.not_equal(self.tape[cur], 0),
                                            lambda: (jumps[pc], tape, cur, jumps, output),
                                            lambda: (inc_pc, tape, cur, jumps, output)),
                                        lambda: (inc_pc, tape, cur, jumps, output) )))))))) 
Developer ID: akimach, Project: EsotericTensorFlow, Lines of code: 31, Source file: brain_fuck.py


Note: The tensorflow.string_join examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. For distribution and use, please follow the License of the corresponding project; do not republish without permission.