

Python utils.Dequantize Method Code Examples

This article collects typical usage examples of the utils.Dequantize method in Python. If you are wondering what utils.Dequantize does, how to call it, or what real-world uses look like, the selected code examples below should help. You can also explore further usage examples from the utils module that contains this method.


Seven code examples of the utils.Dequantize method are shown below, sorted by popularity by default.
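For context, utils.Dequantize here is the helper from the YouTube-8M starter code: it maps uint8-quantized feature bytes back to floats in the range [min_quantized_value, min_quantized_value + range]. A minimal sketch of that mapping is shown below; the default constants follow the starter code and should be treated as an assumption if your utils module differs.

# Minimal sketch of utils.Dequantize, modeled on the YouTube-8M starter code.
# Assumes features were quantized to uint8 over [min_quantized_value, max_quantized_value].
def Dequantize(feat_vector, max_quantized_value=2, min_quantized_value=-2):
    """Maps uint8-quantized feature values back to approximate float values."""
    assert max_quantized_value > min_quantized_value
    quantized_range = max_quantized_value - min_quantized_value
    scalar = quantized_range / 255.0
    bias = (quantized_range / 512.0) + min_quantized_value
    return feat_vector * scalar + bias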

Example 1: get_video_matrix

# Required import: import utils [as alias]
# Or: from utils import Dequantize [as alias]
def get_video_matrix(self, features, feature_size, max_frames,
                       max_quantized_value, min_quantized_value):
    """Decodes features from an input string and quantizes it.

    Args:
      features: raw feature values
      feature_size: length of each frame feature vector
      max_frames: number of frames (rows) in the output feature_matrix
      max_quantized_value: the maximum of the quantized value.
      min_quantized_value: the minimum of the quantized value.

    Returns:
      feature_matrix: matrix of all frame-features
      num_frames: number of frames in the sequence
    """
    decoded_features = tf.reshape(
        tf.cast(tf.decode_raw(features, tf.uint8), tf.float32),
        [-1, feature_size])

    num_frames = tf.minimum(tf.shape(decoded_features)[0], max_frames)
    feature_matrix = utils.Dequantize(decoded_features, max_quantized_value,
                                      min_quantized_value)
    feature_matrix = resize_axis(feature_matrix, 0, max_frames)
    return feature_matrix, num_frames 
Developer: google, Project: youtube-8m, Lines: 26, Source: readers.py
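The snippet above also calls a resize_axis helper defined elsewhere in readers.py, which truncates or zero-pads a tensor along one axis to a fixed length. A rough sketch of such a helper, inferred from how it is used here (the exact padding behavior is an assumption), is shown below.

# Hypothetical sketch of resize_axis: truncate or zero-pad `tensor` along `axis`
# so that its size on that axis equals `new_size`.
import tensorflow as tf

def resize_axis(tensor, axis, new_size, fill_value=0):
    tensor = tf.convert_to_tensor(tensor)
    shape = tf.unstack(tf.shape(tensor))

    pad_shape = shape[:]
    pad_shape[axis] = tf.maximum(0, new_size - shape[axis])

    shape[axis] = tf.minimum(shape[axis], new_size)
    shape = tf.stack(shape)

    resized = tf.concat([
        tf.slice(tensor, tf.zeros_like(shape), shape),
        tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype))
    ], axis)
    return resized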

Example 2: get_video_matrix

# Required import: import utils [as alias]
# Or: from utils import Dequantize [as alias]
def get_video_matrix(self,
                       features,
                       feature_size,
                       max_frames,
                       max_quantized_value,
                       min_quantized_value):
    """Decodes features from an input string and quantizes it.

    Args:
      features: raw feature values
      feature_size: length of each frame feature vector
      max_frames: number of frames (rows) in the output feature_matrix
      max_quantized_value: the maximum of the quantized value.
      min_quantized_value: the minimum of the quantized value.

    Returns:
      feature_matrix: matrix of all frame-features
      num_frames: number of frames in the sequence
    """
    decoded_features = tf.reshape(
        tf.cast(tf.decode_raw(features, tf.uint8), tf.float32),
        [-1, feature_size])

    num_frames = tf.minimum(tf.shape(decoded_features)[0], max_frames)
    feature_matrix = utils.Dequantize(decoded_features,
                                      max_quantized_value,
                                      min_quantized_value)
    feature_matrix = resize_axis(feature_matrix, 0, max_frames)
    return feature_matrix, num_frames 
Developer: antoine77340, Project: Youtube-8M-WILLOW, Lines: 31, Source: readers.py

Example 3: get_frame_input_feature

# Required import: import utils [as alias]
# Or: from utils import Dequantize [as alias]
def get_frame_input_feature(input_file):
    features = []
    record_iterator = tf.python_io.tf_record_iterator(path=input_file)
    for i, string_record in enumerate(record_iterator):
        example = tf.train.SequenceExample()
        example.ParseFromString(string_record)

        # traverse the Example format to get data
        video_id = example.context.feature['video_id'].bytes_list.value[0]
        label = example.context.feature['labels'].int64_list.value[:]
        rgbs = []
        audios = []
        rgb_feature = example.feature_lists.feature_list['rgb'].feature
        for i in range(len(rgb_feature)):
            rgb = np.fromstring(rgb_feature[i].bytes_list.value[0], dtype=np.uint8).astype(np.float32)
            rgb = utils.Dequantize(rgb, 2, -2)
            rgbs.append(rgb)
        audio_feature = example.feature_lists.feature_list['audio'].feature
        for i in range(len(audio_feature)):
            audio = np.fromstring(audio_feature[i].bytes_list.value[0], dtype=np.uint8).astype(np.float32)
            audio = utils.Dequantize(audio, 2, -2)
            audios.append(audio)
        rgbs = np.array(rgbs)
        audios = np.array(audios)
        features.append((video_id, label, rgbs, audios))
    return features 
Developer: wangheda, Project: youtube-8m, Lines: 28, Source: YM_labels_matrix.py
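A short usage sketch for the function above; the tfrecord path is hypothetical and stands in for a real YouTube-8M frame-level shard.

# Hypothetical usage: read one frame-level tfrecord file and inspect the first video.
features = get_frame_input_feature("train-0000.tfrecord")  # path is an assumption
video_id, labels, rgbs, audios = features[0]
print(video_id, labels)
print(rgbs.shape)    # (num_frames, 1024) RGB features
print(audios.shape)  # (num_frames, 128) audio features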

Example 4: get_video_matrix

# Required import: import utils [as alias]
# Or: from utils import Dequantize [as alias]
def get_video_matrix(self,
                       features,
                       feature_size,
                       max_frames,
                       max_quantized_value,
                       min_quantized_value):
    """Decodes features from an input string and quantizes it.

    Args:
      features: raw feature values
      feature_size: length of each frame feature vector
      max_frames: number of frames (rows) in the output feature_matrix
      max_quantized_value: the maximum of the quantized value.
      min_quantized_value: the minimum of the quantized value.

    Returns:
      feature_matrix: matrix of all frame-features
      num_frames: number of frames in the sequence
    """
    decoded_features = tf.reshape(
        tf.cast(tf.decode_raw(features, tf.uint8), tf.float32),
        [-1, feature_size])

    num_frames = tf.minimum(tf.shape(decoded_features)[0], max_frames)
    feature_matrix = utils.Dequantize(decoded_features,
                                      max_quantized_value,
                                      min_quantized_value)
    feature_matrix = resize_axis(feature_matrix, 0, max_frames)
    if self.prepare_distill:
        def_feature_matrix = tf.reshape(tf.decode_raw(features, tf.uint8), [-1, feature_size])
        def_feature_matrix = resize_axis(def_feature_matrix, 0, max_frames)
        return feature_matrix, num_frames, def_feature_matrix
    return feature_matrix, num_frames 
Developer: miha-skalic, Project: youtube8mchallenge, Lines: 35, Source: readers.py

Example 5: get_video_matrix

# Required import: import utils [as alias]
# Or: from utils import Dequantize [as alias]
def get_video_matrix(self,
                       features,
                       feature_size,
                       max_frames,
                       max_quantized_value,
                       min_quantized_value):
        """Decodes features from an input string and quantizes it.

        Args:
          features: raw feature values
          feature_size: length of each frame feature vector
          max_frames: number of frames (rows) in the output feature_matrix
          max_quantized_value: the maximum of the quantized value.
          min_quantized_value: the minimum of the quantized value.

        Returns:
          feature_matrix: matrix of all frame-features
          num_frames: number of frames in the sequence
        """
        decoded_features = tf.reshape(
            tf.cast(tf.decode_raw(features, tf.uint8), tf.float32),
            [-1, feature_size])

        num_frames = tf.minimum(tf.shape(decoded_features)[0], max_frames)
        feature_matrix = utils.Dequantize(decoded_features,
                                          max_quantized_value,
                                          min_quantized_value)
        feature_matrix = resize_axis(feature_matrix, 0, max_frames)
        return feature_matrix, num_frames 
Developer: pomonam, Project: AttentionCluster, Lines: 31, Source: readers.py

Example 6: frame_example_2_np

# Required import: import utils [as alias]
# Or: from utils import Dequantize [as alias]
def frame_example_2_np(seq_example_bytes, 
                       max_quantized_value=2,
                       min_quantized_value=-2):
  feature_names=['rgb','audio']
  feature_sizes = [1024, 128]
  with tf.Graph().as_default():
    contexts, features = tf.parse_single_sequence_example(
        seq_example_bytes,
        context_features={"video_id": tf.FixedLenFeature(
            [], tf.string),
                          "labels": tf.VarLenFeature(tf.int64)},
        sequence_features={
            feature_name : tf.FixedLenSequenceFeature([], dtype=tf.string)
            for feature_name in feature_names
        })

    decoded_features = { name: tf.reshape(
        tf.cast(tf.decode_raw(features[name], tf.uint8), tf.float32),
        [-1, size]) for name, size in zip(feature_names, feature_sizes)
        }
    feature_matrices = {
        name: utils.Dequantize(decoded_features[name],
          max_quantized_value, min_quantized_value) for name in feature_names}
        
    with tf.Session() as sess:
      vid = sess.run(contexts['video_id'])
      labs = sess.run(contexts['labels'].values)
      rgb = sess.run(feature_matrices['rgb'])
      audio = sess.run(feature_matrices['audio'])
      
  return vid, labs, rgb, audio

       
#%% Split frame level file into three video level files: all, 1st half, 2nd half. 
Developer: mpekalski, Project: Y8M, Lines: 36, Source: split_video.py
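A possible way to call frame_example_2_np on a single serialized SequenceExample, pulling the first record from a hypothetical frame-level shard:

# Hypothetical usage: decode the first record of a frame-level tfrecord shard.
# The file path is an assumption; substitute a real YouTube-8M frame-level file.
import tensorflow as tf

record_iterator = tf.python_io.tf_record_iterator(path="frame_level_shard.tfrecord")
seq_example_bytes = next(record_iterator)
vid, labs, rgb, audio = frame_example_2_np(seq_example_bytes)
print(vid, labs, rgb.shape, audio.shape)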

Example 7: build_graph

# Required import: import utils [as alias]
# Or: from utils import Dequantize [as alias]
def build_graph():
    feature_names=['rgb','audio']
    feature_sizes = [1024, 128] 
    max_quantized_value=2
    min_quantized_value=-2

    seq_example_bytes = tf.placeholder(tf.string)
    contexts, features = tf.parse_single_sequence_example(
        seq_example_bytes,
        context_features={"video_id": tf.FixedLenFeature(
            [], tf.string),
                          "labels": tf.VarLenFeature(tf.int64)},
        sequence_features={
            feature_name : tf.FixedLenSequenceFeature([], dtype=tf.string)
            for feature_name in feature_names
        })

    decoded_features = { name: tf.reshape(
        tf.cast(tf.decode_raw(features[name], tf.uint8), tf.float32),
        [-1, size]) for name, size in zip(feature_names, feature_sizes)
        }
    feature_matrices = {
        name: utils.Dequantize(decoded_features[name],
          max_quantized_value, min_quantized_value) for name in feature_names}
        
    tf.add_to_collection("vid_tsr", contexts['video_id'])
    tf.add_to_collection("labs_tsr", contexts['labels'].values)
    tf.add_to_collection("rgb_tsr", feature_matrices['rgb'])
    tf.add_to_collection("audio_tsr", feature_matrices['audio'])
    tf.add_to_collection("seq_example_bytes", seq_example_bytes)
 
#   with tf.Session() as sess:
#       writer = tf.summary.FileWriter('./graphs', sess.graph) 
Developer: mpekalski, Project: Y8M, Lines: 35, Source: split_video.py
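Since build_graph only registers tensors in graph collections, a caller is expected to look them up and feed serialized examples through the placeholder. Below is a hedged sketch of that consumption pattern; it is not part of the original snippet, and the tfrecord path is an assumption.

# Hypothetical consumer of build_graph: fetch the registered tensors and feed
# one serialized SequenceExample through the placeholder.
import tensorflow as tf

with tf.Graph().as_default() as g:
    build_graph()
    vid_tsr = tf.get_collection("vid_tsr")[0]
    labs_tsr = tf.get_collection("labs_tsr")[0]
    rgb_tsr = tf.get_collection("rgb_tsr")[0]
    audio_tsr = tf.get_collection("audio_tsr")[0]
    seq_example_bytes = tf.get_collection("seq_example_bytes")[0]

    with tf.Session(graph=g) as sess:
        raw_record = next(tf.python_io.tf_record_iterator(path="frame_level_shard.tfrecord"))
        vid, labs, rgb, audio = sess.run(
            [vid_tsr, labs_tsr, rgb_tsr, audio_tsr],
            feed_dict={seq_example_bytes: raw_record})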


Note: The utils.Dequantize examples in this article were compiled by vimsky (純淨天空) from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution or use should follow the corresponding project licenses. Please do not reproduce this article without permission.