This article collects typical usage examples of the utils.Dequantize method in Python. If you are wondering what utils.Dequantize does, how to call it, or what real-world usage looks like, the curated code samples below should help. You can also explore further usage examples from the utils module.
Seven code examples of utils.Dequantize are shown below, sorted by popularity by default.
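For context, all of the examples come from YouTube-8M-style feature readers, where each frame feature is stored as quantized uint8 bytes and mapped back to floats in a fixed range. The exact utils.Dequantize being imported depends on the repository, but a minimal sketch of such a helper, assuming the YouTube-8M starter-code convention of a [-2, 2] range spread over 255 quantization levels, looks roughly like this:

def Dequantize(feat_vector, max_quantized_value=2, min_quantized_value=-2):
  """Maps uint8-quantized features (0..255) back to floats in roughly
  [min_quantized_value, max_quantized_value]. Works element-wise on
  NumPy arrays and TensorFlow tensors alike."""
  assert max_quantized_value > min_quantized_value
  quantized_range = max_quantized_value - min_quantized_value
  scalar = quantized_range / 255.0
  # half-bin offset so bin centers, not bin edges, are recovered
  bias = (quantized_range / 512.0) + min_quantized_value
  return feat_vector * scalar + bias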
Example 1: get_video_matrix
# Required import: import utils [as alias]
# Or: from utils import Dequantize [as alias]
def get_video_matrix(self, features, feature_size, max_frames,
                     max_quantized_value, min_quantized_value):
  """Decodes features from an input string and dequantizes them.

  Args:
    features: raw feature values
    feature_size: length of each frame feature vector
    max_frames: number of frames (rows) in the output feature_matrix
    max_quantized_value: the maximum of the quantized value.
    min_quantized_value: the minimum of the quantized value.

  Returns:
    feature_matrix: matrix of all frame-features
    num_frames: number of frames in the sequence
  """
  decoded_features = tf.reshape(
      tf.cast(tf.decode_raw(features, tf.uint8), tf.float32),
      [-1, feature_size])
  num_frames = tf.minimum(tf.shape(decoded_features)[0], max_frames)
  feature_matrix = utils.Dequantize(decoded_features, max_quantized_value,
                                    min_quantized_value)
  feature_matrix = resize_axis(feature_matrix, 0, max_frames)
  return feature_matrix, num_frames
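In the YouTube-8M starter reader this method is typically called once per feature stream (for example the 1024-dimensional 'rgb' and the 128-dimensional 'audio' features), with max_quantized_value=2 and min_quantized_value=-2, and the per-stream matrices are then concatenated along the feature axis before being fed to the model.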
Example 2: get_video_matrix
# Required import: import utils [as alias]
# Or: from utils import Dequantize [as alias]
def get_video_matrix(self,
                     features,
                     feature_size,
                     max_frames,
                     max_quantized_value,
                     min_quantized_value):
  """Decodes features from an input string and dequantizes them.

  Args:
    features: raw feature values
    feature_size: length of each frame feature vector
    max_frames: number of frames (rows) in the output feature_matrix
    max_quantized_value: the maximum of the quantized value.
    min_quantized_value: the minimum of the quantized value.

  Returns:
    feature_matrix: matrix of all frame-features
    num_frames: number of frames in the sequence
  """
  decoded_features = tf.reshape(
      tf.cast(tf.decode_raw(features, tf.uint8), tf.float32),
      [-1, feature_size])
  num_frames = tf.minimum(tf.shape(decoded_features)[0], max_frames)
  feature_matrix = utils.Dequantize(decoded_features,
                                    max_quantized_value,
                                    min_quantized_value)
  feature_matrix = resize_axis(feature_matrix, 0, max_frames)
  return feature_matrix, num_frames
Example 3: get_frame_input_feature
# Required import: import utils [as alias]
# Or: from utils import Dequantize [as alias]
def get_frame_input_feature(input_file):
  """Parses a frame-level TFRecord file into (video_id, labels, rgbs, audios) tuples."""
  features = []
  record_iterator = tf.python_io.tf_record_iterator(path=input_file)
  for i, string_record in enumerate(record_iterator):
    example = tf.train.SequenceExample()
    example.ParseFromString(string_record)
    # traverse the Example format to get data
    video_id = example.context.feature['video_id'].bytes_list.value[0]
    label = example.context.feature['labels'].int64_list.value[:]
    rgbs = []
    audios = []
    rgb_feature = example.feature_lists.feature_list['rgb'].feature
    for i in range(len(rgb_feature)):
      rgb = np.fromstring(rgb_feature[i].bytes_list.value[0], dtype=np.uint8).astype(np.float32)
      rgb = utils.Dequantize(rgb, 2, -2)
      rgbs.append(rgb)
    audio_feature = example.feature_lists.feature_list['audio'].feature
    for i in range(len(audio_feature)):
      audio = np.fromstring(audio_feature[i].bytes_list.value[0], dtype=np.uint8).astype(np.float32)
      audio = utils.Dequantize(audio, 2, -2)
      audios.append(audio)
    rgbs = np.array(rgbs)
    audios = np.array(audios)
    features.append((video_id, label, rgbs, audios))
  return features
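A quick usage sketch (the file name below is a placeholder, not from the original example):

frame_features = get_frame_input_feature("frame_level.tfrecord")  # placeholder path
video_id, labels, rgbs, audios = frame_features[0]
print(video_id, len(labels), rgbs.shape, audios.shape)  # e.g. (num_frames, 1024) and (num_frames, 128)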
Example 4: get_video_matrix
# Required import: import utils [as alias]
# Or: from utils import Dequantize [as alias]
def get_video_matrix(self,
                     features,
                     feature_size,
                     max_frames,
                     max_quantized_value,
                     min_quantized_value):
  """Decodes features from an input string and dequantizes them.

  Args:
    features: raw feature values
    feature_size: length of each frame feature vector
    max_frames: number of frames (rows) in the output feature_matrix
    max_quantized_value: the maximum of the quantized value.
    min_quantized_value: the minimum of the quantized value.

  Returns:
    feature_matrix: matrix of all frame-features
    num_frames: number of frames in the sequence
  """
  decoded_features = tf.reshape(
      tf.cast(tf.decode_raw(features, tf.uint8), tf.float32),
      [-1, feature_size])
  num_frames = tf.minimum(tf.shape(decoded_features)[0], max_frames)
  feature_matrix = utils.Dequantize(decoded_features,
                                    max_quantized_value,
                                    min_quantized_value)
  feature_matrix = resize_axis(feature_matrix, 0, max_frames)
  if self.prepare_distill:
    def_feature_matrix = tf.reshape(tf.decode_raw(features, tf.uint8), [-1, feature_size])
    def_feature_matrix = resize_axis(def_feature_matrix, 0, max_frames)
    return feature_matrix, num_frames, def_feature_matrix
  return feature_matrix, num_frames
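The only difference from the earlier variants is the prepare_distill branch: alongside the dequantized float matrix, it also returns the raw uint8-valued matrix, presumably so a distillation pipeline can keep working with the compact quantized representation.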
Example 5: get_video_matrix
# Required import: import utils [as alias]
# Or: from utils import Dequantize [as alias]
def get_video_matrix(self,
                     features,
                     feature_size,
                     max_frames,
                     max_quantized_value,
                     min_quantized_value):
  """Decodes features from an input string and dequantizes them.

  Args:
    features: raw feature values
    feature_size: length of each frame feature vector
    max_frames: number of frames (rows) in the output feature_matrix
    max_quantized_value: the maximum of the quantized value.
    min_quantized_value: the minimum of the quantized value.

  Returns:
    feature_matrix: matrix of all frame-features
    num_frames: number of frames in the sequence
  """
  decoded_features = tf.reshape(
      tf.cast(tf.decode_raw(features, tf.uint8), tf.float32),
      [-1, feature_size])
  num_frames = tf.minimum(tf.shape(decoded_features)[0], max_frames)
  feature_matrix = utils.Dequantize(decoded_features,
                                    max_quantized_value,
                                    min_quantized_value)
  feature_matrix = resize_axis(feature_matrix, 0, max_frames)
  return feature_matrix, num_frames
Example 6: frame_example_2_np
# Required import: import utils [as alias]
# Or: from utils import Dequantize [as alias]
def frame_example_2_np(seq_example_bytes,
                       max_quantized_value=2,
                       min_quantized_value=-2):
  """Parses one serialized frame-level SequenceExample into NumPy arrays."""
  feature_names = ['rgb', 'audio']
  feature_sizes = [1024, 128]
  with tf.Graph().as_default():
    contexts, features = tf.parse_single_sequence_example(
        seq_example_bytes,
        context_features={"video_id": tf.FixedLenFeature([], tf.string),
                          "labels": tf.VarLenFeature(tf.int64)},
        sequence_features={
            feature_name: tf.FixedLenSequenceFeature([], dtype=tf.string)
            for feature_name in feature_names
        })
    decoded_features = {
        name: tf.reshape(
            tf.cast(tf.decode_raw(features[name], tf.uint8), tf.float32),
            [-1, size])
        for name, size in zip(feature_names, feature_sizes)
    }
    feature_matrices = {
        name: utils.Dequantize(decoded_features[name],
                               max_quantized_value, min_quantized_value)
        for name in feature_names
    }
    with tf.Session() as sess:
      vid = sess.run(contexts['video_id'])
      labs = sess.run(contexts['labels'].values)
      rgb = sess.run(feature_matrices['rgb'])
      audio = sess.run(feature_matrices['audio'])
  return vid, labs, rgb, audio

#%% Split frame level file into three video level files: all, 1st half, 2nd half.
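A hypothetical usage sketch (the TFRecord path is a placeholder): each call builds and runs its own graph, so this is convenient for inspecting records rather than for high-throughput reading.

import tensorflow as tf  # TF 1.x APIs, as in the example above

for record in tf.python_io.tf_record_iterator(path="frame_level.tfrecord"):  # placeholder path
  vid, labs, rgb, audio = frame_example_2_np(record)
  print(vid, labs, rgb.shape, audio.shape)
  break  # only inspect the first video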
Example 7: build_graph
# Required import: import utils [as alias]
# Or: from utils import Dequantize [as alias]
def build_graph():
  """Builds a reusable parsing graph and exposes its tensors via collections."""
  feature_names = ['rgb', 'audio']
  feature_sizes = [1024, 128]
  max_quantized_value = 2
  min_quantized_value = -2
  seq_example_bytes = tf.placeholder(tf.string)
  contexts, features = tf.parse_single_sequence_example(
      seq_example_bytes,
      context_features={"video_id": tf.FixedLenFeature([], tf.string),
                        "labels": tf.VarLenFeature(tf.int64)},
      sequence_features={
          feature_name: tf.FixedLenSequenceFeature([], dtype=tf.string)
          for feature_name in feature_names
      })
  decoded_features = {
      name: tf.reshape(
          tf.cast(tf.decode_raw(features[name], tf.uint8), tf.float32),
          [-1, size])
      for name, size in zip(feature_names, feature_sizes)
  }
  feature_matrices = {
      name: utils.Dequantize(decoded_features[name],
                             max_quantized_value, min_quantized_value)
      for name in feature_names
  }
  tf.add_to_collection("vid_tsr", contexts['video_id'])
  tf.add_to_collection("labs_tsr", contexts['labels'].values)
  tf.add_to_collection("rgb_tsr", feature_matrices['rgb'])
  tf.add_to_collection("audio_tsr", feature_matrices['audio'])
  tf.add_to_collection("seq_example_bytes", seq_example_bytes)

  # with tf.Session() as sess:
  #   writer = tf.summary.FileWriter('./graphs', sess.graph)
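A hypothetical usage sketch of the collection-based pattern above (the TFRecord path is a placeholder): the graph is built once, then every serialized SequenceExample is fed through the collected placeholder.

build_graph()
seq_bytes_ph = tf.get_collection("seq_example_bytes")[0]
vid_tsr = tf.get_collection("vid_tsr")[0]
rgb_tsr = tf.get_collection("rgb_tsr")[0]
audio_tsr = tf.get_collection("audio_tsr")[0]

with tf.Session() as sess:
  for record in tf.python_io.tf_record_iterator(path="frame_level.tfrecord"):  # placeholder path
    vid, rgb, audio = sess.run([vid_tsr, rgb_tsr, audio_tsr],
                               feed_dict={seq_bytes_ph: record})
    print(vid, rgb.shape, audio.shape)
    break  # only inspect the first video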