

Python io.vread Method Code Examples

This article collects typical usage examples of the skvideo.io.vread method in Python. If you are wondering how to call io.vread, what its arguments look like, or simply want to see it used in real code, the curated examples below should help. You can also browse further usage examples for the skvideo.io module that this method belongs to.


Five code examples of the io.vread method are shown below, sorted by popularity by default.
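A minimal sketch of the basic call pattern (the file path below is a placeholder): vread decodes the whole file into memory and returns a NumPy array of shape (num_frames, height, width, channels).

import skvideo.io

# Read an entire video into memory as a uint8 ndarray
# of shape (num_frames, height, width, channels).
video = skvideo.io.vread("video.mp4")  # placeholder path
print(video.shape, video.dtype)

# Keyword arguments can limit how much is decoded,
# e.g. read only the first 30 frames:
clip = skvideo.io.vread("video.mp4", num_frames=30)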

Example 1: __getitem__

# Required import: from skvideo import io [as alias]
# Or: from skvideo.io import vread [as alias]
def __getitem__(self, index):
        annotation = self.annotation[index]
        video_path = annotation['path']
        clazz = annotation['class']

        metadata = ffprobe(video_path)
        duration = float(metadata["video"]["@duration"])

        output_parameter = self.base_parameter

        if self.clip_duration > 0:
            sta = int(random() * max((duration - self.clip_duration), 0.))
            output_parameter.update({
                "-ss": "{}".format(sta),
                "-t": "{}".format(min(self.clip_duration, duration - sta))
            })
        video_data = vread(video_path, outputdict=output_parameter)

        if self.transform:
            video_data = self.transform(video_data)

        return video_data, clazz 
Developer: jinyu121, Project: video2frame, Lines of code: 24, Source file: pytorch_skvideo_dataset.py
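Example 1 leans on vread's outputdict argument, which forwards option/value pairs to the FFmpeg backend; the "-ss" and "-t" keys select a clip out of the full video. A minimal sketch of the same pattern, with a placeholder path and hard-coded times:

import skvideo.io

# Hypothetical path; "-ss" starts decoding at second 5 and "-t" keeps 2 seconds.
clip = skvideo.io.vread(
    "video.mp4",
    outputdict={"-ss": "5", "-t": "2"},
)
print(clip.shape)  # (frames in the 2-second clip, height, width, 3)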

Example 2: GetFrames

# Required import: from skvideo import io [as alias]
# Or: from skvideo.io import vread [as alias]
def GetFrames(fileName, skipLength = 1, debug = False):
    '''
    Get video frames after skipping
    Args:
        fileName: full fileName to read
        skipLength: Number of skips to perform
    Returns:
        Numpy array of frames
    '''

    if debug:
        print "Started creating Frame List for file", fileName

    try:
        frameList = vread(fileName)
    except Exception:
        return None

    if debug:
        print('The video shape is', frameList.shape)
        print('The array type is', frameList.dtype)

    frameList = frameList[range(0, frameList.shape[0], skipLength), :, :, :]
    # Skip frames according to skipLength

    if debug:
        print('The new shape after skipping', skipLength, 'is', frameList.shape)
        print("Finished creating Frame List")

    return frameList 
Developer: amlankar, Project: adascan-public, Lines of code: 32, Source file: dataSampling.py
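Example 2 drops frames by fancy-indexing with a range object after reading the whole video; plain slicing gives the same result more concisely:

frameList = frameList[::skipLength]  # keep every skipLength-th frame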

Example 3: main

# Required import: from skvideo import io [as alias]
# Or: from skvideo.io import vread [as alias]
def main():

    with tf.Session() as sess:
        with tf.gfile.GFile('./1_checkpoint/16_bit_HE_to_HE_gt/lsmod_none.pb', 'rb') as f:
        # with tf.gfile.GFile('./1_checkpoint/16_bit_HE_to_HE_gt/lsmod_256.pb', 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            sess.graph.as_default()
            _ = tf.import_graph_def(graph_def)

            in_image = sess.graph.get_tensor_by_name('import/input:0')
            out_image = sess.graph.get_tensor_by_name('import/output:0')

            if not os.path.isdir(TEST_RESULT_DIR):
                os.makedirs(TEST_RESULT_DIR)

            for i, file0 in enumerate(in_paths):
                t0 = time.time()
                # raw = vread(file0)
                raw = np.load(file0)
                if raw.shape[0] > MAX_FRAME:
                    print('Video with shape', raw.shape, 'is too large. Splitted.')
                    count = 0
                    begin_frame = 0
                    while begin_frame < raw.shape[0]:
                        t1 = time.time()
                        print('processing segment %d ...' % (count + 1))
                        new_filename = '.'.join(file0.split('.')[:-1] + [str(count)] + file0.split('.')[-1::])
                        process_video(sess, in_image, out_image, new_filename, raw[begin_frame: begin_frame + MAX_FRAME, :, :, :])
                        count += 1
                        begin_frame += MAX_FRAME
                        print('\t{}s'.format(time.time() - t1))
                else:
                    process_video(sess, in_image, out_image, file0, raw, out_file=train_ids[i] + '.mp4')
                print(train_ids[i], '\t{}s'.format(time.time() - t0)) 
Developer: PINTO0309, Project: PINTO_model_zoo, Lines of code: 37, Source file: test.py

Example 4: main

# Required import: from skvideo import io [as alias]
# Or: from skvideo.io import vread [as alias]
def main():
    sess = tf.Session()
    in_image = tf.placeholder(tf.float32, [None, TEST_CROP_FRAME, None, None, 4])
    gt_image = tf.placeholder(tf.float32, [None, TEST_CROP_FRAME, None, None, 3])
    out_image = network(in_image)

    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(CHECKPOINT_DIR)
    if ckpt:
        print('loaded ' + ckpt.model_checkpoint_path)
        saver.restore(sess, ckpt.model_checkpoint_path)
    if not os.path.isdir(TEST_RESULT_DIR):
        os.makedirs(TEST_RESULT_DIR)

    for i, file0 in enumerate(in_paths):
        t0 = time.time()
        # raw = vread(file0)
        raw = np.load(file0)
        if raw.shape[0] > MAX_FRAME:
            print('Video with shape', raw.shape, 'is too large. Splitted.')
            count = 0
            begin_frame = 0
            while begin_frame < raw.shape[0]:
                t1 = time.time()
                print('processing segment %d ...' % (count + 1), end='')
                new_filename = '.'.join(file0.split('.')[:-1] + [str(count)] + file0.split('.')[-1::])
                process_video(sess, in_image, out_image, new_filename, raw[begin_frame: begin_frame + MAX_FRAME, :, :, :])
                count += 1
                begin_frame += MAX_FRAME
                print('\t{}s'.format(time.time() - t1))
        else:
            process_video(sess, in_image, out_image, file0, raw, out_file=train_ids[i] + '.mp4')
        print(train_ids[i], '\t{}s'.format(time.time() - t0))
Developer: MichaelHYJiang, Project: Learning-to-See-Moving-Objects-in-the-Dark, Lines of code: 36, Source file: test.py

Example 5: get_mask

# Required import: from skvideo import io [as alias]
# Or: from skvideo.io import vread [as alias]
def get_mask(model_options):
    def _center_crop(frames):
        y, x = frames.shape[1:3]
        assert y >= 224 and x >= 224, 'Video too small!'

        if y <= 430 and x <= 430:
            # central crop
            y_d = (y - 224) // 2
            x_d = (x - 224) // 2
            frames = frames[:, y_d:y_d + 224, x_d:x_d + 224, :]

        return frames

    with tf.Session() as sess:
        with tf.device('/cpu:0'):
            masks,images = model(model_options,sess)
            print('Reading Video..')
            frames = vread(model_options['vid_file'])
            print('Video File:', model_options['vid_file'], 'has shape', frames.shape)
            length = frames.shape[0]

            frames = _center_crop(frames)
            frames = np.stack([frames[int(math.ceil(i*length/17)),:,:,:] for i in range(17)],0)

            if frames.shape[1] != 224:
                print('Big sized video, resizing')
                # make the larger side close to 420
                f = max(frames.shape[1:3])/420
                sh = (np.array(frames.shape[1:3])/f).astype(np.int32)
                for i in range(frames.shape[0]):
                    frames[i] = (resize(frames[i],sh)*255).astype(np.uint8)

                frames = _center_crop(frames)
            
            print('New shape:', frames.shape)
            assert frames.shape[1:3] == (224,224), 'Bad aspect ratio!'           
            feed_dict = {}
            feed_dict[images] = np.reshape(frames,(-1,224,224,3))
            print('Getting Mask...')
            mask = sess.run(masks,feed_dict=feed_dict)
            mask = mask[0]
        
    gen_vis(mask,frames,model_options['vid_file']) 
Developer: amlankar, Project: adascan-public, Lines of code: 45, Source file: demo.py


Note: The skvideo.io.vread examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution or reuse should follow the license of the corresponding project. Do not republish without permission.