

Python VideoFileClip.fl_image method code examples

This article collects typical usage examples of the Python method moviepy.editor.VideoFileClip.fl_image. If you have been wondering what VideoFileClip.fl_image does, how to call it, or what real-world uses look like, the curated examples below may help. You can also explore further usage examples of moviepy.editor.VideoFileClip, the class this method belongs to.


Below are 15 code examples of VideoFileClip.fl_image, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
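As quick orientation before the examples: fl_image(func) returns a new clip in which func has been applied to every frame; func receives one H x W x 3 RGB numpy array and must return an array of the same shape. A minimal sketch (file names are placeholders; note that MoviePy 2.x renamed this method to image_transform and removed the moviepy.editor module):

import numpy as np
from moviepy.editor import VideoFileClip

def invert_colors(frame):
    # frame is an H x W x 3 RGB uint8 array; return the same shape
    return 255 - frame

clip = VideoFileClip("input.mp4")        # placeholder input file
out_clip = clip.fl_image(invert_colors)  # invert_colors runs on every frame
out_clip.write_videofile("output.mp4", audio=False)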

Example 1: run_video

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
def run_video():
    print("Run Video")

    from moviepy.editor import VideoFileClip

    file = "videos/challenge_video"
    
    clip = VideoFileClip("./" + file + ".mp4")
    output_video = "./" + file + "_processed.mp4"
    
    data_dir = './data'
    num_classes = 2

    global g_session
    global g_logits
    global g_keep_prob
    global g_input_image
    
    with tf.Session() as g_session:
        vgg_path = os.path.join(data_dir, 'vgg')

        correct_label = tf.placeholder(tf.int32, [None, None, None, num_classes], name='correct_label')
        learning_rate = tf.placeholder(tf.float32, name='learning_rate')

        g_input_image, g_keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(g_session, vgg_path)
        layer_output = layers(layer3_out, layer4_out, layer7_out, num_classes)
        g_logits, train_op, cross_entropy_loss = optimize(layer_output, correct_label, learning_rate, num_classes)
        
        print("Restoring model...")
        saver = tf.train.Saver()
        saver.restore(g_session, "./model/semantic_segmentation_model.ckpt")
        print("Model restored.")

        output_clip = clip.fl_image(process_video_image)
        # output_clip = clip.subclip(0, 1).fl_image(process_video_image)
        output_clip.write_videofile(output_video, audio=False)
Author: Moecker, Project: sdc_semantic_segmentation, Lines: 38, Source: main.py
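The excerpt passes process_video_image to fl_image, but the function itself falls outside the excerpt. A plausible minimal sketch, assuming the usual pattern of running the restored graph through the globals set in run_video (the road-class index, the 0.5 cutoff, and the assumption that frames already match the network input size are all mine):

import numpy as np

def process_video_image(frame):
    # Hypothetical reconstruction, not from the excerpt: run the restored FCN
    # on one RGB frame and overlay pixels classified as road in green.
    logits_out = g_session.run(g_logits,
                               feed_dict={g_keep_prob: 1.0,
                                          g_input_image: [frame]})
    # g_logits is assumed to be reshaped to (H*W, num_classes)
    shifted = logits_out - logits_out.max(axis=1, keepdims=True)
    probs = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
    road = (probs[:, 1] > 0.5).reshape(frame.shape[0], frame.shape[1])
    overlay = frame.copy()
    overlay[road] = (0, 255, 0)
    return (0.5 * frame + 0.5 * overlay).astype(np.uint8)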

Example 2: range

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
    result = cv2.cvtColor(result, cv2.COLOR_RGB2BGR)

    return result

#%%
#import glob
#img_list = glob.glob('test_images/*.jpg')
#for i in range(len(img_list)):
#    process(cv2.imread(img_list[i]))

#%%
#import glob
#img_list = glob.glob('test_images/*.jpg')
#i = 0
#
#for img_path in img_list:
#    img = cv2.imread(img_path)
#    output_img = process(img)
#    print(output_img.shape)
#    i += 1
#    cv2.imwrite('test_images_output/output_image_'+str(i)+'.jpg', output_img)

#%%
video = VideoFileClip('project_video.mp4')
processed = video.fl_image(process)  # renamed from `input` to avoid shadowing the builtin
processed.write_videofile('project_video_output_test.mp4', audio=False)

Author: Mazinms, Project: CarND-Advanced-Lane-Lines, Lines: 28, Source: main.py
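A caveat on this excerpt (my observation, not the original author's): fl_image both feeds and expects RGB frames, so converting the result to BGR just before returning, as the truncated process function above does, swaps the red and blue channels in the written video. When the rest of the pipeline works in OpenCV's BGR order, the usual pattern is the opposite conversion on the way out (run_opencv_pipeline is a hypothetical stand-in for the missing body):

import cv2

def process(img):
    bgr = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)       # moviepy hands us RGB
    result = run_opencv_pipeline(bgr)                # OpenCV steps work in BGR
    return cv2.cvtColor(result, cv2.COLOR_BGR2RGB)   # back to RGB for moviepy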

Example 3: print

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
print('')
print('Retraining with best hyper-parameters')

scaler = StandardScaler().fit(X_features)
X_features = scaler.transform(X_features)
svc = LinearSVC(C=best_c, penalty=best_penalty, loss=best_loss).fit(X_features, y_features)

vehicle_detector = vehicle.VehicleDetector(color_space=color_space,
                                  orient=orient,
                                  pix_per_cell=pix_per_cell,
                                  cell_per_block=cell_per_block,
                                  hog_channel=hog_channel,
                                  spatial_size=spatial_size,
                                  hist_bins=hist_bins,
                                  spatial_feat=spatial_feat,
                                  hist_feat=hist_feat,
                                  hog_feat=hog_feat,
                                  y_start_stop=y_start_stop,
                                  x_start_stop=x_start_stop,
                                  xy_window=xy_window,
                                  xy_overlap=xy_overlap,
                                  heat_threshold=15,
                                  scaler=scaler,
                                  classifier=svc)

output_file = './processed_project_video.mp4'
input_file = './project_video.mp4'

clip = VideoFileClip(input_file)
out_clip = clip.fl_image(vehicle_detector.detect)
out_clip.write_videofile(output_file, audio=False)
Author: dzungcamlang, Project: CarND-Vehicle-Detection, Lines: 33, Source: main.py

Example 4: process_image

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
    return draw_img



def process_image(img):
    output = pipeline(img)
    return output


# In[40]:


frame_ctx = frame()
video_output1 = 'project_video_output.mp4'
video_input1 = VideoFileClip('project_video.mp4')#.subclip(4, 7)#subclip(7, 12)#subclip(40, 42)
processed_video = video_input1.fl_image(process_image)
get_ipython().run_line_magic('time', 'processed_video.write_videofile(video_output1, audio=False)')


# 
# #### Discussion
# 
# #### 1. Briefly discuss any problems / issues you faced in your implementation of this project. Where will your pipeline likely fail? What could you do to make it more robust?
# 
# There are some false positives (which I do not consider truly false), since those cars are coming from the other direction (on the left). I could have used xstart to control where the sliding windows start in the X direction and filter out those detections, or detected the lane lines as in the previous project and ignored anything outside them, but that would not be practical: I think it is important to detect anything along the X axis, even outside the lane.
# 
# I could have used more sliding windows to make the bounding boxes more stable, but I wanted to keep it simple for now.
# 
# I could have calculated the centroid of each detected box per frame, measured the distance between centroids of boxes near the same location (within some +/- margin) over a few frames, and estimated the projected position of the same box (car) in the next frame and drawn it, which could also make the pipeline faster, as suggested in the course video (see the sketch after this discussion). That estimate should also account for the fact that a car passing mine can slow down, so in the next frame it may appear closer rather than farther.
# 
# I could have shown the number of cars detected (labels[1]) from the heatmap, before and after applying the threshold, in real time to make the video more informative about the pipeline's behavior. I could also have implemented dynamic sliding windows, scanning more densely whenever the detection count changes and applying an even stricter threshold during that transient period to reject false positives while keeping firm detections.
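The centroid idea above can be sketched as follows. This is a minimal illustration, not part of the original notebook; the (x1, y1, x2, y2) box representation and the constant-velocity projection are assumptions:

import numpy as np

def centroid(box):
    # box is a hypothetical (x1, y1, x2, y2) tuple
    x1, y1, x2, y2 = box
    return np.array([(x1 + x2) / 2.0, (y1 + y2) / 2.0])

def predict_next_centroid(history):
    # history: centroids of the "same" box over the last few frames,
    # i.e. centroids that stayed within some +/- margin of each other
    if len(history) < 2:
        return history[-1] if history else None
    velocity = history[-1] - history[-2]   # per-frame displacement
    return history[-1] + velocity          # constant-velocity projection

As the author notes, a passing car may decelerate, so the projection is better treated as a search prior than a hard prediction.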
Author: maranemil, Project: howto, Lines: 33, Source: CarND-Vehicle-Detection.ipynb.py

Example 5: load_graph

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
            tf.import_graph_def(od_graph_def, name='')
    return graph

detection_graph = load_graph(SSD_V4)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')

clip = VideoFileClip('raw_video.avi')

def pipeline(img):
    draw_img = Image.fromarray(img)
    boxes, scores, classes = sess.run([detection_boxes, detection_scores, detection_classes], feed_dict={image_tensor: np.expand_dims(img, 0)})
    boxes = np.squeeze(boxes)
    scores = np.squeeze(scores)
    classes = np.squeeze(classes)
    confidence_cutoff = 0.8
    boxes, scores, classes = filter_boxes(confidence_cutoff, boxes, scores, classes)
    width, height = draw_img.size
    box_coords = to_image_coords(boxes, height, width)
    draw_boxes(draw_img, box_coords, classes)
    return np.array(draw_img)

with tf.Session(graph=detection_graph) as sess:
    image_tensor = sess.graph.get_tensor_by_name('image_tensor:0')
    detection_boxes = sess.graph.get_tensor_by_name('detection_boxes:0')
    detection_scores = sess.graph.get_tensor_by_name('detection_scores:0')
    detection_classes = sess.graph.get_tensor_by_name('detection_classes:0')
    new_clip = clip.fl_image(pipeline)
    new_clip.write_videofile('SSD_V4_result.mp4')
Author: muhammedabdelnasser, Project: An-Autonomous-Vehicle-System-For-Carla, Lines: 33, Source: tl_detection_classification.py
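The pipeline above calls filter_boxes, to_image_coords, and draw_boxes, which fall outside the excerpt. Minimal sketches consistent with the call sites (signatures inferred, so treat them as assumptions rather than the project's actual helpers):

import numpy as np
from PIL import ImageDraw

def filter_boxes(min_score, boxes, scores, classes):
    # keep only detections whose confidence reaches min_score
    keep = scores >= min_score
    return boxes[keep], scores[keep], classes[keep]

def to_image_coords(boxes, height, width):
    # boxes are normalized [ymin, xmin, ymax, xmax]; scale to pixel coordinates
    box_coords = np.zeros_like(boxes)
    box_coords[:, 0] = boxes[:, 0] * height
    box_coords[:, 1] = boxes[:, 1] * width
    box_coords[:, 2] = boxes[:, 2] * height
    box_coords[:, 3] = boxes[:, 3] * width
    return box_coords

def draw_boxes(image, box_coords, classes):
    # draw one rectangle per detection on the PIL image, in place;
    # classes is accepted for signature parity (a fuller version would
    # choose a color per class)
    draw = ImageDraw.Draw(image)
    for ymin, xmin, ymax, xmax in box_coords:
        draw.rectangle([(xmin, ymin), (xmax, ymax)], outline=(0, 255, 0))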

Example 6: VideoFileClip

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
    road_warped = cv2.warpPerspective(road, Minv, img_size, flags=cv2.INTER_LINEAR)
    road_warped_bkg = cv2.warpPerspective(road_bkg, Minv, img_size, flags=cv2.INTER_LINEAR)

    base = cv2.addWeighted(img, 1.0, road_warped_bkg, -1.0, 0.0)
    result = cv2.addWeighted(base, 1.0, road_warped, .7, 0.0)

    ym_per_pix = curve_centers.ym_per_pix
    xm_per_pix = curve_centers.xm_per_pix

    curve_fit_cr = np.polyfit(np.array(res_yvals,np.float32)*ym_per_pix, np.array(leftx, np.float32)*xm_per_pix, 2)
    curverad = ((1 + (2*curve_fit_cr[0]*yvals[-1]*ym_per_pix + curve_fit_cr[1])**2)**1.5) / np.absolute(2*curve_fit_cr[0])
    camera_center = (left_fitx[-1] + right_fitx[-1])/2
    center_diff = (camera_center-warped.shape[1]/2)*xm_per_pix
    side_pos = 'left'
    if center_diff <=0:
        side_pos = 'right'

    cv2.putText(result, 'Radius of Curvature = '+str(round(curverad, 3))+'(m)',(50,50),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),2)
    cv2.putText(result, 'Vehicle is '+str(abs(round(center_diff, 3)))+'m '+side_pos+' of center',(50,100),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),2)

    return result


if __name__ == '__main__':
    output_video = 'output_tracked1.mp4'
    input_video = 'project_video.mp4'

    clip1 = VideoFileClip(input_video)
    video_clip = clip1.fl_image(process_img)
    video_clip.write_videofile(output_video, audio=False)
Author: axitkhurana, Project: CarND-Advanced-Lane-Lines, Lines: 32, Source: video_gen.py
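For reference, the curverad line above evaluates the standard radius-of-curvature formula for the quadratic fit x = Ay^2 + By + C (with both axes first scaled to meters via ym_per_pix and xm_per_pix), taken at the bottom of the image, y = yvals[-1]:

R = \frac{\left(1 + (2Ay + B)^2\right)^{3/2}}{\left|2A\right|}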

Example 7: display

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
    combined_grad_mag_dir_hls[(combined_grad_mag_dir == 1) | (combined_sl == 1)] = 1
    # display(img, combined_grad_mag_dir_hls)

    warped, M = warp(combined_grad_mag_dir_hls)
    # display2(combined_grad_mag_dir_hls, warped)

    ploty, left_fitx, right_fitx, avg_left_fitx, avg_right_fitx = lanelines(warped)
    left_curverad, right_curverad = radius_of_curvature(ploty, left_fitx, right_fitx)
    avg_curverad = (left_curverad + right_curverad)/2

    offset = find_offset_from_center(img, left_fitx, right_fitx)

    result = project(warped, img, ploty, left_fitx, right_fitx, M, avg_curverad, offset, avg_left_fitx, avg_right_fitx)
    return result



test_images_path = 'test_images/'
images = glob.glob(test_images_path+"*.jpg")
for image in images:
    orig = mpimg.imread(image)
    left_fit_current = None
    right_fit_current = None
    pipeline(orig)


white_output = 'output_images/project_video_mapped.mp4'
clip1 = VideoFileClip("project_video.mp4")
white_clip = clip1.fl_image(pipeline)
white_clip.write_videofile(white_output, audio=False)
Author: tablet6, Project: AdvancedLaneFinding, Lines: 32, Source: pipeline.py

Example 8: annotate_video

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
def annotate_video(input_file, output_file):
	""" Given input_file video, save annotated video to output_file """
	video = VideoFileClip(input_file)
	annotated_video = video.fl_image(annotate_image)
	annotated_video.write_videofile(output_file, audio=False)
Author: WalesPeng, Project: CarND-Advanced-Lane-Lines-P4, Lines: 7, Source: line_fit_video.py
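A usage sketch (file names are placeholders):

annotate_video('project_video.mp4', 'project_video_annotated.mp4')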

Example 9: process_image

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
test_images = os.listdir("test_images/")
for test_image in test_images:
    # load in image
    initial_img = mpimg.imread('test_images/' + test_image)

    img = process_image(initial_img)
    # save to output
    mpimg.imsave("output/" + test_image, img)

# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML

making_videos = True

if making_videos:
    white_output = 'white.mp4'
    clip1 = VideoFileClip("solidWhiteRight.mp4")
    white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
    white_clip.write_videofile(white_output, audio=False)

    yellow_output = 'yellow.mp4'
    clip2 = VideoFileClip('solidYellowLeft.mp4')
    yellow_clip = clip2.fl_image(process_image)
    yellow_clip.write_videofile(yellow_output, audio=False)

    challenge_output = 'extra.mp4'
    clip2 = VideoFileClip('challenge.mp4')
    challenge_clip = clip2.fl_image(process_image)
    challenge_clip.write_videofile(challenge_output, audio=False)
Author: qpham01, Project: GitHub, Lines: 32, Source: lane_line_pipeline.py

Example 10: process_video

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
def process_video(input_path, output_path):
    #clip = VideoFileClip(input_path).subclip(23,28)
    clip = VideoFileClip(input_path)
    result = clip.fl_image(process_image)
    result.write_videofile(output_path)
Author: kpasad, Project: vehicle_detection, Lines: 7, Source: veh_det_pipeline.py

Example 11: draw_detections

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
# adapt to movie based on PD_2016.py

import cv2
import numpy as np
from moviepy.editor import VideoFileClip

def draw_detections(img, rects, thickness = 1):
    for x, y, w, h in rects:
        pad_w, pad_h = int(0.15*w), int(0.05*h)
        cv2.rectangle(img, (x+pad_w, y+pad_h), (x+w-pad_w, y+h-pad_h), (0, 255, 0), thickness)

def process_image(img):
    # initialize pedestrian detector
    hog = cv2.HOGDescriptor() #derive HOG features
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector()) #setSVMDetector
    # start pedestrian detection
    found, w = hog.detectMultiScale(img, winStride = (8,8), padding = (8, 8), scale = 1.15, finalThreshold = 1)
    draw_detections(img, found) # draw rectangles
    result = img
    return result

test_output = 'test_output.mp4'
clip1 = VideoFileClip('../Dataset/YouTube/【ドラレコ】横浜市営バスの恐怖2.mp4')
test_clip = clip1.fl_image(process_image)
test_clip.write_videofile(test_output, audio=False)
Author: nyoshimura, Project: Pedestrian-Detection, Lines: 27, Source: PD_movie.py
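One caveat worth noting (my observation, not the original author's): process_image constructs and configures a new cv2.HOGDescriptor on every call, and fl_image calls it once per frame. Hoisting the detector out of the per-frame path is noticeably cheaper; a sketch:

import cv2

# build the pedestrian detector once, then reuse it for every frame
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

def process_image_fast(img):
    found, _ = hog.detectMultiScale(img, winStride=(8, 8), padding=(8, 8),
                                    scale=1.15, finalThreshold=1)
    draw_detections(img, found)  # reuses draw_detections defined above
    return img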

Example 12: VideoFileClip

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
#plt.savefig('./output_images/detected_lane.png')


# # 3. Pipeline for video
# 
# The processed project video can be found here.
# 
# The result on project_video.mp4 is shown below. The algorithm did not work on the two challenge videos, and I did not go further to modify the code to handle them. As mentioned above, I am not really convinced by the material in this project, so even if it succeeded on the challenge videos, I would have no confidence that it works in new scenarios.

# In[39]:

output_dir= './output_images/'
clip_input_file = 'project_video.mp4'
clip_output_file = output_dir +'sample_' + clip_input_file
clip = VideoFileClip(clip_input_file).subclip(30, 40)
clip_output = clip.fl_image(process_image)
get_ipython().magic('time clip_output.write_videofile(clip_output_file, audio=False)')


# In[40]:

output_dir= './output_images/'
clip_input_file = 'project_video.mp4'
clip_output_file = output_dir +'processed_' + clip_input_file
clip = VideoFileClip(clip_input_file)
clip_output = clip.fl_image(process_image)
get_ipython().magic('time clip_output.write_videofile(clip_output_file, audio=False)')


# In[2]:
Author: dvu4, Project: CarND-Advanced-Lane-Lines, Lines: 32, Source: README.py

Example 13: process_image1

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
         'theta':1,
         'threshold':40,
         'min_line_length':3,
         'max_line_gap':1}

# Process images in the "test_images" directory.
for path in glob.glob('test_images/solid*.jpg'):
    fname = path.split("/")[1]
    image = mpimg.imread(path)
    processed_image = process_image1(image)
    mpimg.imsave("test_images/processed_%s" % fname, processed_image)

# Process first test video.
white_output = 'white.mp4'
clip1 = VideoFileClip("solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image2)
white_clip.write_videofile(white_output, audio=False)

# Process second test video.
yellow_output = 'yellow.mp4'
clip1 = VideoFileClip("solidYellowLeft.mp4")
yellow_clip = clip1.fl_image(process_image2)
yellow_clip.write_videofile(yellow_output, audio=False)

# Parameters for part 2 (challenge)

theta = {'horizon':0.61,
         'hood':0.07,
         'trapezoid_top_factor':0.10,
         'trapezoid_bottom_factor':0.90,
         'angle_cutoff':0.75,
Author: dventimi, Project: CarND-LaneLines-P1, Lines: 33, Source: lanelines.py

Example 14: process_video

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
##---------------------------------------------------------------------------------------

from moviepy.editor import VideoFileClip 


def process_video(img):

	out_img = detect_cars_with_heat(img, svc, X_scaler, orient, pix_per_cell, 
					cell_per_block, (spatial,spatial), histbin, heatmap_threshold)

	return out_img

white_output = '/home/haoyang/CarND-Vehicle-Detection/output_project_video.mp4'

clip1 = VideoFileClip('/home/haoyang/CarND-Vehicle-Detection/project_video.mp4')
white_clip = clip1.fl_image(process_video)
white_clip.write_videofile(white_output, audio=False)

Author: uxvii, Project: CarND-Vehicle-Detection, Lines: 19, Source: main.py

Example 15: pipeline_yolo

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
        image = mpimg.imread(filename)

        #(1) Yolo pipeline
        yolo_result = pipeline_yolo(image)
        plt.figure()
        plt.imshow(yolo_result)
        plt.title('yolo pipeline', fontsize=30)

        #(2) SVM pipeline
        draw_img = pipeline_svm(image)
        fig = plt.figure()
        plt.imshow(draw_img)
        plt.title('svm pipeline', fontsize=30)
        plt.show()

    elif demo == 2:
        # YOLO Pipeline
        video_output = 'examples/project_YOLO.mp4'
        clip1 = VideoFileClip("examples/project_video.mp4").subclip(30,32)
        clip = clip1.fl_image(pipeline_yolo)
        clip.write_videofile(video_output, audio=False)

    else:
        # SVM pipeline
        video_output = 'examples/project_svm.mp4'
        clip1 = VideoFileClip("examples/project_video.mp4").subclip(30,32)
        clip = clip1.fl_image(pipeline_svm)
        clip.write_videofile(video_output, audio=False)


Author: nrsharon, Project: vehicle-detection, Lines: 30, Source: main.py


Note: the moviepy.editor.VideoFileClip.fl_image examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.