

Python VideoFileClip.fl_image Method Code Examples

This article collects typical usage examples of the Python method moviepy.editor.VideoFileClip.fl_image. If you are wondering what VideoFileClip.fl_image does, how to use it, or where to find examples of it in practice, the curated code examples below may help. You can also explore further usage examples of the class it belongs to, moviepy.editor.VideoFileClip.


Below are 15 code examples of VideoFileClip.fl_image, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
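As a quick orientation before the examples: fl_image(f) returns a new clip in which every frame (an RGB numpy array of shape (height, width, 3)) has been replaced by f(frame). A minimal sketch, with placeholder file names:

from moviepy.editor import VideoFileClip

def dim_frame(frame):
    """Example frame filter: return a darkened copy of the RGB frame."""
    return (frame * 0.5).astype('uint8')

clip = VideoFileClip('input.mp4')                     # placeholder input file
processed = clip.fl_image(dim_frame)                  # apply dim_frame to every frame
processed.write_videofile('output.mp4', audio=False)  # placeholder output file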

Example 1: run_video

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
import os
import tensorflow as tf

# load_vgg, layers, optimize, and process_video_image are project helpers
# defined elsewhere in main.py.
def run_video():
    print("Run Video")

    from moviepy.editor import VideoFileClip

    file = "videos/challenge_video"
    
    clip = VideoFileClip("./" + file + ".mp4")
    output_video = "./" + file + "_processed.mp4"
    
    data_dir = './data'
    num_classes = 2

    global g_session
    global g_logits
    global g_keep_prob
    global g_input_image
    
    with tf.Session() as g_session:
        vgg_path = os.path.join(data_dir, 'vgg')

        correct_label = tf.placeholder(tf.int32, [None, None, None, num_classes], name='correct_label')
        learning_rate = tf.placeholder(tf.float32, name='learning_rate')

        g_input_image, g_keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(g_session, vgg_path)
        layer_output = layers(layer3_out, layer4_out, layer7_out, num_classes)
        g_logits, train_op, cross_entropy_loss = optimize(layer_output, correct_label, learning_rate, num_classes)
        
        print("Restoring model...")
        saver = tf.train.Saver()
        saver.restore(g_session, "./model/semantic_segmentation_model.ckpt")
        print("Model restored.")

        output_clip = clip.fl_image(process_video_image)
        # output_clip = clip.subclip(0, 1).fl_image(process_video_image)
        output_clip.write_videofile(output_video, audio=False)
Author: Moecker, Project: sdc_semantic_segmentation, Lines: 38, Source: main.py
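For reference, process_video_image is not shown in this excerpt. Below is a plausible sketch of such a per-frame function, assuming the common FCN inference pattern (softmax over g_logits, threshold the road class, blend a green mask); the resolution, threshold, and scipy-based image handling are my assumptions, not the project's actual code:

import numpy as np
import scipy.misc
import tensorflow as tf

def process_video_image(image):
    """Segment one RGB frame and blend the predicted road pixels in green."""
    image_shape = (160, 576)  # assumed network input resolution
    frame = scipy.misc.imresize(image, image_shape)
    # Run inference; g_logits is flattened to (pixels, num_classes) by optimize().
    im_softmax = g_session.run(
        tf.nn.softmax(g_logits),
        {g_keep_prob: 1.0, g_input_image: [frame]})
    road_prob = im_softmax[:, 1].reshape(image_shape[0], image_shape[1])
    segmentation = (road_prob > 0.5).reshape(image_shape[0], image_shape[1], 1)
    # Semi-transparent green overlay on the road pixels.
    mask = np.dot(segmentation, np.array([[0, 255, 0, 127]], dtype=np.uint8))
    mask = scipy.misc.toimage(mask, mode="RGBA")
    street_im = scipy.misc.toimage(frame)
    street_im.paste(mask, box=None, mask=mask)
    return np.array(street_im)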

Example 2: range

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
    # Note: moviepy supplies and expects RGB frames, so converting to BGR here
    # will swap the red and blue channels in the written video; the conversion
    # presumably matches the commented-out cv2.imwrite testing below.
    result = cv2.cvtColor(result, cv2.COLOR_RGB2BGR)

    return result

#%%
#import glob
#img_list = glob.glob('test_images/*.jpg')
#for i in range(len(img_list)):
#    process(cv2.imread(img_list[i]))

#%%
#import glob
#img_list = glob.glob('test_images/*.jpg')
#i = 0
#
#for img_path in img_list:
#    img = cv2.imread(img_path)
#    output_img = process(img)
#    print(output_img.shape)
#    i += 1
#    cv2.imwrite('test_images_output/output_image_'+str(i)+'.jpg', output_img)

#%%
video = VideoFileClip('project_video.mp4')
output = video.fl_image(process)  # renamed from `input`, which shadows the built-in
output.write_videofile('project_video_output_test.mp4', audio=False)




Author: Mazinms, Project: CarND-Advanced-Lane-Lines, Lines: 28, Source: main.py

Example 3: print

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
print('')
print('Retraining with best hyper-parameters')

scaler = StandardScaler().fit(X_features)
X_features = scaler.transform(X_features)
svc = LinearSVC(C=best_c, penalty=best_penalty, loss=best_loss).fit(X_features, y_features)

vehicle_detector = vehicle.VehicleDetector(color_space=color_space,
                                  orient=orient,
                                  pix_per_cell=pix_per_cell,
                                  cell_per_block=cell_per_block,
                                  hog_channel=hog_channel,
                                  spatial_size=spatial_size,
                                  hist_bins=hist_bins,
                                  spatial_feat=spatial_feat,
                                  hist_feat=hist_feat,
                                  hog_feat=hog_feat,
                                  y_start_stop=y_start_stop,
                                  x_start_stop=x_start_stop,
                                  xy_window=xy_window,
                                  xy_overlap=xy_overlap,
                                  heat_threshold=15,
                                  scaler=scaler,
                                  classifier=svc)

output_file = './processed_project_video.mp4'
input_file = './project_video.mp4'

clip = VideoFileClip(input_file)
out_clip = clip.fl_image(vehicle_detector.detect)
out_clip.write_videofile(output_file, audio=False)
Author: dzungcamlang, Project: CarND-Vehicle-Detection, Lines: 33, Source: main.py

Example 4: process_image

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
    return draw_img



def process_image(img):
    output = pipeline(img)
    return output


# In[40]:


frame_ctx = frame()
video_output1 = 'project_video_output.mp4'
video_input1 = VideoFileClip('project_video.mp4')  # .subclip(4, 7) / .subclip(7, 12) / .subclip(40, 42)
processed_video = video_input1.fl_image(process_image)
get_ipython().run_line_magic('time', 'processed_video.write_videofile(video_output1, audio=False)')


# 
# #### Discussion
# 
# #### 1. Briefly discuss any problems / issues you faced in your implementation of this project. Where will your pipeline likely fail? What could you do to make it more robust?
# 
# There are some false positives (which I do not consider truly false), since those cars are coming from the other direction (on the left). I could have used xstart to control where the sliding windows begin in the X direction and filter out those detections, or detected the lane lines as in the previous project and ignored anything outside them, but that would not be practical: I think it is important to detect anything along the x axis, even outside the lane.
# 
# I could have used more sliding windows to make the bounding boxes more stable, but I wanted to keep it simple for now.
# 
# I could have calculated the centroid of each detected box per frame, measured the distances between centroids of boxes near the same location (within some ± margin) over a few frames, and estimated the projected position of the same box (car) in the next frame and drawn it. That could make the pipeline faster, as suggested in the course video, but it should also account for the fact that a car passing mine may slow down, appearing closer rather than farther in the next frame.
# 
# I could have shown the number of cars detected (labels[1]) from the heatmap, both before and after applying the threshold, in real time to make the pipeline's behavior more predictable in the video. I could also have implemented dynamic sliding windows: scan more densely whenever the number of detections changes, and apply an even stricter threshold during that transient period so that only firm detections survive. A minimal sketch of the centroid idea follows below.
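# Below is a minimal sketch of the centroid idea above (my illustration, not
# code from the original notebook): boxes are assumed to be (x1, y1, x2, y2)
# tuples, and a constant-velocity model is assumed for the prediction.

import numpy as np

def box_centroid(box):
    """Centroid of an (x1, y1, x2, y2) bounding box."""
    x1, y1, x2, y2 = box
    return np.array([(x1 + x2) / 2.0, (y1 + y2) / 2.0])

def match_boxes(prev_boxes, curr_boxes, margin=50.0):
    """Pair boxes across frames whose centroids lie within the given margin."""
    matches = []
    for i, pb in enumerate(prev_boxes):
        pc = box_centroid(pb)
        for j, cb in enumerate(curr_boxes):
            if np.linalg.norm(box_centroid(cb) - pc) <= margin:
                matches.append((i, j))
                break
    return matches

def predict_next_centroid(prev_centroid, curr_centroid):
    """Constant-velocity extrapolation of a matched box's centroid.

    A passing car may slow down, so the true displacement can shrink from
    frame to frame; a real tracker would smooth the velocity estimate.
    """
    return curr_centroid + (curr_centroid - prev_centroid)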
Author: maranemil, Project: howto, Lines: 33, Source: CarND-Vehicle-Detection.ipynb.py

Example 5: load_graph

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
            tf.import_graph_def(od_graph_def, name='')
    return graph

detection_graph = load_graph(SSD_V4)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')

clip = VideoFileClip('raw_video.avi')

def pipeline(img):
    draw_img = Image.fromarray(img)
    boxes, scores, classes = sess.run([detection_boxes, detection_scores, detection_classes], feed_dict={image_tensor: np.expand_dims(img, 0)})
    boxes = np.squeeze(boxes)
    scores = np.squeeze(scores)
    classes = np.squeeze(classes)
    confidence_cutoff = 0.8
    boxes, scores, classes = filter_boxes(confidence_cutoff, boxes, scores, classes)
    width, height = draw_img.size
    box_coords = to_image_coords(boxes, height, width)
    draw_boxes(draw_img, box_coords, classes)
    return np.array(draw_img)

with tf.Session(graph=detection_graph) as sess:
    image_tensor = sess.graph.get_tensor_by_name('image_tensor:0')
    detection_boxes = sess.graph.get_tensor_by_name('detection_boxes:0')
    detection_scores = sess.graph.get_tensor_by_name('detection_scores:0')
    detection_classes = sess.graph.get_tensor_by_name('detection_classes:0')
    new_clip = clip.fl_image(pipeline)
    new_clip.write_videofile('SSD_V4_result.mp4')
Author: muhammedabdelnasser, Project: An-Autonomous-Vehicle-System-For-Carla, Lines: 33, Source: tl_detection_classification.py
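The snippet above calls filter_boxes, to_image_coords, and draw_boxes without defining them. The following is a minimal sketch of what such helpers might look like; the implementations are my assumption and are not taken from the original project:

import numpy as np
from PIL import ImageDraw

def filter_boxes(min_score, boxes, scores, classes):
    """Keep only detections whose confidence is at or above min_score."""
    idxs = [i for i, score in enumerate(scores) if score >= min_score]
    return boxes[idxs, ...], scores[idxs, ...], classes[idxs, ...]

def to_image_coords(boxes, height, width):
    """Scale normalized [ymin, xmin, ymax, xmax] boxes to pixel coordinates."""
    box_coords = np.zeros_like(boxes)
    box_coords[:, 0] = boxes[:, 0] * height
    box_coords[:, 1] = boxes[:, 1] * width
    box_coords[:, 2] = boxes[:, 2] * height
    box_coords[:, 3] = boxes[:, 3] * width
    return box_coords

def draw_boxes(image, box_coords, classes, thickness=4):
    """Draw one rectangle per detection on a PIL image, in place."""
    draw = ImageDraw.Draw(image)
    for ymin, xmin, ymax, xmax in box_coords:
        draw.line([(xmin, ymin), (xmax, ymin), (xmax, ymax),
                   (xmin, ymax), (xmin, ymin)], width=thickness)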

Example 6: VideoFileClip

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
    road_warped = cv2.warpPerspective(road, Minv, img_size, flags=cv2.INTER_LINEAR)
    road_warped_bkg = cv2.warpPerspective(road_bkg, Minv, img_size, flags=cv2.INTER_LINEAR)

    base = cv2.addWeighted(img, 1.0, road_warped_bkg, -1.0, 0.0)
    result = cv2.addWeighted(base, 1.0, road_warped, .7, 0.0)

    ym_per_pix = curve_centers.ym_per_pix
    xm_per_pix = curve_centers.xm_per_pix

    curve_fit_cr = np.polyfit(np.array(res_yvals, np.float32) * ym_per_pix,
                              np.array(leftx, np.float32) * xm_per_pix, 2)
    curverad = ((1 + (2 * curve_fit_cr[0] * yvals[-1] * ym_per_pix + curve_fit_cr[1])**2)**1.5) \
        / np.absolute(2 * curve_fit_cr[0])
    camera_center = (left_fitx[-1] + right_fitx[-1]) / 2
    center_diff = (camera_center - warped.shape[1] / 2) * xm_per_pix
    side_pos = 'left'
    if center_diff <= 0:
        side_pos = 'right'

    cv2.putText(result, 'Radius of Curvature = ' + str(round(curverad, 3)) + '(m)',
                (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
    cv2.putText(result, 'Vehicle is ' + str(abs(round(center_diff, 3))) + 'm ' + side_pos + ' of center',
                (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)

    return result


if __name__ == '__main__':
    output_video = 'output_tracked1.mp4'
    input_video = 'project_video.mp4'

    clip1 = VideoFileClip(input_video)
    video_clip = clip1.fl_image(process_img)
    video_clip.write_videofile(output_video, audio=False)
Author: axitkhurana, Project: CarND-Advanced-Lane-Lines, Lines: 32, Source: video_gen.py

Example 7: display

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
    combined_grad_mag_dir_hls[(combined_grad_mag_dir == 1) | (combined_sl == 1)] = 1
    # display(img, combined_grad_mag_dir_hls)

    warped, M = warp(combined_grad_mag_dir_hls)
    # display2(combined_grad_mag_dir_hls, warped)

    ploty, left_fitx, right_fitx, avg_left_fitx, avg_right_fitx = lanelines(warped)
    left_curverad, right_curverad = radius_of_curvature(ploty, left_fitx, right_fitx)
    avg_curverad = (left_curverad + right_curverad)/2

    offset = find_offset_from_center(img, left_fitx, right_fitx)

    result = project(warped, img, ploty, left_fitx, right_fitx, M, avg_curverad, offset, avg_left_fitx, avg_right_fitx)
    return result



test_images_path = 'test_images/'
images = glob.glob(test_images_path+"*.jpg")
for image in images:
    orig = mpimg.imread(image)
    left_fit_current = None
    right_fit_current = None
    pipeline(orig)


white_output = 'output_images/project_video_mapped.mp4'
clip1 = VideoFileClip("project_video.mp4")
white_clip = clip1.fl_image(pipeline)
white_clip.write_videofile(white_output, audio=False)
Author: tablet6, Project: AdvancedLaneFinding, Lines: 32, Source: pipeline.py

Example 8: annotate_video

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
def annotate_video(input_file, output_file):
	""" Given input_file video, save annotated video to output_file """
	video = VideoFileClip(input_file)
	annotated_video = video.fl_image(annotate_image)
	annotated_video.write_videofile(output_file, audio=False)
Author: WalesPeng, Project: CarND-Advanced-Lane-Lines-P4, Lines: 7, Source: line_fit_video.py
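A typical invocation, assuming annotate_image is defined elsewhere in line_fit_video.py (the file names here are placeholders):

annotate_video('project_video.mp4', 'project_video_annotated.mp4')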

Example 9: process_image

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
test_images = os.listdir("test_images/")
for test_image in test_images:
    # load in image
    initial_img = mpimg.imread('test_images/' + test_image)

    img = process_image(initial_img)
    # save to output
    mpimg.imsave("output/" + test_image, img)

# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML

making_videos = True

if making_videos:
    white_output = 'white.mp4'
    clip1 = VideoFileClip("solidWhiteRight.mp4")
    white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
    white_clip.write_videofile(white_output, audio=False)

    yellow_output = 'yellow.mp4'
    clip2 = VideoFileClip('solidYellowLeft.mp4')
    yellow_clip = clip2.fl_image(process_image)
    yellow_clip.write_videofile(yellow_output, audio=False)

    challenge_output = 'extra.mp4'
    clip2 = VideoFileClip('challenge.mp4')
    challenge_clip = clip2.fl_image(process_image)
    challenge_clip.write_videofile(challenge_output, audio=False)
Author: qpham01, Project: GitHub, Lines: 32, Source: lane_line_pipeline.py

Example 10: process_video

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
def process_video(input_path, output_path):
    #clip = VideoFileClip(input_path).subclip(23,28)
    clip = VideoFileClip(input_path)
    result = clip.fl_image(process_image)
    result.write_videofile(output_path)
Author: kpasad, Project: vehicle_detection, Lines: 7, Source: veh_det_pipeline.py

Example 11: draw_detections

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
# Adapted to video, based on PD_2016.py

import cv2
import numpy as np
from moviepy.editor import VideoFileClip

def draw_detections(img, rects, thickness = 1):
    for x, y, w, h in rects:
        pad_w, pad_h = int(0.15*w), int(0.05*h)
        cv2.rectangle(img, (x+pad_w, y+pad_h), (x+w-pad_w, y+h-pad_h), (0, 255, 0), thickness)

# Initialize the pedestrian detector once, at module level, rather than on
# every frame.
hog = cv2.HOGDescriptor()  # HOG feature extractor
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())  # default people detector

def process_image(img):
    # run pedestrian detection on the frame
    found, w = hog.detectMultiScale(img, winStride=(8, 8), padding=(8, 8), scale=1.15, finalThreshold=1)
    draw_detections(img, found)  # draw rectangles around detections
    return img

test_output = 'test_output.mp4'
clip1 = VideoFileClip('../Dataset/YouTube/【ドラレコ】橫浜市営バスの恐怖2.mp4')
test_clip = clip1.fl_image(process_image)
test_clip.write_videofile(test_output, audio=False)
Author: nyoshimura, Project: Pedestrian-Detection, Lines: 27, Source: PD_movie.py

Example 12: VideoFileClip

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
#plt.savefig('./output_images/detected_lane.png')


# # 3. Pipeline for video
# 
# The processed project video can be found here.
# 
# The result on project_video.mp4 is shown below. The algorithm did not work on the two challenge videos, and I didn't go further and modify the code to handle them. As mentioned above, I am not really convinced by the material in this project, so even if it succeeded on the challenge videos, I would have no confidence at all that it would work in new scenarios.

# In[39]:

output_dir = './output_images/'
clip_input_file = 'project_video.mp4'
clip_output_file = output_dir + 'sample_' + clip_input_file
clip = VideoFileClip(clip_input_file).subclip(30, 40)
clip_output = clip.fl_image(process_image)
get_ipython().magic('time clip_output.write_videofile(clip_output_file, audio=False)')


# In[40]:

output_dir = './output_images/'
clip_input_file = 'project_video.mp4'
clip_output_file = output_dir + 'processed_' + clip_input_file
clip = VideoFileClip(clip_input_file)
clip_output = clip.fl_image(process_image)
get_ipython().magic('time clip_output.write_videofile(clip_output_file, audio=False)')


# In[2]:
Author: dvu4, Project: CarND-Advanced-Lane-Lines, Lines: 32, Source: README.py

Example 13: process_image1

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
         'theta':1,
         'threshold':40,
         'min_line_length':3,
         'max_line_gap':1}

# Process images in the "test_images" directory.
for path in glob.glob('test_images/solid*.jpg'):
    fname = path.split("/")[1]
    image = mpimg.imread(path)
    processed_image = process_image1(image)
    mpimg.imsave("test_images/processed_%s" % fname, processed_image)

# Process first test video.
white_output = 'white.mp4'
clip1 = VideoFileClip("solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image2)
white_clip.write_videofile(white_output, audio=False)

# Process second test video.
yellow_output = 'yellow.mp4'
clip1 = VideoFileClip("solidYellowLeft.mp4")
yellow_clip = clip1.fl_image(process_image2)
yellow_clip.write_videofile(yellow_output, audio=False)

# Parameters for part 2 (challenge)

theta = {'horizon':0.61,
         'hood':0.07,
         'trapezoid_top_factor':0.10,
         'trapezoid_bottom_factor':0.90,
         'angle_cutoff':0.75,
Author: dventimi, Project: CarND-LaneLines-P1, Lines: 33, Source: lanelines.py

Example 14: process_video

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
##---------------------------------------------------------------------------------------

from moviepy.editor import VideoFileClip 


def process_video(img):

	out_img = detect_cars_with_heat(img, svc, X_scaler, orient, pix_per_cell, 
					cell_per_block, (spatial,spatial), histbin, heatmap_threshold)

	return out_img

white_output = '/home/haoyang/CarND-Vehicle-Detection/output_project_video.mp4'

clip1 = VideoFileClip('/home/haoyang/CarND-Vehicle-Detection/project_video.mp4')
white_clip = clip1.fl_image(process_video)
white_clip.write_videofile(white_output, audio=False)
Author: uxvii, Project: CarND-Vehicle-Detection, Lines: 19, Source: main.py

Example 15: pipeline_yolo

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Or: from moviepy.editor.VideoFileClip import fl_image [as alias]
        image = mpimg.imread(filename)

        #(1) Yolo pipeline
        yolo_result = pipeline_yolo(image)
        plt.figure()
        plt.imshow(yolo_result)
        plt.title('yolo pipeline', fontsize=30)

        #(2) SVM pipeline
        draw_img = pipeline_svm(image)
        fig = plt.figure()
        plt.imshow(draw_img)
        plt.title('svm pipeline', fontsize=30)
        plt.show()

    elif demo == 2:
        # YOLO Pipeline
        video_output = 'examples/project_YOLO.mp4'
        clip1 = VideoFileClip("examples/project_video.mp4").subclip(30,32)
        clip = clip1.fl_image(pipeline_yolo)
        clip.write_videofile(video_output, audio=False)

    else:
        # SVM pipeline
        video_output = 'examples/project_svm.mp4'
        clip1 = VideoFileClip("examples/project_video.mp4").subclip(30,32)
        clip = clip1.fl_image(pipeline_svm)
        clip.write_videofile(video_output, audio=False)


Author: nrsharon, Project: vehicle-detection, Lines: 30, Source: main.py


Note: The moviepy.editor.VideoFileClip.fl_image examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers, and copyright of the source code belongs to the original authors. Please refer to each project's license before distributing or using the code; do not reproduce without permission.