This article collects typical usage examples of the Python method classes.query.Clip.save. If you have been wondering how Clip.save is used in practice, or are looking for concrete examples of calling it, the curated code samples here may help. You can also explore further usage examples of the class it belongs to, classes.query.Clip.
Below are 4 code examples of the Clip.save method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
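All four examples follow the same basic pattern: build a plain dict of clip properties (usually by parsing the JSON produced by a libopenshot object), assign it to the data attribute of a classes.query.Clip object, and call save() to insert it into, or update it in, the project data. The following minimal sketch condenses that pattern from Example 1; it assumes the OpenShot Qt application has already been created (as in Example 2), since classes.query can only be imported once the app exists.

import os
import json
import openshot                  # libopenshot Python bindings
from classes import info

# classes.query must be imported after the OpenShot app is defined
from classes.query import Clip

# Build clip properties from a libopenshot Clip and parse its JSON
source_clip = openshot.Clip(os.path.join(info.IMAGES_PATH, "AboutLogo.png"))
clip_data = json.loads(source_clip.Json())

# Wrap the dict in a query object and persist it to the project data
query_clip = Clip()
query_clip.data = clip_data
query_clip.save()      # the first save inserts; saving again updates the same record

print(query_clip.id)   # the saved clip now has an id in the project data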
Example 1: test_add_clip
# Required module import: from classes.query import Clip [as alias]
# Or: from classes.query.Clip import save [as alias]
def test_add_clip(self):
    """ Test the Clip.save method by adding multiple clips """

    # Import additional classes that need the app defined first
    from classes.query import Clip

    # Find number of clips in project
    num_clips = len(Clip.filter())

    # Create clip
    c = openshot.Clip(os.path.join(info.IMAGES_PATH, "AboutLogo.png"))

    # Parse JSON
    clip_data = json.loads(c.Json())

    # Insert into project data
    query_clip = Clip()
    query_clip.data = clip_data
    query_clip.save()

    self.assertTrue(query_clip)
    self.assertEqual(len(Clip.filter()), num_clips + 1)

    # Save the clip again (which should not change the total # of clips)
    query_clip.save()
    self.assertEqual(len(Clip.filter()), num_clips + 1)
Example 2: setUpClass
# Required module import: from classes.query import Clip [as alias]
# Or: from classes.query.Clip import save [as alias]
@classmethod
def setUpClass(TestQueryClass):
    """ Init unit test data """

    # Create Qt application
    TestQueryClass.app = OpenShotApp(sys.argv, mode="unittest")
    TestQueryClass.clip_ids = []
    TestQueryClass.file_ids = []
    TestQueryClass.transition_ids = []

    # Import additional classes that need the app defined first
    from classes.query import Clip, File, Transition

    # Insert some clips into the project data
    for num in range(5):
        # Create clip
        c = openshot.Clip(os.path.join(info.IMAGES_PATH, "AboutLogo.png"))

        # Parse JSON
        clip_data = json.loads(c.Json())

        # Insert into project data
        query_clip = Clip()
        query_clip.data = clip_data
        query_clip.save()

        # Keep track of the ids
        TestQueryClass.clip_ids.append(query_clip.id)

    # Insert some files into the project data
    for num in range(5):
        # Create file
        r = openshot.DummyReader(openshot.Fraction(24, 1), 640, 480, 44100, 2, 30.0)

        # Parse JSON
        file_data = json.loads(r.Json())

        # Insert into project data
        query_file = File()
        query_file.data = file_data
        query_file.data["path"] = os.path.join(info.IMAGES_PATH, "AboutLogo.png")
        query_file.data["media_type"] = "image"
        query_file.save()

        # Keep track of the ids
        TestQueryClass.file_ids.append(query_file.id)

    # Insert some transitions into the project data
    for num in range(5):
        # Create mask object
        transition_object = openshot.Mask()
        transitions_data = json.loads(transition_object.Json())

        # Insert into project data
        query_transition = Transition()
        query_transition.data = transitions_data
        query_transition.save()

        # Keep track of the ids
        TestQueryClass.transition_ids.append(query_transition.id)
Example 3: read_legacy_project_file
# Required module import: from classes.query import Clip [as alias]
# Or: from classes.query.Clip import save [as alias]
def read_legacy_project_file(self, file_path):
    """Attempt to read a legacy version 1.x openshot project file"""
    import sys, pickle
    from classes.query import File, Track, Clip, Transition
    from classes.app import get_app
    import openshot

    try:
        import json
    except ImportError:
        import simplejson as json

    # Get translation method
    _ = get_app()._tr

    # Append version info
    v = openshot.GetVersion()
    project_data = {}
    project_data["version"] = {"openshot-qt": info.VERSION,
                               "libopenshot": v.ToString()}

    # Get FPS from project
    fps = get_app().project.get(["fps"])
    fps_float = float(fps["num"]) / float(fps["den"])

    # Import legacy openshot classes (from version 1.X)
    from classes.legacy.openshot import classes as legacy_classes
    from classes.legacy.openshot.classes import project as legacy_project
    from classes.legacy.openshot.classes import sequences as legacy_sequences
    from classes.legacy.openshot.classes import track as legacy_track
    from classes.legacy.openshot.classes import clip as legacy_clip
    from classes.legacy.openshot.classes import keyframe as legacy_keyframe
    from classes.legacy.openshot.classes import files as legacy_files
    from classes.legacy.openshot.classes import transition as legacy_transition
    from classes.legacy.openshot.classes import effect as legacy_effect
    from classes.legacy.openshot.classes import marker as legacy_marker
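    # Map the legacy module paths into sys.modules so pickle can resolve
    # the classes referenced inside the old (version 1.x) project file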
    sys.modules['openshot.classes'] = legacy_classes
    sys.modules['classes.project'] = legacy_project
    sys.modules['classes.sequences'] = legacy_sequences
    sys.modules['classes.track'] = legacy_track
    sys.modules['classes.clip'] = legacy_clip
    sys.modules['classes.keyframe'] = legacy_keyframe
    sys.modules['classes.files'] = legacy_files
    sys.modules['classes.transition'] = legacy_transition
    sys.modules['classes.effect'] = legacy_effect
    sys.modules['classes.marker'] = legacy_marker

    # Keep track of files that failed to load
    failed_files = []

    with open(file_path.encode('UTF-8'), 'rb') as f:
        try:
            # Unpickle legacy openshot project file
            v1_data = pickle.load(f, fix_imports=True, encoding="UTF-8")
            file_lookup = {}

            # Loop through files
            for item in v1_data.project_folder.items:
                # Is this item a File (i.e. ignore folders)
                if isinstance(item, legacy_files.OpenShotFile):
                    # Create file
                    try:
                        clip = openshot.Clip(item.name)
                        reader = clip.Reader()
                        file_data = json.loads(reader.Json(), strict=False)

                        # Determine media type
                        if file_data["has_video"] and not self.is_image(file_data):
                            file_data["media_type"] = "video"
                        elif file_data["has_video"] and self.is_image(file_data):
                            file_data["media_type"] = "image"
                        elif file_data["has_audio"] and not file_data["has_video"]:
                            file_data["media_type"] = "audio"

                        # Save new file to the project data
                        file = File()
                        file.data = file_data
                        file.save()

                        # Keep track of new ids and old ids
                        file_lookup[item.unique_id] = file
                    except:
                        # Handle exception quietly
                        msg = ("%s is not a valid video, audio, or image file." % item.name)
                        log.error(msg)
                        failed_files.append(item.name)

            # Delete all tracks
            track_list = copy.deepcopy(Track.filter())
            for track in track_list:
                track.delete()

            # Create new tracks
            track_counter = 0
            for legacy_t in reversed(v1_data.sequences[0].tracks):
                t = Track()
                t.data = {"number": track_counter, "y": 0, "label": legacy_t.name}
                t.save()
#.........rest of this method omitted.........
Example 4: accept
# Required module import: from classes.query import Clip [as alias]
# Or: from classes.query.Clip import save [as alias]
#.........earlier part of this method omitted.........
            # Scale animation
            start_scale = uniform(0.5, 1.5)
            end_scale = uniform(0.85, 1.15)

        elif zoom_value == "Zoom In":
            animate_start_x = 0.0
            animate_end_x = 0.0
            animate_start_y = 0.0
            animate_end_y = 0.0

            # Scale animation
            start_scale = 1.0
            end_scale = 1.25

        elif zoom_value == "Zoom Out":
            animate_start_x = 0.0
            animate_end_x = 0.0
            animate_start_y = 0.0
            animate_end_y = 0.0

            # Scale animation
            start_scale = 1.25
            end_scale = 1.0

        # Add keyframes
        start = openshot.Point((start_time * fps_float) + 1, start_scale, openshot.BEZIER)
        start_object = json.loads(start.Json())
        end = openshot.Point(end_time * fps_float, end_scale, openshot.BEZIER)
        end_object = json.loads(end.Json())
        new_clip["gravity"] = openshot.GRAVITY_CENTER
        new_clip["scale_x"]["Points"].append(start_object)
        new_clip["scale_x"]["Points"].append(end_object)
        new_clip["scale_y"]["Points"].append(start_object)
        new_clip["scale_y"]["Points"].append(end_object)

        # Add keyframes
        start_x = openshot.Point((start_time * fps_float) + 1, animate_start_x, openshot.BEZIER)
        start_x_object = json.loads(start_x.Json())
        end_x = openshot.Point(end_time * fps_float, animate_end_x, openshot.BEZIER)
        end_x_object = json.loads(end_x.Json())
        start_y = openshot.Point((start_time * fps_float) + 1, animate_start_y, openshot.BEZIER)
        start_y_object = json.loads(start_y.Json())
        end_y = openshot.Point(end_time * fps_float, animate_end_y, openshot.BEZIER)
        end_y_object = json.loads(end_y.Json())
        new_clip["gravity"] = openshot.GRAVITY_CENTER
        new_clip["location_x"]["Points"].append(start_x_object)
        new_clip["location_x"]["Points"].append(end_x_object)
        new_clip["location_y"]["Points"].append(start_y_object)
        new_clip["location_y"]["Points"].append(end_y_object)

        if transition_path:
            # Add transition for this clip (if any)
            # Open up QtImageReader for transition Image
            if random_transition:
                # randint() is inclusive on both ends, so the upper bound is
                # reduced by one here to avoid an IndexError
                random_index = randint(0, len(self.transitions) - 1)
                transition_path = self.transitions[random_index]
            # Get reader for transition
            transition_reader = openshot.QtImageReader(transition_path)
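            # Animate the transition: the mask's brightness keyframe sweeps
            # from 1.0 to -1.0 over the transition length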
            brightness = openshot.Keyframe()
            brightness.AddPoint(1, 1.0, openshot.BEZIER)
            brightness.AddPoint(min(transition_length, end_time - start_time) * fps_float, -1.0, openshot.BEZIER)
            contrast = openshot.Keyframe(3.0)

            # Create transition dictionary
            transitions_data = {
                "layer": track_num,
                "title": "Transition",
                "type": "Mask",
                "start": 0,
                "end": min(transition_length, end_time - start_time),
                "brightness": json.loads(brightness.Json()),
                "contrast": json.loads(contrast.Json()),
                "reader": json.loads(transition_reader.Json()),
                "replace_image": False
            }

            # Overlap this clip with the previous one (if any)
            position = max(start_position, position - transition_length)
            transitions_data["position"] = position
            new_clip["position"] = position

            # Create transition
            tran = Transition()
            tran.data = transitions_data
            tran.save()

        # Save Clip
        clip.data = new_clip
        clip.save()

        # Increment position by length of clip
        position += (end_time - start_time)

    # Accept dialog
    super(AddToTimeline, self).accept()