This article collects typical usage examples of classes.query.Clip.data in Python. If you have been wondering exactly how Clip.data is used, the curated examples below should help; you can also read further into the containing class, classes.query.Clip, for more context.
Four code examples of Clip.data are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
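Before diving into the examples, here is a minimal sketch of the pattern they all share: parse a libopenshot object's Json() output into a dict, assign that dict to the data attribute of a classes.query.Clip, and call save(). This is only an orientation sketch; it assumes the OpenShot Qt application (OpenShotApp) and its project data have already been initialized, as in Example 2 below.

# Minimal sketch of the shared Clip.data pattern (assumes a running OpenShotApp
# and initialized project data, as set up in Example 2's setUpClass)
import os
import json
import openshot
from classes import info
from classes.query import Clip

# Build a libopenshot Clip and parse its JSON representation into a plain dict
c = openshot.Clip(os.path.join(info.IMAGES_PATH, "AboutLogo.png"))
clip_data = json.loads(c.Json())
# Assign the dict to Clip.data and persist it into the project data
query_clip = Clip()
query_clip.data = clip_data
query_clip.save()
# The saved clip is now visible to the query API
print(len(Clip.filter()))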
Example 1: test_add_clip
# Required import: from classes.query import Clip [as alias]
# Or: from classes.query.Clip import data [as alias]
def test_add_clip(self):
    """ Test the Clip.save method by adding multiple clips """
    # Import additional classes that need the app defined first
    from classes.query import Clip
    # Find number of clips in project
    num_clips = len(Clip.filter())
    # Create clip
    c = openshot.Clip(os.path.join(info.IMAGES_PATH, "AboutLogo.png"))
    # Parse JSON
    clip_data = json.loads(c.Json())
    # Insert into project data
    query_clip = Clip()
    query_clip.data = clip_data
    query_clip.save()
    self.assertTrue(query_clip)
    self.assertEqual(len(Clip.filter()), num_clips + 1)
    # Save the clip again (which should not change the total # of clips)
    query_clip.save()
    self.assertEqual(len(Clip.filter()), num_clips + 1)
Example 2: setUpClass
# Required import: from classes.query import Clip [as alias]
# Or: from classes.query.Clip import data [as alias]
def setUpClass(TestQueryClass):
    """ Init unit test data """
    # Create Qt application
    TestQueryClass.app = OpenShotApp(sys.argv, mode="unittest")
    TestQueryClass.clip_ids = []
    TestQueryClass.file_ids = []
    TestQueryClass.transition_ids = []
    # Import additional classes that need the app defined first
    from classes.query import Clip, File, Transition
    # Insert some clips into the project data
    for num in range(5):
        # Create clip
        c = openshot.Clip(os.path.join(info.IMAGES_PATH, "AboutLogo.png"))
        # Parse JSON
        clip_data = json.loads(c.Json())
        # Insert into project data
        query_clip = Clip()
        query_clip.data = clip_data
        query_clip.save()
        # Keep track of the ids
        TestQueryClass.clip_ids.append(query_clip.id)
    # Insert some files into the project data
    for num in range(5):
        # Create file
        r = openshot.DummyReader(openshot.Fraction(24, 1), 640, 480, 44100, 2, 30.0)
        # Parse JSON
        file_data = json.loads(r.Json())
        # Insert into project data
        query_file = File()
        query_file.data = file_data
        query_file.data["path"] = os.path.join(info.IMAGES_PATH, "AboutLogo.png")
        query_file.data["media_type"] = "image"
        query_file.save()
        # Keep track of the ids
        TestQueryClass.file_ids.append(query_file.id)
    # Insert some transitions into the project data
    for num in range(5):
        # Create mask object
        transition_object = openshot.Mask()
        transitions_data = json.loads(transition_object.Json())
        # Insert into project data
        query_transition = Transition()
        query_transition.data = transitions_data
        query_transition.save()
        # Keep track of the ids
        TestQueryClass.transition_ids.append(query_transition.id)
Example 3: read_legacy_project_file
# Required import: from classes.query import Clip [as alias]
# Or: from classes.query.Clip import data [as alias]
def read_legacy_project_file(self, file_path):
    """Attempt to read a legacy version 1.x openshot project file"""
    import sys, pickle
    from classes.query import File, Track, Clip, Transition
    from classes.app import get_app
    import openshot
    try:
        import json
    except ImportError:
        import simplejson as json
    # Get translation method
    _ = get_app()._tr
    # Append version info
    v = openshot.GetVersion()
    project_data = {}
    project_data["version"] = {"openshot-qt" : info.VERSION,
                               "libopenshot" : v.ToString()}
    # Get FPS from project
    from classes.app import get_app
    fps = get_app().project.get(["fps"])
    fps_float = float(fps["num"]) / float(fps["den"])
    # Import legacy openshot classes (from version 1.X)
    from classes.legacy.openshot import classes as legacy_classes
    from classes.legacy.openshot.classes import project as legacy_project
    from classes.legacy.openshot.classes import sequences as legacy_sequences
    from classes.legacy.openshot.classes import track as legacy_track
    from classes.legacy.openshot.classes import clip as legacy_clip
    from classes.legacy.openshot.classes import keyframe as legacy_keyframe
    from classes.legacy.openshot.classes import files as legacy_files
    from classes.legacy.openshot.classes import transition as legacy_transition
    from classes.legacy.openshot.classes import effect as legacy_effect
    from classes.legacy.openshot.classes import marker as legacy_marker
    sys.modules['openshot.classes'] = legacy_classes
    sys.modules['classes.project'] = legacy_project
    sys.modules['classes.sequences'] = legacy_sequences
    sys.modules['classes.track'] = legacy_track
    sys.modules['classes.clip'] = legacy_clip
    sys.modules['classes.keyframe'] = legacy_keyframe
    sys.modules['classes.files'] = legacy_files
    sys.modules['classes.transition'] = legacy_transition
    sys.modules['classes.effect'] = legacy_effect
    sys.modules['classes.marker'] = legacy_marker
    # Keep track of files that failed to load
    failed_files = []
    with open(file_path.encode('UTF-8'), 'rb') as f:
        try:
            # Unpickle legacy openshot project file
            v1_data = pickle.load(f, fix_imports=True, encoding="UTF-8")
            file_lookup = {}
            # Loop through files
            for item in v1_data.project_folder.items:
                # Is this item a File (i.e. ignore folders)
                if isinstance(item, legacy_files.OpenShotFile):
                    # Create file
                    try:
                        clip = openshot.Clip(item.name)
                        reader = clip.Reader()
                        file_data = json.loads(reader.Json(), strict=False)
                        # Determine media type
                        if file_data["has_video"] and not self.is_image(file_data):
                            file_data["media_type"] = "video"
                        elif file_data["has_video"] and self.is_image(file_data):
                            file_data["media_type"] = "image"
                        elif file_data["has_audio"] and not file_data["has_video"]:
                            file_data["media_type"] = "audio"
                        # Save new file to the project data
                        file = File()
                        file.data = file_data
                        file.save()
                        # Keep track of new ids and old ids
                        file_lookup[item.unique_id] = file
                    except:
                        # Handle exception quietly
                        msg = ("%s is not a valid video, audio, or image file." % item.name)
                        log.error(msg)
                        failed_files.append(item.name)
            # Delete all tracks
            track_list = copy.deepcopy(Track.filter())
            for track in track_list:
                track.delete()
            # Create new tracks
            track_counter = 0
            for legacy_t in reversed(v1_data.sequences[0].tracks):
                t = Track()
                t.data = {"number": track_counter, "y": 0, "label": legacy_t.name}
                t.save()
#......... part of the code omitted here .........
Example 4: accept
# Required import: from classes.query import Clip [as alias]
# Or: from classes.query.Clip import data [as alias]
def accept(self):
    """ Ok button clicked """
    log.info('accept')
    # Get settings from form
    start_position = self.txtStartTime.value()
    track_num = self.cmbTrack.currentData()
    fade_value = self.cmbFade.currentData()
    fade_length = self.txtFadeLength.value()
    transition_path = self.cmbTransition.currentData()
    transition_length = self.txtTransitionLength.value()
    image_length = self.txtImageLength.value()
    zoom_value = self.cmbZoom.currentData()
    # Init position
    position = start_position
    random_transition = False
    if transition_path == "random":
        random_transition = True
    # Get frames per second
    fps = get_app().project.get(["fps"])
    fps_float = float(fps["num"]) / float(fps["den"])
    # Loop through each file (in the current order)
    for file in self.treeFiles.timeline_model.files:
        # Create a clip
        clip = Clip()
        clip.data = {}
        if (file.data["media_type"] == "video" or file.data["media_type"] == "image"):
            # Determine thumb path
            thumb_path = os.path.join(info.THUMBNAIL_PATH, "%s.png" % file.data["id"])
        else:
            # Audio file
            thumb_path = os.path.join(info.PATH, "images", "AudioThumbnail.png")
        # Get file name
        path, filename = os.path.split(file.data["path"])
        # Convert path to the correct relative path (based on this folder)
        file_path = file.absolute_path()
        # Create clip object for this file
        c = openshot.Clip(file_path)
        # Append missing attributes to Clip JSON
        new_clip = json.loads(c.Json())
        new_clip["position"] = position
        new_clip["layer"] = track_num
        new_clip["file_id"] = file.id
        new_clip["title"] = filename
        new_clip["image"] = thumb_path
        # Overwrite frame rate (in case the user changed it in the File Properties)
        file_properties_fps = float(file.data["fps"]["num"]) / float(file.data["fps"]["den"])
        file_fps = float(new_clip["reader"]["fps"]["num"]) / float(new_clip["reader"]["fps"]["den"])
        fps_diff = file_fps / file_properties_fps
        new_clip["reader"]["fps"]["num"] = file.data["fps"]["num"]
        new_clip["reader"]["fps"]["den"] = file.data["fps"]["den"]
        # Scale duration / length / and end properties
        new_clip["reader"]["duration"] *= fps_diff
        new_clip["end"] *= fps_diff
        new_clip["duration"] *= fps_diff
        # Check for optional start and end attributes
        start_time = 0
        end_time = new_clip["reader"]["duration"]
        if 'start' in file.data.keys():
            start_time = file.data['start']
            new_clip["start"] = start_time
        if 'end' in file.data.keys():
            end_time = file.data['end']
            new_clip["end"] = end_time
        # Adjust clip duration, start, and end
        new_clip["duration"] = new_clip["reader"]["duration"]
        if file.data["media_type"] == "image":
            end_time = image_length
            new_clip["end"] = end_time
        # Adjust Fade of Clips (if no transition is chosen)
        if not transition_path:
            if fade_value != None:
                # Overlap this clip with the previous one (if any)
                position = max(start_position, new_clip["position"] - fade_length)
                new_clip["position"] = position
            if fade_value == 'Fade In' or fade_value == 'Fade In & Out':
                start = openshot.Point((start_time * fps_float) + 1, 0.0, openshot.BEZIER)
                start_object = json.loads(start.Json())
                end = openshot.Point(min((start_time + fade_length) * fps_float, end_time * fps_float), 1.0, openshot.BEZIER)
                end_object = json.loads(end.Json())
                new_clip['alpha']["Points"].append(start_object)
                new_clip['alpha']["Points"].append(end_object)
            if fade_value == 'Fade Out' or fade_value == 'Fade In & Out':
                start = openshot.Point(max((end_time * fps_float) - (fade_length * fps_float), start_time * fps_float), 1.0, openshot.BEZIER)
#......... part of the code omitted here .........