This article collects typical usage examples of the Python Red9_General class. If you have been wondering what exactly Red9_General does in Python, or how to use it, the curated code samples below should help.
The 14 examples shown below are ordered by popularity by default. You can up-vote the examples you find useful; your feedback helps surface better Python code samples.
Example 1: poseSave
def poseSave(self, nodes, filepath=None, useFilter=True, storeThumbnail=True):
    '''
    Entry point for the generic PoseSave.

    :param nodes: nodes to store the data against OR the rootNode if the
        filter is active.
    :param filepath: posefile to save - if not given the pose is cached on this
        class instance.
    :param useFilter: use the filterSettings or not.
    '''
    # push args to object - means that any poseHandler.py file has access to them
    self.filepath=filepath
    self.useFilter=useFilter
    if self.filepath:
        log.debug('PosePath given : %s' % filepath)

    self.buildInternalPoseData(nodes)

    if self.filepath:
        self._writePose(filepath)

        if storeThumbnail:
            sel=cmds.ls(sl=True,l=True)
            cmds.select(cl=True)
            r9General.thumbNailScreen(filepath,self.thumbnailRes[0],self.thumbnailRes[1])
            if sel:
                cmds.select(sel)
        log.info('Pose Saved Successfully to : %s' % filepath)
Example 2: combineAudio
def combineAudio(self, filepath):
    '''
    Combine audio tracks into a single wav file. This by-passes
    the issues with Maya not playblasting multiple audio tracks.

    :param filepath: filepath to store the combined audioTrack
    TODO: Deal with offset start and end data + silence
    '''
    status=True
    failed=[]
    if not len(self.audioNodes)>1:
        raise ValueError('We need more than 1 audio node in order to compile')

    for audio in cmds.ls(type='audio'):
        audioNode=AudioNode(audio)
        if audioNode.path==filepath:
            if audioNode.isCompiled:
                log.info('Deleting currently compiled Audio Track : %s' % audioNode.path)
                if audioNode in self.audioNodes:
                    self.audioNodes.remove(audioNode)
                audioNode.delete()
                break
            else:
                raise IOError('Combined Audio path is already imported into Maya')

    frmrange = self.getOverallRange()
    neg_adjustment=0
    if frmrange[0] < 0:
        neg_adjustment=frmrange[0]

    duration = ((frmrange[1] + abs(neg_adjustment)) / r9General.getCurrentFPS()) * 1000
    log.info('Audio BaseTrack duration = %f' % duration)
    baseTrack = audio_segment.AudioSegment.silent(duration)

    for audio in self.audioNodes:
        if not os.path.exists(audio.path):
            log.warning('Audio file not found! : "%s" == %s' % (audio.audioNode, audio.path))
            status = False
            failed.append(audio)
            continue
        sound = audio_segment.AudioSegment.from_wav(audio.path)
        if sound.sample_width not in [1, 2, 4]:
            log.warning('24bit Audio is NOT supported in Python audioop lib! : "%s" == %i' % (audio.audioNode, sound.sample_width))
            status = False
            failed.append(audio)
            continue
        insertFrame = (audio.startFrame + abs(neg_adjustment))
        log.info('inserting sound : %s at %f adjusted to %f' % \
                 (audio.audioNode, audio.startFrame, insertFrame))
        baseTrack = baseTrack.overlay(sound, position=(insertFrame / r9General.getCurrentFPS()) * 1000)

    baseTrack.export(filepath, format="wav")

    compiled=AudioNode(filepath=filepath)
    compiled.importAndActivate()
    compiled.stampCompiled(self.mayaNodes)
    compiled.startFrame=neg_adjustment

    if not status:
        raise StandardError('combine completed with errors: see script Editor for details')
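The heart of the combine step is the overlay pattern: build a silent base track spanning the overall frame range, drop each wav onto it at its start offset converted to milliseconds, then export. Below is a minimal standalone sketch of that idea using the public pydub package directly (Red9 ships its own bundled audio_segment module); the file names, frame values and the fixed 24 fps framerate are assumptions for illustration only.

# Sketch only: assumes pydub is installed and the wav paths exist.
from pydub import AudioSegment

FPS = 24.0  # assumed fixed framerate, standing in for r9General.getCurrentFPS()

# (path, startFrame) pairs standing in for the AudioNode data
tracks = [('dialogue.wav', 0), ('footsteps.wav', 36)]
overall_end_frame = 120  # assumed end of the overall range

# silent base track covering the whole range, duration given in milliseconds
baseTrack = AudioSegment.silent(duration=(overall_end_frame / FPS) * 1000)

for path, startFrame in tracks:
    sound = AudioSegment.from_wav(path)
    # overlay position is milliseconds from the start of the base track
    baseTrack = baseTrack.overlay(sound, position=(startFrame / FPS) * 1000)

baseTrack.export('combined.wav', format="wav")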
Example 3: audioPathLoaded
def audioPathLoaded(filepath):
    '''
    return any soundNodes in Maya that point to the given audio path
    '''
    nodes = []
    if not os.path.exists(filepath):
        return nodes
    for audio in cmds.ls(type='audio'):
        if r9General.formatPath(cmds.getAttr('%s.filename' % audio)) == r9General.formatPath(filepath):
            nodes.append(audio)
    return nodes
Example 4: timecode_to_milliseconds
def timecode_to_milliseconds(timecode, smpte=True, framerate=None):
    '''
    from a properly formatted timecode return it in milliseconds

    r9Audio.timecode_to_milliseconds('09:00:00:00')

    :param timecode: '09:00:00:20' as a string
    :param smpte: calculate the milliseconds based on HH:MM:SS:FF (frames as last block)
    :param framerate: only used if smpte=True, the framerate to use in the conversion,
        default (None) uses the current scene's framerate
    '''
    if not framerate:
        framerate=r9General.getCurrentFPS()

    data = timecode.split(':')
    if not len(data) == 4:
        raise IOError('timecode should be in the format "09:00:00:00"')
    if smpte and int(data[3])>framerate:
        raise IOError('timecode is badly formatted, frameblock is greater than given framerate')

    actual = int(data[0]) * 3600000
    actual += int(data[1]) * 60000
    actual += int(data[2]) * 1000
    if smpte:
        actual += (int(data[3]) * 1000) / float(framerate)
    else:
        actual += int(data[3])
    return actual
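As a quick sanity check of the arithmetic above, here is a standalone sketch of the same SMPTE conversion with an explicit framerate (24 fps assumed), so it runs without Maya or r9General:

# Standalone sketch of the HH:MM:SS:FF -> milliseconds arithmetic,
# assuming a fixed 24 fps instead of the current scene framerate.
def smpte_to_milliseconds(timecode, framerate=24.0):
    hh, mm, ss, ff = (int(block) for block in timecode.split(':'))
    return (hh * 3600000) + (mm * 60000) + (ss * 1000) + (ff * 1000) / float(framerate)

print(smpte_to_milliseconds('09:00:00:12'))  # 32400500.0 -> 9 hours plus 12 frames (half a second at 24fps)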
Example 5: startTime
def startTime(self):
    '''
    : PRO_PACK : Maya start time of the sound node in milliseconds
    '''
    if self.isLoaded:
        return (self.startFrame / r9General.getCurrentFPS()) * 1000
    return 0
Example 6: startTime
def startTime(self):
    '''
    this is in milliseconds
    '''
    if self.isLoaded:
        return (self.startFrame / r9General.getCurrentFPS()) * 1000
    return 0
Example 7: getLengthFromWav
def getLengthFromWav(self):
    '''
    This uses the wav itself, bypassing the Maya handling. Why?
    In maya.standalone the audio isn't loaded correctly and is always of length 1!
    '''
    with contextlib.closing(wave.open(self.path, 'r')) as f:
        frames=f.getnframes()
        rate=f.getframerate()
        duration=frames/float(rate)
        return (duration) * r9General.getCurrentFPS()
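The same wave/contextlib pattern works in any plain Python interpreter; a minimal standalone version of the length calculation (with a placeholder file path and a hardcoded 24 fps in place of r9General.getCurrentFPS()) might look like this:

import contextlib
import wave

def wav_length_in_frames(path, fps=24.0):
    # read the sample count and sample rate straight from the wav header
    with contextlib.closing(wave.open(path, 'r')) as f:
        duration_seconds = f.getnframes() / float(f.getframerate())
    return duration_seconds * fps

print(wav_length_in_frames('voiceover.wav'))  # 'voiceover.wav' is a placeholder path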
Example 8: combineAudio
def combineAudio(self, filepath):
    '''
    Combine audio tracks into a single wav file. This by-passes
    the issues with Maya not playblasting multiple audio tracks.

    @param filepath: filepath to store the combined audioTrack
    TODO: Deal with offset start and end data + silence
    '''
    if not len(self.audioNodes)>1:
        raise ValueError('We need more than 1 audio node in order to compile')

    for audio in cmds.ls(type='audio'):
        audioNode=AudioNode(audio)
        if audioNode.path==filepath:
            if audioNode.isCompiled:
                log.info('Deleting currently compiled Audio Track : %s' % audioNode.path)
                if audioNode in self.audioNodes:
                    self.audioNodes.remove(audioNode)
                audioNode.delete()
                break
            else:
                raise IOError('Combined Audio path is already imported into Maya')

    frmrange = self.getOverallRange()
    neg_adjustment=0
    if frmrange[0] < 0:
        neg_adjustment=frmrange[0]

    duration = ((frmrange[1] + abs(neg_adjustment)) / r9General.getCurrentFPS()) * 1000
    log.info('Audio BaseTrack duration = %f' % duration)
    baseTrack = audio_segment.AudioSegment.silent(duration)

    for audio in self.audioNodes:
        sound = audio_segment.AudioSegment.from_wav(audio.path)
        insertFrame = (audio.startFrame + abs(neg_adjustment))
        log.info('inserting sound : %s at %f adjusted to %f' % \
                 (audio.audioNode, audio.startFrame, insertFrame))
        baseTrack = baseTrack.overlay(sound, position=(insertFrame / r9General.getCurrentFPS()) * 1000)

    baseTrack.export(filepath, format="wav")
    compiled=AudioNode.importAndActivate(filepath)
    compiled.stampCompiled(self.mayaNodes)
    compiled.startFrame=neg_adjustment
Example 9: milliseconds_to_frame
def milliseconds_to_frame(milliseconds, framerate=None):
    '''
    convert milliseconds into frames

    :param milliseconds: time in milliseconds
    :param framerate: the framerate to use in the conversion,
        default (None) uses the current scene's framerate
    '''
    if not framerate:
        framerate=r9General.getCurrentFPS()
    return (float(milliseconds) / 1000) * framerate
Example 10: frame_to_milliseconds
def frame_to_milliseconds(frame, framerate=None):
    '''
    from a given frame return that time in milliseconds
    relative to the given framerate

    :param frame: current frame in Maya
    :param framerate: the framerate to use in the conversion,
        default (None) uses the current scene's framerate
    '''
    if not framerate:
        framerate=r9General.getCurrentFPS()
    return (frame / float(framerate)) * 1000
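Examples 9 and 10 are simple inverses of each other; with an explicit framerate (30 fps assumed here) the round trip is easy to verify outside Maya:

# Round-trip sketch of the frame <-> millisecond conversions,
# using an explicit 30 fps instead of the current scene framerate.
def milliseconds_to_frame(milliseconds, framerate=30.0):
    return (float(milliseconds) / 1000) * framerate

def frame_to_milliseconds(frame, framerate=30.0):
    return (frame / float(framerate)) * 1000

ms = frame_to_milliseconds(45)    # 1500.0 ms at 30fps
print(milliseconds_to_frame(ms))  # back to 45.0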
Example 11: milliseconds_to_Timecode
def milliseconds_to_Timecode(milliseconds, smpte=True, framerate=None):
    '''
    convert milliseconds into correctly formatted timecode

    :param milliseconds: time in milliseconds
    :param smpte: format the timecode HH:MM:SS:FF where FF is frames
    :param framerate: when using smpte this is the framerate used in the FF block,
        default (None) uses the current scene's framerate

    .. note::
        * If smpte = False : the format will be HH:MM:SS:MSS = hours, minutes, seconds, milliseconds
        * If smpte = True : the format will be HH:MM:SS:FF = hours, minutes, seconds, frames
    '''
    def __zeropad(value):
        if value<10:
            return '0%s' % value
        else:
            return value

    if not framerate:
        framerate=r9General.getCurrentFPS()

    if milliseconds > 3600000:
        hours = int(math.floor(milliseconds / 3600000))
        milliseconds -= (hours * 3600000)
    else:
        hours = 0
    if milliseconds > 60000:
        minutes = int(math.floor(milliseconds / 60000))
        milliseconds -= (minutes * 60000)
    else:
        minutes = 0
    if milliseconds > 1000:
        seconds = int(math.floor(milliseconds / 1000))
        milliseconds -= (seconds * 1000)
    else:
        seconds = 0
    frame = int(math.floor(milliseconds))
    if smpte:
        frame = int(math.ceil((float(frame)/1000) * float(framerate)))

    return "{0}:{1}:{2}:{3}".format(__zeropad(hours),
                                    __zeropad(minutes),
                                    __zeropad(seconds),
                                    __zeropad(frame))
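A quick usage check of the formatting logic, assuming Red9 is installed and that this function lives in Red9.core.Red9_Audio (the module normally imported as r9Audio; treat the import path as an assumption). Passing an explicit framerate keeps the result deterministic regardless of the scene settings:

import Red9.core.Red9_Audio as r9Audio

# 90500ms = 1 minute, 30 seconds and 500ms; at 24fps that trailing 500ms is 12 frames
print(r9Audio.milliseconds_to_Timecode(90500, smpte=True, framerate=24))   # '00:01:30:12'
print(r9Audio.milliseconds_to_Timecode(90500, smpte=False, framerate=24))  # '00:01:30:500'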
Example 12: openAudioPath
def openAudioPath(self):
    path=self.path
    if path and os.path.exists(path):
        r9General.os_OpenFileDirectory(path)
Example 13: endTime
def endTime(self):
    '''
    this is in milliseconds
    '''
    return (self.endFrame / r9General.getCurrentFPS()) * 1000
Example 14: endTime
def endTime(self):
    '''
    : PRO_PACK : Maya end time of the sound node in milliseconds
    '''
    return (self.endFrame / r9General.getCurrentFPS()) * 1000