

Python FilenameBuilder.fill Method Code Examples

This article collects typical usage examples of the Python method mediagoblin.processing.FilenameBuilder.fill, gathered from open-source projects. If you are unsure what FilenameBuilder.fill does or how to call it, the curated examples below should help. You can also explore other usage examples of mediagoblin.processing.FilenameBuilder.


The following 15 code examples of FilenameBuilder.fill are shown below, sorted by popularity by default.
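
Before the examples, here is a minimal, hypothetical sketch of the pattern they all share: a FilenameBuilder is constructed from a source filename, and fill() substitutes the derived {basename} and {ext} placeholders into a template string. The path and the resulting names in the comments are illustrative assumptions, not output taken from any of the projects listed below.

from mediagoblin.processing import FilenameBuilder

# Hypothetical queued file; any local path works the same way.
name_builder = FilenameBuilder('/tmp/workbench/holiday-photo.jpg')

# Reuse the original name when storing the untouched copy.
original_name = name_builder.fill('{basename}{ext}')         # e.g. 'holiday-photo.jpg'

# Derive names for generated files from the same basename.
thumb_name = name_builder.fill('{basename}.thumbnail{ext}')  # e.g. 'holiday-photo.thumbnail.jpg'
medium_name = name_builder.fill('{basename}.medium{ext}')    # e.g. 'holiday-photo.medium.jpg'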

Example 1: process_pdf

# Required import: from mediagoblin.processing import FilenameBuilder [as alias]
# Or: from mediagoblin.processing.FilenameBuilder import fill [as alias]
def process_pdf(proc_state):
    """Code to process a pdf file. Will be run by celery.

    A Workbench() represents a local temporary dir. It is automatically
    cleaned up when this function exits.
    """
    entry = proc_state.entry
    workbench = proc_state.workbench

    queued_filename = proc_state.get_queued_filename()
    name_builder = FilenameBuilder(queued_filename)

    # Copy our queued local workbench to its final destination
    original_dest = name_builder.fill('{basename}{ext}')
    proc_state.copy_original(original_dest)

    # Create a pdf if this is a different doc, store pdf for viewer
    ext = queued_filename.rsplit('.', 1)[-1].lower()
    if ext == 'pdf':
        pdf_filename = queued_filename
    else:
        pdf_filename = queued_filename.rsplit('.', 1)[0] + '.pdf'
        unoconv = where('unoconv')
        call(executable=unoconv,
             args=[unoconv, '-v', '-f', 'pdf', queued_filename])
        if not os.path.exists(pdf_filename):
            _log.debug('unoconv failed to convert file to pdf')
            raise BadMediaFail()
        proc_state.store_public(keyname=u'pdf', local_file=pdf_filename)

    pdf_info_dict = pdf_info(pdf_filename)

    for name, width, height in [
        (u'thumb', mgg.global_config['media:thumb']['max_width'],
                   mgg.global_config['media:thumb']['max_height']),
        (u'medium', mgg.global_config['media:medium']['max_width'],
                   mgg.global_config['media:medium']['max_height']),
        ]:
        filename = name_builder.fill('{basename}.%s.png' % name)
        path = workbench.joinpath(filename)
        create_pdf_thumb(pdf_filename, path, width, height)
        assert(os.path.exists(path))
        proc_state.store_public(keyname=name, local_file=path)

    proc_state.delete_queue_file()

    entry.media_data_init(**pdf_info_dict)
    entry.save()
Author: praveen97uma, Project: goblin, Lines: 50, Source: processing.py

Example 2: process_image

# Required import: from mediagoblin.processing import FilenameBuilder [as alias]
# Or: from mediagoblin.processing.FilenameBuilder import fill [as alias]
def process_image(proc_state):
    """Code to process an image. Will be run by celery.

    A Workbench() represents a local temporary dir. It is automatically
    cleaned up when this function exits.
    """
    entry = proc_state.entry
    workbench = proc_state.workbench

    # Conversions subdirectory to avoid collisions
    conversions_subdir = os.path.join(
        workbench.dir, 'conversions')
    os.mkdir(conversions_subdir)

    queued_filename = proc_state.get_queued_filename()
    name_builder = FilenameBuilder(queued_filename)

    # EXIF extraction
    exif_tags = extract_exif(queued_filename)
    gps_data = get_gps_data(exif_tags)

    # Always create a small thumbnail
    resize_tool(proc_state, True, 'thumb',
                name_builder.fill('{basename}.thumbnail{ext}'),
                conversions_subdir, exif_tags)

    # Possibly create a medium
    resize_tool(proc_state, False, 'medium',
                name_builder.fill('{basename}.medium{ext}'),
                conversions_subdir, exif_tags)

    # Copy our queued local workbench to its final destination
    proc_state.copy_original(name_builder.fill('{basename}{ext}'))

    # Remove queued media file from storage and database
    proc_state.delete_queue_file()

    # Insert exif data into database
    exif_all = clean_exif(exif_tags)

    if len(exif_all):
        entry.media_data_init(exif_all=exif_all)

    if len(gps_data):
        for key in list(gps_data.keys()):
            gps_data['gps_' + key] = gps_data.pop(key)
        entry.media_data_init(**gps_data)
Author: praveen97uma, Project: goblin, Lines: 49, Source: processing.py

Example 3: CommonVideoProcessor

# Required import: from mediagoblin.processing import FilenameBuilder [as alias]
# Or: from mediagoblin.processing.FilenameBuilder import fill [as alias]
class CommonVideoProcessor(MediaProcessor):
    """
    Provides a base for various video processing steps
    """
    acceptable_files = ['original', 'best_quality', 'webm_video']

    def common_setup(self):
        self.video_config = mgg \
            .global_config['plugins'][MEDIA_TYPE]

        # Pull down and set up the processing file
        self.process_filename = get_process_filename(
            self.entry, self.workbench, self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

        self.transcoder = transcoders.VideoTranscoder()
        self.did_transcode = False

    def copy_original(self):
        # If we didn't transcode, then we need to keep the original
        if not self.did_transcode or \
           (self.video_config['keep_original'] and self.did_transcode):
            copy_original(
                self.entry, self.process_filename,
                self.name_builder.fill('{basename}{ext}'))

    def _keep_best(self):
        """
        If there is no original, keep the best file that we have
        """
        if not self.entry.media_files.get('best_quality'):
            # Save the best quality file if no original?
            if not self.entry.media_files.get('original') and \
                    self.entry.media_files.get('webm_video'):
                self.entry.media_files['best_quality'] = self.entry \
                    .media_files['webm_video']

    def _skip_processing(self, keyname, **kwargs):
        file_metadata = self.entry.get_file_metadata(keyname)

        if not file_metadata:
            return False
        skip = True

        if keyname == 'webm_video':
            if kwargs.get('medium_size') != file_metadata.get('medium_size'):
                skip = False
            elif kwargs.get('vp8_quality') != file_metadata.get('vp8_quality'):
                skip = False
            elif kwargs.get('vp8_threads') != file_metadata.get('vp8_threads'):
                skip = False
            elif kwargs.get('vorbis_quality') != \
                    file_metadata.get('vorbis_quality'):
                skip = False
        elif keyname == 'thumb':
            if kwargs.get('thumb_size') != file_metadata.get('thumb_size'):
                skip = False

        return skip


    def transcode(self, medium_size=None, vp8_quality=None, vp8_threads=None,
                  vorbis_quality=None):
        progress_callback = ProgressCallback(self.entry)
        tmp_dst = os.path.join(self.workbench.dir,
                               self.name_builder.fill('{basename}.medium.webm'))

        if not medium_size:
            medium_size = (
                mgg.global_config['media:medium']['max_width'],
                mgg.global_config['media:medium']['max_height'])
        if not vp8_quality:
            vp8_quality = self.video_config['vp8_quality']
        if not vp8_threads:
            vp8_threads = self.video_config['vp8_threads']
        if not vorbis_quality:
            vorbis_quality = self.video_config['vorbis_quality']

        file_metadata = {'medium_size': medium_size,
                         'vp8_threads': vp8_threads,
                         'vp8_quality': vp8_quality,
                         'vorbis_quality': vorbis_quality}

        if self._skip_processing('webm_video', **file_metadata):
            return

        # Extract metadata and keep a record of it
        metadata = transcoders.discover(self.process_filename)

        # metadata's stream info here is a DiscovererContainerInfo instance,
        # it gets split into DiscovererAudioInfo and DiscovererVideoInfo;
        # metadata itself has container-related data in tags, like video-codec
        store_metadata(self.entry, metadata)

        orig_dst_dimensions = (metadata.get_video_streams()[0].get_width(),
                metadata.get_video_streams()[0].get_height())

        # Figure out whether or not we need to transcode this video or
        # if we can skip it
        if skip_transcode(metadata, medium_size):
#......... some code omitted here .........
Author: ausbin, Project: mediagoblin, Lines: 103, Source: processing.py

Example 4: CommonImageProcessor

# Required import: from mediagoblin.processing import FilenameBuilder [as alias]
# Or: from mediagoblin.processing.FilenameBuilder import fill [as alias]
class CommonImageProcessor(MediaProcessor):
    """
    Provides a base for various media processing steps
    """
    # list of acceptable file keys in order of preference for reprocessing
    acceptable_files = ['original', 'medium']

    def common_setup(self):
        """
        Set up the workbench directory and pull down the original file
        """
        self.image_config = mgg.global_config[
            'media_type:mediagoblin.media_types.image']

        ## @@: Should this be two functions?
        # Conversions subdirectory to avoid collisions
        self.conversions_subdir = os.path.join(
            self.workbench.dir, 'conversions')
        os.mkdir(self.conversions_subdir)

        # Pull down and set up the processing file
        self.process_filename = get_process_filename(
            self.entry, self.workbench, self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

        # Exif extraction
        self.exif_tags = extract_exif(self.process_filename)

    def generate_medium_if_applicable(self, size=None, quality=None,
                                      filter=None):
        if not quality:
            quality = self.image_config['quality']
        if not filter:
            filter = self.image_config['resize_filter']

        resize_tool(self.entry, False, 'medium', self.process_filename,
                    self.name_builder.fill('{basename}.medium{ext}'),
                    self.conversions_subdir, self.exif_tags, quality,
                    filter, size)

    def generate_thumb(self, size=None, quality=None, filter=None):
        if not quality:
            quality = self.image_config['quality']
        if not filter:
            filter = self.image_config['resize_filter']

        resize_tool(self.entry, True, 'thumb', self.process_filename,
                    self.name_builder.fill('{basename}.thumbnail{ext}'),
                    self.conversions_subdir, self.exif_tags, quality,
                    filter, size)

    def copy_original(self):
        copy_original(
            self.entry, self.process_filename,
            self.name_builder.fill('{basename}{ext}'))

    def extract_metadata(self):
        # Is there any GPS data
        gps_data = get_gps_data(self.exif_tags)

        # Insert exif data into database
        exif_all = clean_exif(self.exif_tags)

        if len(exif_all):
            self.entry.media_data_init(exif_all=exif_all)

        if len(gps_data):
            for key in list(gps_data.keys()):
                gps_data['gps_' + key] = gps_data.pop(key)
            self.entry.media_data_init(**gps_data)
Author: spaetz, Project: mediagoblin_blog, Lines: 72, Source: processing.py

Example 5: process_audio

# Required import: from mediagoblin.processing import FilenameBuilder [as alias]
# Or: from mediagoblin.processing.FilenameBuilder import fill [as alias]
def process_audio(entry):
    audio_config = mgg.global_config['media_type:mediagoblin.media_types.audio']

    workbench = mgg.workbench_manager.create_workbench()

    queued_filepath = entry.queued_media_file
    queued_filename = workbench.localized_file(
        mgg.queue_store, queued_filepath,
        'source')
    name_builder = FilenameBuilder(queued_filename)

    webm_audio_filepath = create_pub_filepath(
        entry,
        '{original}.webm'.format(
            original=os.path.splitext(
                queued_filepath[-1])[0]))

    if audio_config['keep_original']:
        with open(queued_filename, 'rb') as queued_file:
            original_filepath = create_pub_filepath(
                entry, name_builder.fill('{basename}{ext}'))

            with mgg.public_store.get_file(original_filepath, 'wb') as \
                    original_file:
                _log.debug('Saving original...')
                original_file.write(queued_file.read())

            entry.media_files['original'] = original_filepath

    transcoder = AudioTranscoder()

    with tempfile.NamedTemporaryFile() as webm_audio_tmp:
        progress_callback = ProgressCallback(entry)

        transcoder.transcode(
            queued_filename,
            webm_audio_tmp.name,
            quality=audio_config['quality'],
            progress_callback=progress_callback)

        transcoder.discover(webm_audio_tmp.name)

        _log.debug('Saving medium...')
        mgg.public_store.get_file(webm_audio_filepath, 'wb').write(
            webm_audio_tmp.read())

        entry.media_files['webm_audio'] = webm_audio_filepath

        # entry.media_data_init(length=int(data.audiolength))

    if audio_config['create_spectrogram']:
        spectrogram_filepath = create_pub_filepath(
            entry,
            '{original}-spectrogram.jpg'.format(
                original=os.path.splitext(
                    queued_filepath[-1])[0]))

        with tempfile.NamedTemporaryFile(suffix='.ogg') as wav_tmp:
            _log.info('Creating OGG source for spectrogram')
            transcoder.transcode(
                queued_filename,
                wav_tmp.name,
                mux_string='vorbisenc quality={0} ! oggmux'.format(
                    audio_config['quality']))

            thumbnailer = AudioThumbnailer()

            with tempfile.NamedTemporaryFile(suffix='.jpg') as spectrogram_tmp:
                thumbnailer.spectrogram(
                    wav_tmp.name,
                    spectrogram_tmp.name,
                    width=mgg.global_config['media:medium']['max_width'],
                    fft_size=audio_config['spectrogram_fft_size'])

                _log.debug('Saving spectrogram...')
                mgg.public_store.get_file(spectrogram_filepath, 'wb').write(
                    spectrogram_tmp.read())

                entry.media_files['spectrogram'] = spectrogram_filepath

                with tempfile.NamedTemporaryFile(suffix='.jpg') as thumb_tmp:
                    thumbnailer.thumbnail_spectrogram(
                        spectrogram_tmp.name,
                        thumb_tmp.name,
                        (mgg.global_config['media:thumb']['max_width'],
                         mgg.global_config['media:thumb']['max_height']))

                    thumb_filepath = create_pub_filepath(
                        entry,
                        '{original}-thumbnail.jpg'.format(
                            original=os.path.splitext(
                                queued_filepath[-1])[0]))

                    mgg.public_store.get_file(thumb_filepath, 'wb').write(
                        thumb_tmp.read())

                    entry.media_files['thumb'] = thumb_filepath
    else:
        entry.media_files['thumb'] = ['fake', 'thumb', 'path.jpg']

#......... some code omitted here .........
Author: imclab, Project: mediagoblin, Lines: 103, Source: processing.py

Example 6: CommonSvgProcessor

# Required import: from mediagoblin.processing import FilenameBuilder [as alias]
# Or: from mediagoblin.processing.FilenameBuilder import fill [as alias]
class CommonSvgProcessor(MediaProcessor):
    """
    Provides a base for various svg processing steps
    """
    acceptable_files = ['original']

    def common_setup(self):
        """
        Set up the workbench directory and pull down the original file
        """
        self.svg_config = mgg.global_config['plugins']['mediagoblin_svg']

        # Conversions subdirectory to avoid collisions
        self.conversions_subdir = os.path.join(
            self.workbench.dir, 'conversions')
        os.mkdir(self.conversions_subdir)

        # Pull down and set up the processing file
        self.process_filename = get_process_filename(
            self.entry, self.workbench, self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

    def generate_preview(self, size=None):
        if not size:
            size = (mgg.global_config['media:medium']['max_width'],
                    mgg.global_config['media:medium']['max_height'])

        if self.svg_config['svg_previews']:
            # delete existing thumbnail, if it doesn't match the original
            if self.entry.media_files.has_key('preview') and \
               self.entry.media_files['preview'] != self.entry.media_files['original']:
                mgg.public_store.delete_file(self.entry.media_files['preview'])
            self.entry.media_files['preview'] = self.entry.media_files.get('original')
        else:
            preview_filename = os.path.join(self.workbench.dir,
                self.name_builder.fill('{basename}.preview.png'))

            render_preview(self.process_filename, preview_filename, size)
            store_public(self.entry, 'preview', preview_filename,
                         self.name_builder.fill('{basename}.preview.png'))

    def generate_thumb(self, size=None):
        if not size:
            size = (mgg.global_config['media:thumb']['max_width'],
                    mgg.global_config['media:thumb']['max_height'])

        if self.svg_config['svg_thumbnails']:
            # delete existing thumbnail, if it doesn't match the original
            if self.entry.media_files.has_key('thumb') and \
               self.entry.media_files['thumb'] != self.entry.media_files['original']:
                mgg.public_store.delete_file(self.entry.media_files['thumb'])
            self.entry.media_files['thumb'] = self.entry.media_files.get('original')
        else:
            thumb_filename = os.path.join(self.workbench.dir,
                self.name_builder.fill('{basename}.thumbnail.png'))
            
            render_preview(self.process_filename, thumb_filename, size)
            store_public(self.entry, 'thumb', thumb_filename)

    def copy_original(self):
        copy_original(
            self.entry, self.process_filename,
            self.name_builder.fill('{basename}{ext}'))
Author: commonsmachinery, Project: mediagoblin_svg, Lines: 65, Source: processing.py

Example 7: CommonPdfProcessor

# Required import: from mediagoblin.processing import FilenameBuilder [as alias]
# Or: from mediagoblin.processing.FilenameBuilder import fill [as alias]
class CommonPdfProcessor(MediaProcessor):
    """
    Provides a base for various pdf processing steps
    """
    acceptable_files = ['original', 'pdf']

    def common_setup(self):
        """
        Set up common pdf processing steps
        """
        # Pull down and set up the processing file
        self.process_filename = get_process_filename(
            self.entry, self.workbench, self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

        self._set_pdf_filename()

    def _set_pdf_filename(self):
        if self.name_builder.ext == '.pdf':
            self.pdf_filename = self.process_filename
        elif self.entry.media_files.get('pdf'):
            self.pdf_filename = self.workbench.localized_file(
                mgg.public_store, self.entry.media_files['pdf'])
        else:
            self.pdf_filename = self._generate_pdf()

    def _skip_processing(self, keyname, **kwargs):
        file_metadata = self.entry.get_file_metadata(keyname)
        skip = True

        if not file_metadata:
            return False

        if keyname == 'thumb':
            if kwargs.get('thumb_size') != file_metadata.get('thumb_size'):
                skip = False
        elif keyname == 'medium':
            if kwargs.get('size') != file_metadata.get('size'):
                skip = False

        return skip

    def copy_original(self):
        copy_original(
            self.entry, self.process_filename,
            self.name_builder.fill('{basename}{ext}'))

    def generate_thumb(self, thumb_size=None):
        if not thumb_size:
            thumb_size = (mgg.global_config['media:thumb']['max_width'],
                          mgg.global_config['media:thumb']['max_height'])

        if self._skip_processing('thumb', thumb_size=thumb_size):
            return

        # Note: pdftocairo adds '.png', so don't include an ext
        thumb_filename = os.path.join(self.workbench.dir,
                                      self.name_builder.fill(
                                          '{basename}.thumbnail'))

        executable = where('pdftocairo')
        args = [executable, '-scale-to', str(min(thumb_size)),
                '-singlefile', '-png', self.pdf_filename, thumb_filename]

        _log.debug('calling {0}'.format(repr(' '.join(args))))
        Popen(executable=executable, args=args).wait()

        # since pdftocairo added '.png', we need to include it with the
        # filename
        store_public(self.entry, 'thumb', thumb_filename + '.png',
                     self.name_builder.fill('{basename}.thumbnail.png'))

        self.entry.set_file_metadata('thumb', thumb_size=thumb_size)

    def _generate_pdf(self):
        """
        Store the pdf. If the file is not a pdf, make it a pdf
        """
        tmp_pdf = os.path.splitext(self.process_filename)[0] + '.pdf'

        unoconv = where('unoconv')
        args = [unoconv, '-v', '-f', 'pdf', self.process_filename]
        _log.debug('calling %s' % repr(args))
        Popen(executable=unoconv,
              args=args).wait()

        if not os.path.exists(tmp_pdf):
            _log.debug('unoconv failed to convert file to pdf')
            raise BadMediaFail()

        store_public(self.entry, 'pdf', tmp_pdf,
                     self.name_builder.fill('{basename}.pdf'))

        return self.workbench.localized_file(
            mgg.public_store, self.entry.media_files['pdf'])

    def extract_pdf_info(self):
        pdf_info_dict = pdf_info(self.pdf_filename)
        self.entry.media_data_init(**pdf_info_dict)

#......... some code omitted here .........
Author: ausbin, Project: mediagoblin, Lines: 103, Source: processing.py

Example 8: CommonVideoProcessor

# Required import: from mediagoblin.processing import FilenameBuilder [as alias]
# Or: from mediagoblin.processing.FilenameBuilder import fill [as alias]
class CommonVideoProcessor(MediaProcessor):
    """
    Provides a base for various video processing steps
    """
    acceptable_files = ['original', 'best_quality', 'webm_video']

    def common_setup(self):
        self.video_config = mgg \
            .global_config['media_type:mediagoblin.media_types.video']

        # Pull down and set up the processing file
        self.process_filename = get_process_filename(
            self.entry, self.workbench, self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

        self.transcoder = transcoders.VideoTranscoder()
        self.did_transcode = False

    def copy_original(self):
        # If we didn't transcode, then we need to keep the original
        if not self.did_transcode or \
           (self.video_config['keep_original'] and self.did_transcode):
            copy_original(
                self.entry, self.process_filename,
                self.name_builder.fill('{basename}{ext}'))

    def _keep_best(self):
        """
        If there is no original, keep the best file that we have
        """
        if not self.entry.media_files.get('best_quality'):
            # Save the best quality file if no original?
            if not self.entry.media_files.get('original') and \
                    self.entry.media_files.get('webm_video'):
                self.entry.media_files['best_quality'] = self.entry \
                    .media_files['webm_video']


    def transcode(self, medium_size=None, vp8_quality=None, vp8_threads=None,
                  vorbis_quality=None):
        progress_callback = ProgressCallback(self.entry)
        tmp_dst = os.path.join(self.workbench.dir,
                               self.name_builder.fill('{basename}.medium.webm'))

        if not medium_size:
            medium_size = (
                mgg.global_config['media:medium']['max_width'],
                mgg.global_config['media:medium']['max_height'])
        if not vp8_quality:
            vp8_quality = self.video_config['vp8_quality']
        if not vp8_threads:
            vp8_threads = self.video_config['vp8_threads']
        if not vorbis_quality:
            vorbis_quality = self.video_config['vorbis_quality']

        # Extract metadata and keep a record of it
        metadata = self.transcoder.discover(self.process_filename)
        store_metadata(self.entry, metadata)

        # Figure out whether or not we need to transcode this video or
        # if we can skip it
        if skip_transcode(metadata, medium_size):
            _log.debug('Skipping transcoding')

            dst_dimensions = metadata['videowidth'], metadata['videoheight']

            # If there is an original and transcoded, delete the transcoded
            # since it must be of lower quality than the original
            if self.entry.media_files.get('original') and \
               self.entry.media_files.get('webm_video'):
                self.entry.media_files['webm_video'].delete()

        else:
            self.transcoder.transcode(self.process_filename, tmp_dst,
                                      vp8_quality=vp8_quality,
                                      vp8_threads=vp8_threads,
                                      vorbis_quality=vorbis_quality,
                                      progress_callback=progress_callback,
                                      dimensions=tuple(medium_size))

            dst_dimensions = self.transcoder.dst_data.videowidth,\
                self.transcoder.dst_data.videoheight

            self._keep_best()

            # Push transcoded video to public storage
            _log.debug('Saving medium...')
            store_public(self.entry, 'webm_video', tmp_dst,
                         self.name_builder.fill('{basename}.medium.webm'))
            _log.debug('Saved medium')

            self.did_transcode = True

        # Save the width and height of the transcoded video
        self.entry.media_data_init(
            width=dst_dimensions[0],
            height=dst_dimensions[1])

    def generate_thumb(self, thumb_size=None):
        # Temporary file for the video thumbnail (cleaned up with workbench)
#......... some code omitted here .........
Author: spaetz, Project: mediagoblin_blog, Lines: 103, Source: processing.py

Example 9: CommonAudioProcessor

# Required import: from mediagoblin.processing import FilenameBuilder [as alias]
# Or: from mediagoblin.processing.FilenameBuilder import fill [as alias]
class CommonAudioProcessor(MediaProcessor):
    """
    Provides a base for various audio processing steps
    """
    acceptable_files = ['original', 'best_quality', 'webm_audio']

    def common_setup(self):
        """
        Setup the workbench directory and pull down the original file, add
        the audio_config, transcoder, thumbnailer and spectrogram_tmp path
        """
        self.audio_config = mgg \
            .global_config['plugins']['mediagoblin.media_types.audio']

        # Pull down and set up the processing file
        self.process_filename = get_process_filename(
            self.entry, self.workbench, self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

        self.transcoder = AudioTranscoder()
        self.thumbnailer = AudioThumbnailer()

    def copy_original(self):
        if self.audio_config['keep_original']:
            copy_original(
                self.entry, self.process_filename,
                self.name_builder.fill('{basename}{ext}'))

    def _keep_best(self):
        """
        If there is no original, keep the best file that we have
        """
        if not self.entry.media_files.get('best_quality'):
            # Save the best quality file if no original?
            if not self.entry.media_files.get('original') and \
                    self.entry.media_files.get('webm_audio'):
                self.entry.media_files['best_quality'] = self.entry \
                    .media_files['webm_audio']

    def _skip_processing(self, keyname, **kwargs):
        file_metadata = self.entry.get_file_metadata(keyname)
        skip = True

        if not file_metadata:
            return False

        if keyname == 'webm_audio':
            if kwargs.get('quality') != file_metadata.get('quality'):
                skip = False
        elif keyname == 'spectrogram':
            if kwargs.get('max_width') != file_metadata.get('max_width'):
                skip = False
            elif kwargs.get('fft_size') != file_metadata.get('fft_size'):
                skip = False
        elif keyname == 'thumb':
            if kwargs.get('size') != file_metadata.get('size'):
                skip = False

        return skip

    def transcode(self, quality=None):
        if not quality:
            quality = self.audio_config['quality']

        if self._skip_processing('webm_audio', quality=quality):
            return

        progress_callback = ProgressCallback(self.entry)
        webm_audio_tmp = os.path.join(self.workbench.dir,
                                      self.name_builder.fill(
                                          '{basename}{ext}'))

        self.transcoder.transcode(
            self.process_filename,
            webm_audio_tmp,
            quality=quality,
            progress_callback=progress_callback)

        self._keep_best()

        _log.debug('Saving medium...')
        store_public(self.entry, 'webm_audio', webm_audio_tmp,
                     self.name_builder.fill('{basename}.medium.webm'))

        self.entry.set_file_metadata('webm_audio', **{'quality': quality})

    def create_spectrogram(self, max_width=None, fft_size=None):
        if not max_width:
            max_width = mgg.global_config['media:medium']['max_width']
        if not fft_size:
            fft_size = self.audio_config['spectrogram_fft_size']

        if self._skip_processing('spectrogram', max_width=max_width,
                                 fft_size=fft_size):
            return
        wav_tmp = os.path.join(self.workbench.dir, self.name_builder.fill(
            '{basename}.ogg'))
        _log.info('Creating OGG source for spectrogram')
        self.transcoder.transcode(self.process_filename, wav_tmp,
                                  mux_name='oggmux')
#......... some code omitted here .........
Author: ausbin, Project: mediagoblin, Lines: 103, Source: processing.py

Example 10: CommonStlProcessor

# Required import: from mediagoblin.processing import FilenameBuilder [as alias]
# Or: from mediagoblin.processing.FilenameBuilder import fill [as alias]
class CommonStlProcessor(MediaProcessor):
    """
    Provides a common base for various stl processing steps
    """
    acceptable_files = ['original']

    def common_setup(self):
        # Pull down and set up the processing file
        self.process_filename = get_process_filename(
            self.entry, self.workbench, self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

        self._set_ext()
        self._set_model()
        self._set_greatest()

    def _set_ext(self):
        ext = self.name_builder.ext[1:]

        if not ext:
            ext = None

        self.ext = ext

    def _set_model(self):
        """
        Attempt to parse the model file and divine some useful
        information about it.
        """
        with open(self.process_filename, 'rb') as model_file:
            self.model = model_loader.auto_detect(model_file, self.ext)

    def _set_greatest(self):
        greatest = [self.model.width, self.model.height, self.model.depth]
        greatest.sort()
        self.greatest = greatest[-1]

    def copy_original(self):
        copy_original(
            self.entry, self.process_filename,
            self.name_builder.fill('{basename}{ext}'))

    def _snap(self, keyname, name, camera, size, project="ORTHO"):
        filename = self.name_builder.fill(name)
        workbench_path = self.workbench.joinpath(filename)
        shot = {
            "model_path": self.process_filename,
            "model_ext": self.ext,
            "camera_coord": camera,
            "camera_focus": self.model.average,
            "camera_clip": self.greatest*10,
            "greatest": self.greatest,
            "projection": project,
            "width": size[0],
            "height": size[1],
            "out_file": workbench_path,
            }
        blender_render(shot)

        # make sure the image rendered to the workbench path
        assert os.path.exists(workbench_path)

        # copy it up!
        store_public(self.entry, keyname, workbench_path, filename)

    def generate_thumb(self, thumb_size=None):
        if not thumb_size:
            thumb_size = (mgg.global_config['media:thumb']['max_width'],
                          mgg.global_config['media:thumb']['max_height'])

        self._snap(
            "thumb",
            "{basename}.thumb.jpg",
            [0, self.greatest*-1.5, self.greatest],
            thumb_size,
            project="PERSP")

    def generate_perspective(self, size=None):
        if not size:
            size = (mgg.global_config['media:medium']['max_width'],
                    mgg.global_config['media:medium']['max_height'])

        self._snap(
            "perspective",
            "{basename}.perspective.jpg",
            [0, self.greatest*-1.5, self.greatest],
            size,
            project="PERSP")

    def generate_topview(self, size=None):
        if not size:
            size = (mgg.global_config['media:medium']['max_width'],
                    mgg.global_config['media:medium']['max_height'])

        self._snap(
            "top",
            "{basename}.top.jpg",
            [self.model.average[0], self.model.average[1],
             self.greatest*2],
            size)
#......... some code omitted here .........
Author: spaetz, Project: mediagoblin_blog, Lines: 103, Source: processing.py

Example 11: CommonImageProcessor

# Required import: from mediagoblin.processing import FilenameBuilder [as alias]
# Or: from mediagoblin.processing.FilenameBuilder import fill [as alias]
class CommonImageProcessor(MediaProcessor):
    """
    Provides a base for various media processing steps
    """
    # list of acceptable file keys in order of preference for reprocessing
    acceptable_files = ['original', 'medium']

    def common_setup(self):
        """
        Set up the workbench directory and pull down the original file
        """
        self.image_config = mgg.global_config['plugins'][
            'mediagoblin.media_types.image']

        ## @@: Should this be two functions?
        # Conversions subdirectory to avoid collisions
        self.conversions_subdir = os.path.join(
            self.workbench.dir, 'conversions')
        os.mkdir(self.conversions_subdir)

        # Pull down and set up the processing file
        self.process_filename = get_process_filename(
            self.entry, self.workbench, self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

        # Exif extraction
        self.exif_tags = extract_exif(self.process_filename)

    def generate_medium_if_applicable(self, size=None, quality=None,
                                      filter=None):
        if not quality:
            quality = self.image_config['quality']
        if not filter:
            filter = self.image_config['resize_filter']

        resize_tool(self.entry, False, 'medium', self.process_filename,
                    self.name_builder.fill('{basename}.medium{ext}'),
                    self.conversions_subdir, self.exif_tags, quality,
                    filter, size)

    def generate_thumb(self, size=None, quality=None, filter=None):
        if not quality:
            quality = self.image_config['quality']
        if not filter:
            filter = self.image_config['resize_filter']

        resize_tool(self.entry, True, 'thumb', self.process_filename,
                    self.name_builder.fill('{basename}.thumbnail{ext}'),
                    self.conversions_subdir, self.exif_tags, quality,
                    filter, size)

    def copy_original(self):
        copy_original(
            self.entry, self.process_filename,
            self.name_builder.fill('{basename}{ext}'))

    def extract_metadata(self, file):
        """ Extract all the metadata from the image and store """
        # Extract GPS data and store in Location
        gps_data = get_gps_data(self.exif_tags)

        if len(gps_data):
            Location.create({"position": gps_data}, self.entry)

        # Insert exif data into database
        exif_all = clean_exif(self.exif_tags)

        if len(exif_all):
            self.entry.media_data_init(exif_all=exif_all)

        # Extract file metadata
        try:
            im = Image.open(self.process_filename)
        except IOError:
            raise BadMediaFail()

        metadata = {
            "width": im.size[0],
            "height": im.size[1],
        }

        self.entry.set_file_metadata(file, **metadata)
Author: piratas, Project: biblioteca, Lines: 84, Source: processing.py

Example 12: CommonPdfProcessor

# Required import: from mediagoblin.processing import FilenameBuilder [as alias]
# Or: from mediagoblin.processing.FilenameBuilder import fill [as alias]
class CommonPdfProcessor(MediaProcessor):
    """
    Provides a base for various pdf processing steps
    """

    acceptable_files = ["original", "pdf"]

    def common_setup(self):
        """
        Set up common pdf processing steps
        """
        # Pull down and set up the processing file
        self.process_filename = get_process_filename(self.entry, self.workbench, self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

        self._set_pdf_filename()

    def _set_pdf_filename(self):
        if self.name_builder.ext == ".pdf":
            self.pdf_filename = self.process_filename
        elif self.entry.media_files.get("pdf"):
            self.pdf_filename = self.workbench.localized_file(mgg.public_store, self.entry.media_files["pdf"])
        else:
            self.pdf_filename = self._generate_pdf()

    def _skip_processing(self, keyname, **kwargs):
        file_metadata = self.entry.get_file_metadata(keyname)
        skip = True

        if not file_metadata:
            return False

        if keyname == "thumb":
            if kwargs.get("thumb_size") != file_metadata.get("thumb_size"):
                skip = False
        elif keyname == "medium":
            if kwargs.get("size") != file_metadata.get("size"):
                skip = False

        return skip

    def copy_original(self):
        copy_original(self.entry, self.process_filename, self.name_builder.fill("{basename}{ext}"))

    def generate_thumb(self, thumb_size=None):
        if not thumb_size:
            thumb_size = (mgg.global_config["media:thumb"]["max_width"], mgg.global_config["media:thumb"]["max_height"])

        if self._skip_processing("thumb", thumb_size=thumb_size):
            return

        # Note: pdftocairo adds '.png', so don't include an ext
        thumb_filename = os.path.join(self.workbench.dir, self.name_builder.fill("{basename}.thumbnail"))

        executable = where("pdftocairo")
        args = [executable, "-scale-to", str(min(thumb_size)), "-singlefile", "-png", self.pdf_filename, thumb_filename]

        _log.debug("calling {0}".format(repr(" ".join(args))))
        Popen(executable=executable, args=args).wait()

        # since pdftocairo added '.png', we need to include it with the
        # filename
        store_public(self.entry, "thumb", thumb_filename + ".png", self.name_builder.fill("{basename}.thumbnail.png"))

        self.entry.set_file_metadata("thumb", thumb_size=thumb_size)

    def _generate_pdf(self):
        """
        Store the pdf. If the file is not a pdf, make it a pdf
        """
        # unoconv writes the converted pdf next to the source file
        tmp_pdf = os.path.splitext(self.process_filename)[0] + ".pdf"

        unoconv = where("unoconv")
        Popen(executable=unoconv, args=[unoconv, "-v", "-f", "pdf", self.process_filename]).wait()

        if not os.path.exists(tmp_pdf):
            _log.debug("unoconv failed to convert file to pdf")
            raise BadMediaFail()

        store_public(self.entry, "pdf", tmp_pdf, self.name_builder.fill("{basename}.pdf"))

        return self.workbench.localized_file(mgg.public_store, self.entry.media_files["pdf"])

    def extract_pdf_info(self):
        pdf_info_dict = pdf_info(self.pdf_filename)
        self.entry.media_data_init(**pdf_info_dict)

    def generate_medium(self, size=None):
        if not size:
            size = (mgg.global_config["media:medium"]["max_width"], mgg.global_config["media:medium"]["max_height"])

        if self._skip_processing("medium", size=size):
            return

        # Note: pdftocairo adds '.png', so don't include an ext
        filename = os.path.join(self.workbench.dir, self.name_builder.fill("{basename}.medium"))

        executable = where("pdftocairo")
        args = [executable, "-scale-to", str(min(size)), "-singlefile", "-png", self.pdf_filename, filename]

#......... some code omitted here .........
Author: rodney757, Project: mediagoblin, Lines: 103, Source: processing.py

Example 13: process_audio

# Required import: from mediagoblin.processing import FilenameBuilder [as alias]
# Or: from mediagoblin.processing.FilenameBuilder import fill [as alias]
def process_audio(proc_state):
    """Code to process uploaded audio. Will be run by celery.

    A Workbench() represents a local temporary dir. It is automatically
    cleaned up when this function exits.
    """
    entry = proc_state.entry
    workbench = proc_state.workbench
    audio_config = mgg.global_config['media_type:mediagoblin.media_types.audio']

    queued_filepath = entry.queued_media_file
    queued_filename = workbench.localized_file(
        mgg.queue_store, queued_filepath,
        'source')
    name_builder = FilenameBuilder(queued_filename)

    webm_audio_filepath = create_pub_filepath(
        entry,
        '{original}.webm'.format(
            original=os.path.splitext(
                queued_filepath[-1])[0]))

    if audio_config['keep_original']:
        with open(queued_filename, 'rb') as queued_file:
            original_filepath = create_pub_filepath(
                entry, name_builder.fill('{basename}{ext}'))

            with mgg.public_store.get_file(original_filepath, 'wb') as \
                    original_file:
                _log.debug('Saving original...')
                original_file.write(queued_file.read())

            entry.media_files['original'] = original_filepath

    transcoder = AudioTranscoder()

    with NamedTemporaryFile(dir=workbench.dir) as webm_audio_tmp:
        progress_callback = ProgressCallback(entry)

        transcoder.transcode(
            queued_filename,
            webm_audio_tmp.name,
            quality=audio_config['quality'],
            progress_callback=progress_callback)

        transcoder.discover(webm_audio_tmp.name)

        _log.debug('Saving medium...')
        mgg.public_store.get_file(webm_audio_filepath, 'wb').write(
            webm_audio_tmp.read())

        entry.media_files['webm_audio'] = webm_audio_filepath

        # entry.media_data_init(length=int(data.audiolength))

    if audio_config['create_spectrogram']:
        spectrogram_filepath = create_pub_filepath(
            entry,
            '{original}-spectrogram.jpg'.format(
                original=os.path.splitext(
                    queued_filepath[-1])[0]))

        with NamedTemporaryFile(dir=workbench.dir, suffix='.ogg') as wav_tmp:
            _log.info('Creating OGG source for spectrogram')
            transcoder.transcode(
                queued_filename,
                wav_tmp.name,
                mux_string='vorbisenc quality={0} ! oggmux'.format(
                    audio_config['quality']))

            thumbnailer = AudioThumbnailer()

            with NamedTemporaryFile(dir=workbench.dir, suffix='.jpg') as spectrogram_tmp:
                thumbnailer.spectrogram(
                    wav_tmp.name,
                    spectrogram_tmp.name,
                    width=mgg.global_config['media:medium']['max_width'],
                    fft_size=audio_config['spectrogram_fft_size'])

                _log.debug('Saving spectrogram...')
                mgg.public_store.get_file(spectrogram_filepath, 'wb').write(
                    spectrogram_tmp.read())

                entry.media_files['spectrogram'] = spectrogram_filepath

                with NamedTemporaryFile(dir=workbench.dir, suffix='.jpg') as thumb_tmp:
                    thumbnailer.thumbnail_spectrogram(
                        spectrogram_tmp.name,
                        thumb_tmp.name,
                        (mgg.global_config['media:thumb']['max_width'],
                         mgg.global_config['media:thumb']['max_height']))

                    thumb_filepath = create_pub_filepath(
                        entry,
                        '{original}-thumbnail.jpg'.format(
                            original=os.path.splitext(
                                queued_filepath[-1])[0]))

                    mgg.public_store.get_file(thumb_filepath, 'wb').write(
                        thumb_tmp.read())
#......... some code omitted here .........
Author: RichoHan, Project: MediaGoblin, Lines: 103, Source: processing.py

Example 14: CommonAudioProcessor

# Required import: from mediagoblin.processing import FilenameBuilder [as alias]
# Or: from mediagoblin.processing.FilenameBuilder import fill [as alias]
class CommonAudioProcessor(MediaProcessor):
    """
    Provides a base for various audio processing steps
    """

    acceptable_files = ["original", "best_quality", "webm_audio"]

    def common_setup(self):
        """
        Setup the workbench directory and pull down the original file, add
        the audio_config, transcoder, thumbnailer and spectrogram_tmp path
        """
        self.audio_config = mgg.global_config["plugins"]["mediagoblin.media_types.audio"]

        # Pull down and set up the processing file
        self.process_filename = get_process_filename(self.entry, self.workbench, self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

        self.transcoder = AudioTranscoder()
        self.thumbnailer = AudioThumbnailer()

    def copy_original(self):
        if self.audio_config["keep_original"]:
            copy_original(self.entry, self.process_filename, self.name_builder.fill("{basename}{ext}"))

    def _keep_best(self):
        """
        If there is no original, keep the best file that we have
        """
        if not self.entry.media_files.get("best_quality"):
            # Save the best quality file if no original?
            if not self.entry.media_files.get("original") and self.entry.media_files.get("webm_audio"):
                self.entry.media_files["best_quality"] = self.entry.media_files["webm_audio"]

    def _skip_processing(self, keyname, **kwargs):
        file_metadata = self.entry.get_file_metadata(keyname)
        skip = True

        if not file_metadata:
            return False

        if keyname == "webm_audio":
            if kwargs.get("quality") != file_metadata.get("quality"):
                skip = False
        elif keyname == "spectrogram":
            if kwargs.get("max_width") != file_metadata.get("max_width"):
                skip = False
            elif kwargs.get("fft_size") != file_metadata.get("fft_size"):
                skip = False
        elif keyname == "thumb":
            if kwargs.get("size") != file_metadata.get("size"):
                skip = False

        return skip

    def transcode(self, quality=None):
        if not quality:
            quality = self.audio_config["quality"]

        if self._skip_processing("webm_audio", quality=quality):
            return

        progress_callback = ProgressCallback(self.entry)
        webm_audio_tmp = os.path.join(self.workbench.dir, self.name_builder.fill("{basename}{ext}"))

        self.transcoder.transcode(
            self.process_filename, webm_audio_tmp, quality=quality, progress_callback=progress_callback
        )

        self._keep_best()

        _log.debug("Saving medium...")
        store_public(self.entry, "webm_audio", webm_audio_tmp, self.name_builder.fill("{basename}.medium.webm"))

        self.entry.set_file_metadata("webm_audio", **{"quality": quality})

    def create_spectrogram(self, max_width=None, fft_size=None):
        if not max_width:
            max_width = mgg.global_config["media:medium"]["max_width"]
        if not fft_size:
            fft_size = self.audio_config["spectrogram_fft_size"]

        if self._skip_processing("spectrogram", max_width=max_width, fft_size=fft_size):
            return
        wav_tmp = os.path.join(self.workbench.dir, self.name_builder.fill("{basename}.ogg"))
        _log.info("Creating OGG source for spectrogram")
        self.transcoder.transcode(self.process_filename, wav_tmp, mux_name="oggmux")
        spectrogram_tmp = os.path.join(self.workbench.dir, self.name_builder.fill("{basename}-spectrogram.jpg"))
        self.thumbnailer.spectrogram(wav_tmp, spectrogram_tmp, width=max_width, fft_size=fft_size)

        _log.debug("Saving spectrogram...")
        store_public(self.entry, "spectrogram", spectrogram_tmp, self.name_builder.fill("{basename}.spectrogram.jpg"))

        file_metadata = {"max_width": max_width, "fft_size": fft_size}
        self.entry.set_file_metadata("spectrogram", **file_metadata)

    def generate_thumb(self, size=None):
        if not size:
            max_width = mgg.global_config["media:thumb"]["max_width"]
            max_height = mgg.global_config["media:thumb"]["max_height"]
#......... some code omitted here .........
Author: pythonsnake, Project: MediaDwarf, Lines: 103, Source: processing.py

Example 15: process_video

# Required import: from mediagoblin.processing import FilenameBuilder [as alias]
# Or: from mediagoblin.processing.FilenameBuilder import fill [as alias]
def process_video(proc_state):
    """
    Process a video entry, transcode the queued media files (originals) and
    create a thumbnail for the entry.

    A Workbench() represents a local temporary dir. It is automatically
    cleaned up when this function exits.
    """
    entry = proc_state.entry
    workbench = proc_state.workbench
    video_config = mgg.global_config['media_type:mediagoblin.media_types.video']

    queued_filepath = entry.queued_media_file
    queued_filename = proc_state.get_queued_filename()
    name_builder = FilenameBuilder(queued_filename)

    medium_filepath = create_pub_filepath(
        entry, name_builder.fill('{basename}-640p.webm'))

    thumbnail_filepath = create_pub_filepath(
        entry, name_builder.fill('{basename}.thumbnail.jpg'))

    # Create a temporary file for the video destination (cleaned up with workbench)
    tmp_dst = NamedTemporaryFile(dir=workbench.dir, delete=False)
    with tmp_dst:
        # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square
        progress_callback = ProgressCallback(entry)

        dimensions = (
            mgg.global_config['media:medium']['max_width'],
            mgg.global_config['media:medium']['max_height'])

        # Extract metadata and keep a record of it
        metadata = transcoders.VideoTranscoder().discover(queued_filename)
        store_metadata(entry, metadata)

        # Figure out whether or not we need to transcode this video or
        # if we can skip it
        if skip_transcode(metadata):
            _log.debug('Skipping transcoding')

            dst_dimensions = metadata['videowidth'], metadata['videoheight']

            # Push original file to public storage
            _log.debug('Saving original...')
            proc_state.copy_original(queued_filepath[-1])

            did_transcode = False
        else:
            transcoder = transcoders.VideoTranscoder()

            transcoder.transcode(queued_filename, tmp_dst.name,
                    vp8_quality=video_config['vp8_quality'],
                    vp8_threads=video_config['vp8_threads'],
                    vorbis_quality=video_config['vorbis_quality'],
                    progress_callback=progress_callback,
                    dimensions=dimensions)

            dst_dimensions = transcoder.dst_data.videowidth,\
                    transcoder.dst_data.videoheight

            # Push transcoded video to public storage
            _log.debug('Saving medium...')
            mgg.public_store.copy_local_to_storage(tmp_dst.name, medium_filepath)
            _log.debug('Saved medium')

            entry.media_files['webm_640'] = medium_filepath

            did_transcode = True

        # Save the width and height of the transcoded video
        entry.media_data_init(
            width=dst_dimensions[0],
            height=dst_dimensions[1])

    # Temporary file for the video thumbnail (cleaned up with workbench)
    tmp_thumb = NamedTemporaryFile(dir=workbench.dir, suffix='.jpg', delete=False)

    with tmp_thumb:
        # Create a thumbnail.jpg that fits in a 180x180 square
        transcoders.VideoThumbnailerMarkII(
                queued_filename,
                tmp_thumb.name,
                180)

        # Push the thumbnail to public storage
        _log.debug('Saving thumbnail...')
        mgg.public_store.copy_local_to_storage(tmp_thumb.name, thumbnail_filepath)
        entry.media_files['thumb'] = thumbnail_filepath

    # save the original... but only if we did a transcoding
    # (if we skipped transcoding and just kept the original anyway as the main
    #  media, then why would we save the original twice?)
    if video_config['keep_original'] and did_transcode:
        # Push original file to public storage
        _log.debug('Saving original...')
        proc_state.copy_original(queued_filepath[-1])

    # Remove queued media file from storage and database
    proc_state.delete_queue_file()
Author: praveen97uma, Project: goblin, Lines: 102, Source: processing.py


Note: The mediagoblin.processing.FilenameBuilder.fill examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are drawn from open-source projects contributed by their developers; copyright remains with the original authors, and distribution and use should follow each project's license. Do not reproduce without permission.