author    | Christopher Allan Webber <cwebber@dustycloud.org> | 2013-03-01 16:29:22 -0600
committer | Christopher Allan Webber <cwebber@dustycloud.org> | 2013-03-01 16:29:22 -0600
commit    | ed6344009abb31326acabe95449aaa449f66f4b4 (patch)
tree      | 03d65052d550b890074dde3c18857371be974f5c /mediagoblin/media_types
parent    | 5c754fdaeeddb1bbeff165bbecb77e33b75b3c7d (diff)
parent    | b7a3798e1806eb52a244088fb600376ff25e0de9 (diff)
download  | mediagoblin-ed6344009abb31326acabe95449aaa449f66f4b4.tar.lz
          | mediagoblin-ed6344009abb31326acabe95449aaa449f66f4b4.tar.xz
          | mediagoblin-ed6344009abb31326acabe95449aaa449f66f4b4.zip
Merge branch 'master' into joar-skip_transcoding
Conflicts:
mediagoblin/config_spec.ini
Diffstat (limited to 'mediagoblin/media_types')
-rw-r--r-- | mediagoblin/media_types/__init__.py          |  3
-rw-r--r-- | mediagoblin/media_types/ascii/models.py      |  3
-rw-r--r-- | mediagoblin/media_types/ascii/processing.py  |  6
-rw-r--r-- | mediagoblin/media_types/audio/models.py      |  3
-rw-r--r-- | mediagoblin/media_types/audio/processing.py  |  6
-rw-r--r-- | mediagoblin/media_types/image/models.py      |  3
-rw-r--r-- | mediagoblin/media_types/image/processing.py  | 41
-rw-r--r-- | mediagoblin/media_types/stl/models.py        |  3
-rw-r--r-- | mediagoblin/media_types/stl/processing.py    |  7
-rw-r--r-- | mediagoblin/media_types/video/models.py      |  3
-rw-r--r-- | mediagoblin/media_types/video/processing.py  | 18
-rw-r--r-- | mediagoblin/media_types/video/transcoders.py | 37
12 files changed, 89 insertions, 44 deletions
diff --git a/mediagoblin/media_types/__init__.py b/mediagoblin/media_types/__init__.py
index 06763510..0abb38d3 100644
--- a/mediagoblin/media_types/__init__.py
+++ b/mediagoblin/media_types/__init__.py
@@ -49,7 +49,8 @@ def sniff_media(media):
     for media_type, manager in get_media_managers():
         _log.info('Sniffing {0}'.format(media_type))
-        if manager['sniff_handler'](media_file, media=media):
+        if 'sniff_handler' in manager and \
+                manager['sniff_handler'](media_file, media=media):
             _log.info('{0} accepts the file'.format(media_type))
             return media_type, manager
         else:
diff --git a/mediagoblin/media_types/ascii/models.py b/mediagoblin/media_types/ascii/models.py
index 3416993c..c7505292 100644
--- a/mediagoblin/media_types/ascii/models.py
+++ b/mediagoblin/media_types/ascii/models.py
@@ -32,7 +32,8 @@ class AsciiData(Base):
     media_entry = Column(Integer, ForeignKey('core__media_entries.id'),
         primary_key=True)
     get_media_entry = relationship("MediaEntry",
-        backref=backref(BACKREF_NAME, cascade="all, delete-orphan"))
+        backref=backref(BACKREF_NAME, uselist=False,
+                        cascade="all, delete-orphan"))
 
 
 DATA_MODEL = AsciiData
diff --git a/mediagoblin/media_types/ascii/processing.py b/mediagoblin/media_types/ascii/processing.py
index 254717eb..382cd015 100644
--- a/mediagoblin/media_types/ascii/processing.py
+++ b/mediagoblin/media_types/ascii/processing.py
@@ -19,7 +19,6 @@ import Image
 import logging
 
 from mediagoblin import mg_globals as mgg
-from mediagoblin.decorators import get_workbench
 from mediagoblin.processing import create_pub_filepath
 from mediagoblin.media_types.ascii import asciitoimage
 
@@ -39,13 +38,14 @@ def sniff_handler(media_file, **kw):
     return False
 
 
-@get_workbench
-def process_ascii(entry, workbench=None):
+def process_ascii(proc_state):
     """Code to process a txt file. Will be run by celery.
 
     A Workbench() represents a local tempory dir. It is automatically
     cleaned up when this function exits.
     """
+    entry = proc_state.entry
+    workbench = proc_state.workbench
     ascii_config = mgg.global_config['media_type:mediagoblin.media_types.ascii']
     # Conversions subdirectory to avoid collisions
     conversions_subdir = os.path.join(
diff --git a/mediagoblin/media_types/audio/models.py b/mediagoblin/media_types/audio/models.py
index 368ab1eb..d01367d5 100644
--- a/mediagoblin/media_types/audio/models.py
+++ b/mediagoblin/media_types/audio/models.py
@@ -32,7 +32,8 @@ class AudioData(Base):
     media_entry = Column(Integer, ForeignKey('core__media_entries.id'),
         primary_key=True)
     get_media_entry = relationship("MediaEntry",
-        backref=backref(BACKREF_NAME, cascade="all, delete-orphan"))
+        backref=backref(BACKREF_NAME, uselist=False,
+                        cascade="all, delete-orphan"))
 
 
 DATA_MODEL = AudioData
diff --git a/mediagoblin/media_types/audio/processing.py b/mediagoblin/media_types/audio/processing.py
index e12cefe6..5dffcaf9 100644
--- a/mediagoblin/media_types/audio/processing.py
+++ b/mediagoblin/media_types/audio/processing.py
@@ -19,7 +19,6 @@ from tempfile import NamedTemporaryFile
 import os
 
 from mediagoblin import mg_globals as mgg
-from mediagoblin.decorators import get_workbench
 from mediagoblin.processing import (create_pub_filepath, BadMediaFail,
                                     FilenameBuilder, ProgressCallback)
 
@@ -43,13 +42,14 @@ def sniff_handler(media_file, **kw):
     return False
 
 
-@get_workbench
-def process_audio(entry, workbench=None):
+def process_audio(proc_state):
     """Code to process uploaded audio. Will be run by celery.
 
     A Workbench() represents a local tempory dir. It is automatically
     cleaned up when this function exits.
     """
+    entry = proc_state.entry
+    workbench = proc_state.workbench
     audio_config = mgg.global_config['media_type:mediagoblin.media_types.audio']
 
     queued_filepath = entry.queued_media_file
diff --git a/mediagoblin/media_types/image/models.py b/mediagoblin/media_types/image/models.py
index 63d80aa8..b2ea3960 100644
--- a/mediagoblin/media_types/image/models.py
+++ b/mediagoblin/media_types/image/models.py
@@ -33,7 +33,8 @@ class ImageData(Base):
     media_entry = Column(Integer, ForeignKey('core__media_entries.id'),
         primary_key=True)
     get_media_entry = relationship("MediaEntry",
-        backref=backref(BACKREF_NAME, cascade="all, delete-orphan"))
+        backref=backref(BACKREF_NAME, uselist=False,
+                        cascade="all, delete-orphan"))
 
     width = Column(Integer)
     height = Column(Integer)
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index e6a34ca0..ca88d3f4 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -19,7 +19,6 @@ import os
 import logging
 
 from mediagoblin import mg_globals as mgg
-from mediagoblin.decorators import get_workbench
 from mediagoblin.processing import BadMediaFail, \
     create_pub_filepath, FilenameBuilder
 from mediagoblin.tools.exif import exif_fix_image_orientation, \
@@ -28,6 +27,12 @@ from mediagoblin.tools.exif import exif_fix_image_orientation, \
 
 _log = logging.getLogger(__name__)
 
+PIL_FILTERS = {
+    'NEAREST': Image.NEAREST,
+    'BILINEAR': Image.BILINEAR,
+    'BICUBIC': Image.BICUBIC,
+    'ANTIALIAS': Image.ANTIALIAS}
+
 
 def resize_image(entry, filename, new_path, exif_tags, workdir, new_size,
                  size_limits=(0, 0)):
@@ -47,7 +52,19 @@ def resize_image(entry, filename, new_path, exif_tags, workdir, new_size,
     except IOError:
         raise BadMediaFail()
     resized = exif_fix_image_orientation(resized, exif_tags)  # Fix orientation
-    resized.thumbnail(new_size, Image.ANTIALIAS)
+
+    filter_config = \
+        mgg.global_config['media_type:mediagoblin.media_types.image']\
+            ['resize_filter']
+
+    try:
+        resize_filter = PIL_FILTERS[filter_config.upper()]
+    except KeyError:
+        raise Exception('Filter "{0}" not found, choose one of {1}'.format(
+            unicode(filter_config),
+            u', '.join(PIL_FILTERS.keys())))
+
+    resized.thumbnail(new_size, resize_filter)
 
     # Copy the new file to the conversion subdir, then remotely.
     tmp_resized_filename = os.path.join(workdir, new_path[-1])
@@ -77,21 +94,21 @@ def sniff_handler(media_file, **kw):
     return False
 
 
-@get_workbench
-def process_image(entry, workbench=None):
+def process_image(proc_state):
     """Code to process an image. Will be run by celery.
 
     A Workbench() represents a local tempory dir. It is automatically
     cleaned up when this function exits.
     """
+    entry = proc_state.entry
+    workbench = proc_state.workbench
+
     # Conversions subdirectory to avoid collisions
     conversions_subdir = os.path.join(
         workbench.dir, 'conversions')
     os.mkdir(conversions_subdir)
-    queued_filepath = entry.queued_media_file
-    queued_filename = workbench.localized_file(
-        mgg.queue_store, queued_filepath,
-        'source')
+
+    queued_filename = proc_state.get_queued_filename()
     name_builder = FilenameBuilder(queued_filename)
 
     # EXIF extraction
@@ -124,18 +141,14 @@ def process_image(entry, workbench=None):
         medium_filepath = None
 
     # Copy our queued local workbench to its final destination
-    original_filepath = create_pub_filepath(
-        entry, name_builder.fill('{basename}{ext}'))
-    mgg.public_store.copy_local_to_storage(queued_filename, original_filepath)
+    proc_state.copy_original(name_builder.fill('{basename}{ext}'))
 
     # Remove queued media file from storage and database
-    mgg.queue_store.delete_file(queued_filepath)
-    entry.queued_media_file = []
+    proc_state.delete_queue_file()
 
     # Insert media file information into database
     media_files_dict = entry.setdefault('media_files', {})
     media_files_dict[u'thumb'] = thumb_filepath
-    media_files_dict[u'original'] = original_filepath
     if medium_filepath:
         media_files_dict[u'medium'] = medium_filepath
diff --git a/mediagoblin/media_types/stl/models.py b/mediagoblin/media_types/stl/models.py
index 17091f0e..ff50e9c0 100644
--- a/mediagoblin/media_types/stl/models.py
+++ b/mediagoblin/media_types/stl/models.py
@@ -32,7 +32,8 @@ class StlData(Base):
     media_entry = Column(Integer, ForeignKey('core__media_entries.id'),
         primary_key=True)
     get_media_entry = relationship("MediaEntry",
-        backref=backref(BACKREF_NAME, cascade="all, delete-orphan"))
+        backref=backref(BACKREF_NAME, uselist=False,
+                        cascade="all, delete-orphan"))
 
     center_x = Column(Float)
     center_y = Column(Float)
diff --git a/mediagoblin/media_types/stl/processing.py b/mediagoblin/media_types/stl/processing.py
index 3089f295..77744ac5 100644
--- a/mediagoblin/media_types/stl/processing.py
+++ b/mediagoblin/media_types/stl/processing.py
@@ -21,7 +21,6 @@ import subprocess
 import pkg_resources
 
 from mediagoblin import mg_globals as mgg
-from mediagoblin.decorators import get_workbench
 from mediagoblin.processing import create_pub_filepath, \
     FilenameBuilder
 
@@ -76,13 +75,15 @@ def blender_render(config):
         env=env)
 
 
-@get_workbench
-def process_stl(entry, workbench=None):
+def process_stl(proc_state):
     """Code to process an stl or obj model. Will be run by celery.
 
     A Workbench() represents a local tempory dir. It is automatically
     cleaned up when this function exits.
     """
+    entry = proc_state.entry
+    workbench = proc_state.workbench
+
     queued_filepath = entry.queued_media_file
     queued_filename = workbench.localized_file(
         mgg.queue_store, queued_filepath, 'source')
diff --git a/mediagoblin/media_types/video/models.py b/mediagoblin/media_types/video/models.py
index 645ef4d3..a771352c 100644
--- a/mediagoblin/media_types/video/models.py
+++ b/mediagoblin/media_types/video/models.py
@@ -32,7 +32,8 @@ class VideoData(Base):
     media_entry = Column(Integer, ForeignKey('core__media_entries.id'),
         primary_key=True)
     get_media_entry = relationship("MediaEntry",
-        backref=backref(BACKREF_NAME, cascade="all, delete-orphan"))
+        backref=backref(BACKREF_NAME, uselist=False,
+                        cascade="all, delete-orphan"))
 
     width = Column(SmallInteger)
     height = Column(SmallInteger)
diff --git a/mediagoblin/media_types/video/processing.py b/mediagoblin/media_types/video/processing.py
index 53fe1a73..41929f3d 100644
--- a/mediagoblin/media_types/video/processing.py
+++ b/mediagoblin/media_types/video/processing.py
@@ -18,7 +18,6 @@ from tempfile import NamedTemporaryFile
 import logging
 
 from mediagoblin import mg_globals as mgg
-from mediagoblin.decorators import get_workbench
 from mediagoblin.processing import \
     create_pub_filepath, FilenameBuilder, BaseProcessingFail, ProgressCallback
 from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
@@ -54,8 +53,8 @@ def sniff_handler(media_file, **kw):
     return False
 
 
-@get_workbench
-def process_video(entry, workbench=None):
+
+def process_video(proc_state):
     """
     Process a video entry, transcode the queued media files (originals) and
     create a thumbnail for the entry.
@@ -63,12 +62,12 @@ def process_video(entry, workbench=None):
     A Workbench() represents a local tempory dir. It is automatically
     cleaned up when this function exits.
     """
+    entry = proc_state.entry
+    workbench = proc_state.workbench
     video_config = mgg.global_config['media_type:mediagoblin.media_types.video']
 
     queued_filepath = entry.queued_media_file
-    queued_filename = workbench.localized_file(
-        mgg.queue_store, queued_filepath,
-        'source')
+    queued_filename = proc_state.get_queued_filename()
     name_builder = FilenameBuilder(queued_filename)
 
     medium_filepath = create_pub_filepath(
@@ -138,8 +137,7 @@ def process_video(entry, workbench=None):
     if video_config['keep_original']:
         # Push original file to public storage
         _log.debug('Saving original...')
-        original_filepath = create_pub_filepath(entry, queued_filepath[-1])
-        mgg.public_store.copy_local_to_storage(queued_filename, original_filepath)
-        entry.media_files['original'] = original_filepath
+        proc_state.copy_original(queued_filepath[-1])
 
-    mgg.queue_store.delete_file(queued_filepath)
+    # Remove queued media file from storage and database
+    proc_state.delete_queue_file()
diff --git a/mediagoblin/media_types/video/transcoders.py b/mediagoblin/media_types/video/transcoders.py
index 8aa7121f..d8290d41 100644
--- a/mediagoblin/media_types/video/transcoders.py
+++ b/mediagoblin/media_types/video/transcoders.py
@@ -477,8 +477,8 @@ from playbin')
         _log.debug('thumbnail message: {0}'.format(message))
 
         if message.type == gst.MESSAGE_ERROR:
-            _log.error('thumbnail error: {0}'.format(message))
-            gobject.idle_add(self.on_thumbnail_error)
+            _log.error('thumbnail error: {0}'.format(message.parse_error()))
+            gobject.idle_add(self.on_thumbnail_error, message)
 
         if message.type == gst.MESSAGE_STATE_CHANGED:
             prev_state, cur_state, pending_state = \
@@ -570,10 +570,37 @@ pending: {2}'.format(
 
         return False
 
-    def on_thumbnail_error(self):
-        _log.error('Thumbnailing failed.')
+    def on_thumbnail_error(self, message):
+        scaling_failed = False
+
+        if 'Error calculating the output scaled size - integer overflow' \
+           in message.parse_error()[1]:
+            # GStreamer videoscale sometimes fails to calculate the dimensions
+            # given only one of the destination dimensions and the source
+            # dimensions. This is a workaround in case videoscale returns an
+            # error that indicates this has happened.
+            scaling_failed = True
+            _log.error('Thumbnailing failed because of videoscale integer'
+                       ' overflow. Will retry with fallback.')
+        else:
+            _log.error('Thumbnailing failed: {0}'.format(message.parse_error()))
+
+        # Kill the current mainloop
         self.disconnect()
 
+        if scaling_failed:
+            # Manually scale the destination dimensions
+            _log.info('Retrying with manually set sizes...')
+
+            info = VideoTranscoder().discover(self.source_path)
+
+            h = info['videoheight']
+            w = info['videowidth']
+            ratio = 180 / int(w)
+            h = int(h * ratio)
+
+            self.__init__(self.source_path, self.dest_path, 180, h)
+
     def disconnect(self):
         self.state = self.STATE_HALTING
 
@@ -1009,4 +1036,4 @@ if __name__ == '__main__':
             print('I\'m a callback!')
         transcoder.transcode(*args, progress_callback=cb)
     elif options.action == 'discover':
-        print transcoder.discover(*args).__dict__
+        print transcoder.discover(*args)