aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--mediagoblin/media_types/ascii/processing.py5
-rw-r--r--mediagoblin/media_types/audio/processing.py5
-rw-r--r--mediagoblin/media_types/image/processing.py6
-rw-r--r--mediagoblin/media_types/pdf/processing.py5
-rw-r--r--mediagoblin/media_types/raw_image/processing.py5
-rw-r--r--mediagoblin/media_types/stl/processing.py5
-rw-r--r--mediagoblin/media_types/video/processing.py111
-rw-r--r--mediagoblin/media_types/video/util.py9
-rw-r--r--mediagoblin/processing/__init__.py7
-rw-r--r--mediagoblin/submit/lib.py10
10 files changed, 119 insertions, 49 deletions
diff --git a/mediagoblin/media_types/ascii/processing.py b/mediagoblin/media_types/ascii/processing.py
index 00d04e63..71ccc86e 100644
--- a/mediagoblin/media_types/ascii/processing.py
+++ b/mediagoblin/media_types/ascii/processing.py
@@ -273,3 +273,8 @@ class AsciiProcessingManager(ProcessingManager):
super(AsciiProcessingManager, self).__init__()
self.add_processor(InitialProcessor)
self.add_processor(Resizer)
+
+ def workflow(self, entry, feed_url, reprocess_action, reprocess_info=None):
+ ProcessMedia().apply_async(
+ [entry.id, feed_url, reprocess_action, reprocess_info], {},
+ task_id=entry.queued_task_id)
diff --git a/mediagoblin/media_types/audio/processing.py b/mediagoblin/media_types/audio/processing.py
index 427309de..a83d60f7 100644
--- a/mediagoblin/media_types/audio/processing.py
+++ b/mediagoblin/media_types/audio/processing.py
@@ -365,3 +365,8 @@ class AudioProcessingManager(ProcessingManager):
self.add_processor(InitialProcessor)
self.add_processor(Resizer)
self.add_processor(Transcoder)
+
+ def workflow(self, entry, feed_url, reprocess_action, reprocess_info=None):
+ ProcessMedia().apply_async(
+ [entry.id, feed_url, reprocess_action, reprocess_info], {},
+ task_id=entry.queued_task_id)
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index 14091d6e..42234eff 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -431,6 +431,12 @@ class ImageProcessingManager(ProcessingManager):
self.add_processor(Resizer)
self.add_processor(MetadataProcessing)
+ def workflow(self, entry, feed_url, reprocess_action, reprocess_info=None):
+ ProcessMedia().apply_async(
+ [entry.id, feed_url, reprocess_action, reprocess_info], {},
+ task_id=entry.queued_task_id)
+
+
if __name__ == '__main__':
import sys
import pprint
diff --git a/mediagoblin/media_types/pdf/processing.py b/mediagoblin/media_types/pdf/processing.py
index ac4bab6d..d93b19bb 100644
--- a/mediagoblin/media_types/pdf/processing.py
+++ b/mediagoblin/media_types/pdf/processing.py
@@ -470,3 +470,8 @@ class PdfProcessingManager(ProcessingManager):
super(PdfProcessingManager, self).__init__()
self.add_processor(InitialProcessor)
self.add_processor(Resizer)
+
+ def workflow(self, entry, feed_url, reprocess_action, reprocess_info=None):
+ ProcessMedia().apply_async(
+ [entry.id, feed_url, reprocess_action, reprocess_info], {},
+ task_id=entry.queued_task_id)
diff --git a/mediagoblin/media_types/raw_image/processing.py b/mediagoblin/media_types/raw_image/processing.py
index 740ba2dd..a385d563 100644
--- a/mediagoblin/media_types/raw_image/processing.py
+++ b/mediagoblin/media_types/raw_image/processing.py
@@ -80,3 +80,8 @@ class RawImageProcessingManager(ProcessingManager):
super(RawImageProcessingManager, self).__init__()
self.add_processor(InitialRawProcessor)
self.add_processor(Resizer)
+
+ def workflow(self, entry, feed_url, reprocess_action, reprocess_info=None):
+ ProcessMedia().apply_async(
+ [entry.id, feed_url, reprocess_action, reprocess_info], {},
+ task_id=entry.queued_task_id)
diff --git a/mediagoblin/media_types/stl/processing.py b/mediagoblin/media_types/stl/processing.py
index 55764aeb..7f2f350d 100644
--- a/mediagoblin/media_types/stl/processing.py
+++ b/mediagoblin/media_types/stl/processing.py
@@ -368,3 +368,8 @@ class StlProcessingManager(ProcessingManager):
super(StlProcessingManager, self).__init__()
self.add_processor(InitialProcessor)
self.add_processor(Resizer)
+
+ def workflow(self, entry, feed_url, reprocess_action, reprocess_info=None):
+ ProcessMedia().apply_async(
+ [entry.id, feed_url, reprocess_action, reprocess_info], {},
+ task_id=entry.queued_task_id)
diff --git a/mediagoblin/media_types/video/processing.py b/mediagoblin/media_types/video/processing.py
index ca3087a2..4dee8d55 100644
--- a/mediagoblin/media_types/video/processing.py
+++ b/mediagoblin/media_types/video/processing.py
@@ -28,6 +28,7 @@ from mediagoblin.processing import (
ProcessingManager, request_from_args,
get_process_filename, store_public,
copy_original)
+from mediagoblin.processing.task import ProcessMedia
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
from mediagoblin.media_types import MissingComponents
@@ -159,14 +160,40 @@ def store_metadata(media_entry, metadata):
if len(stored_metadata):
media_entry.media_data_init(orig_metadata=stored_metadata)
+# =====================
+
+
+def main_task(**process_info):
+ processor = CommonVideoProcessor(process_info['manager'], process_info['entry'])
+ processor.common_setup(process_info['resolution'])
+ processor.transcode(medium_size=process_info['medium_size'], vp8_quality=process_info['vp8_quality'],
+ vp8_threads=process_info['vp8_threads'], vorbis_quality=process_info['vorbis_quality'])
+ processor.generate_thumb(thumb_size=process_info['thumb_size'])
+ processor.store_orig_metadata()
+
+
+def complimentary_task(**process_info):
+ processor = CommonVideoProcessor(process_info['manager'], process_info['entry'])
+ processor.common_setup(process_info['resolution'])
+ processor.transcode(medium_size=process_info['medium_size'], vp8_quality=process_info['vp8_quality'],
+ vp8_threads=process_info['vp8_threads'], vorbis_quality=process_info['vorbis_quality'])
+
+
+def processing_cleanup(**process_info):
+ processor = CommonVideoProcessor(process_info['manager'], process_info['entry'])
+ processor.delete_queue_file()
+
+# =====================
+
class CommonVideoProcessor(MediaProcessor):
"""
Provides a base for various video processing steps
"""
- acceptable_files = ['original', 'best_quality', 'webm_video']
+ acceptable_files = ['original', 'best_quality', 'webm_144p', 'webm_360p',
+ 'webm_480p', 'webm_720p', 'webm_1080p', 'webm_video']
- def common_setup(self):
+ def common_setup(self, resolution=None):
self.video_config = mgg \
.global_config['plugins'][MEDIA_TYPE]
@@ -178,24 +205,23 @@ class CommonVideoProcessor(MediaProcessor):
self.transcoder = transcoders.VideoTranscoder()
self.did_transcode = False
+ if resolution:
+ self.curr_file = 'webm_' + str(resolution)
+ self.part_filename = (self.name_builder.fill('{basename}.' +
+ str(resolution) + '.webm'))
+ else:
+ self.curr_file = 'webm_video'
+ self.part_filename = self.name_builder.fill('{basename}.medium.webm')
+
def copy_original(self):
# If we didn't transcode, then we need to keep the original
- if not self.did_transcode or \
- (self.video_config['keep_original'] and self.did_transcode):
- copy_original(
- self.entry, self.process_filename,
- self.name_builder.fill('{basename}{ext}'))
+ raise NotImplementedError
def _keep_best(self):
"""
If there is no original, keep the best file that we have
"""
- if not self.entry.media_files.get('best_quality'):
- # Save the best quality file if no original?
- if not self.entry.media_files.get('original') and \
- self.entry.media_files.get('webm_video'):
- self.entry.media_files['best_quality'] = self.entry \
- .media_files['webm_video']
+ raise NotImplementedError
def _skip_processing(self, keyname, **kwargs):
file_metadata = self.entry.get_file_metadata(keyname)
@@ -204,7 +230,7 @@ class CommonVideoProcessor(MediaProcessor):
return False
skip = True
- if keyname == 'webm_video':
+ if 'webm' in keyname:
if kwargs.get('medium_size') != file_metadata.get('medium_size'):
skip = False
elif kwargs.get('vp8_quality') != file_metadata.get('vp8_quality'):
@@ -224,8 +250,7 @@ class CommonVideoProcessor(MediaProcessor):
def transcode(self, medium_size=None, vp8_quality=None, vp8_threads=None,
vorbis_quality=None):
progress_callback = ProgressCallback(self.entry)
- tmp_dst = os.path.join(self.workbench.dir,
- self.name_builder.fill('{basename}.medium.webm'))
+ tmp_dst = os.path.join(self.workbench.dir, self.part_filename)
if not medium_size:
medium_size = (
@@ -243,32 +268,23 @@ class CommonVideoProcessor(MediaProcessor):
'vp8_quality': vp8_quality,
'vorbis_quality': vorbis_quality}
- if self._skip_processing('webm_video', **file_metadata):
+ if self._skip_processing(self.curr_file, **file_metadata):
return
- # Extract metadata and keep a record of it
metadata = transcoders.discover(self.process_filename)
-
- # metadata's stream info here is a DiscovererContainerInfo instance,
- # it gets split into DiscovererAudioInfo and DiscovererVideoInfo;
- # metadata itself has container-related data in tags, like video-codec
- store_metadata(self.entry, metadata)
-
orig_dst_dimensions = (metadata.get_video_streams()[0].get_width(),
- metadata.get_video_streams()[0].get_height())
+ metadata.get_video_streams()[0].get_height())
# Figure out whether or not we need to transcode this video or
# if we can skip it
if skip_transcode(metadata, medium_size):
_log.debug('Skipping transcoding')
- dst_dimensions = orig_dst_dimensions
-
# If there is an original and transcoded, delete the transcoded
# since it must be of lower quality then the original
if self.entry.media_files.get('original') and \
- self.entry.media_files.get('webm_video'):
- self.entry.media_files['webm_video'].delete()
+ self.entry.media_files.get(self.curr_file):
+ self.entry.media_files[self.curr_file].delete()
else:
self.transcoder.transcode(self.process_filename, tmp_dst,
@@ -278,27 +294,16 @@ class CommonVideoProcessor(MediaProcessor):
progress_callback=progress_callback,
dimensions=tuple(medium_size))
if self.transcoder.dst_data:
- video_info = self.transcoder.dst_data.get_video_streams()[0]
- dst_dimensions = (video_info.get_width(),
- video_info.get_height())
- self._keep_best()
-
# Push transcoded video to public storage
_log.debug('Saving medium...')
store_public(self.entry, 'webm_video', tmp_dst,
self.name_builder.fill('{basename}.medium.webm'))
_log.debug('Saved medium')
- self.entry.set_file_metadata('webm_video', **file_metadata)
+ # TODO(review): confirm this per-resolution file_metadata record is the
+ # one requested in upstream review before relying on it for skip-detection
+ self.entry.set_file_metadata(self.curr_file, **file_metadata)
self.did_transcode = True
- else:
- dst_dimensions = orig_dst_dimensions
-
- # Save the width and height of the transcoded video
- self.entry.media_data_init(
- width=dst_dimensions[0],
- height=dst_dimensions[1])
def generate_thumb(self, thumb_size=None):
# Temporary file for the video thumbnail (cleaned up with workbench)
@@ -330,6 +335,17 @@ class CommonVideoProcessor(MediaProcessor):
self.entry.set_file_metadata('thumb', thumb_size=thumb_size)
+ def store_orig_metadata(self):
+
+ # Extract metadata and keep a record of it
+ metadata = transcoders.discover(self.process_filename)
+
+ # metadata's stream info here is a DiscovererContainerInfo instance,
+ # it gets split into DiscovererAudioInfo and DiscovererVideoInfo;
+ # metadata itself has container-related data in tags, like video-codec
+ store_metadata(self.entry, metadata)
+
+
class InitialProcessor(CommonVideoProcessor):
"""
Initial processing steps for new video
@@ -386,9 +402,9 @@ class InitialProcessor(CommonVideoProcessor):
'vorbis_quality', 'thumb_size'])
def process(self, medium_size=None, vp8_threads=None, vp8_quality=None,
- vorbis_quality=None, thumb_size=None):
- self.common_setup()
-
+ vorbis_quality=None, thumb_size=None, resolution=None):
+ self.common_setup(resolution=resolution)
+ self.store_orig_metadata()
self.transcode(medium_size=medium_size, vp8_quality=vp8_quality,
vp8_threads=vp8_threads, vorbis_quality=vorbis_quality)
@@ -503,3 +519,8 @@ class VideoProcessingManager(ProcessingManager):
self.add_processor(InitialProcessor)
self.add_processor(Resizer)
self.add_processor(Transcoder)
+
+ def workflow(self, entry, feed_url, reprocess_action, reprocess_info=None):
+ ProcessMedia().apply_async(
+ [entry.id, feed_url, reprocess_action, reprocess_info], {},
+ task_id=entry.queued_task_id)
diff --git a/mediagoblin/media_types/video/util.py b/mediagoblin/media_types/video/util.py
index 8b65d839..1f5e907d 100644
--- a/mediagoblin/media_types/video/util.py
+++ b/mediagoblin/media_types/video/util.py
@@ -18,6 +18,15 @@ import logging
from mediagoblin import mg_globals as mgg
+ACCEPTED_RESOLUTIONS = {
+ '144p' : (256, 144),
+ '240p' : (352, 240),
+ '360p' : (480, 360),
+ '480p' : (858, 480),
+ '720p' : (1280, 720),
+ '1080p' : (1920, 1080),
+}
+
_log = logging.getLogger(__name__)
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index 29345227..4e5853c1 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -257,6 +257,13 @@ class ProcessingManager(object):
return processor
+ def workflow(self, entry, feed_url, reprocess_action, reprocess_info=None):
+ """
+ Returns the Celery command needed to proceed with media processing
+ *This method has to be implemented in all media types*
+ """
+ raise NotImplementedError
+
def request_from_args(args, which_args):
"""
diff --git a/mediagoblin/submit/lib.py b/mediagoblin/submit/lib.py
index 08a603e9..402eb851 100644
--- a/mediagoblin/submit/lib.py
+++ b/mediagoblin/submit/lib.py
@@ -28,7 +28,7 @@ from mediagoblin.tools.response import json_response
from mediagoblin.tools.text import convert_to_tag_list_of_dicts
from mediagoblin.tools.federation import create_activity, create_generator
from mediagoblin.db.models import Collection, MediaEntry, ProcessingMetaData
-from mediagoblin.processing import mark_entry_failed
+from mediagoblin.processing import mark_entry_failed, get_entry_and_processing_manager
from mediagoblin.processing.task import ProcessMedia
from mediagoblin.notifications import add_comment_subscription
from mediagoblin.media_types import sniff_media
@@ -262,10 +262,12 @@ def run_process_media(entry, feed_url=None,
:param reprocess_action: What particular action should be run.
:param reprocess_info: A dict containing all of the necessary reprocessing
info for the given media_type"""
+
+ reprocess_info = reprocess_info or {}
+ entry, manager = get_entry_and_processing_manager(entry.id)
+
try:
- ProcessMedia().apply_async(
- [entry.id, feed_url, reprocess_action, reprocess_info], {},
- task_id=entry.queued_task_id)
+ manager.workflow(entry, feed_url, reprocess_action, reprocess_info)
except BaseException as exc:
# The purpose of this section is because when running in "lazy"
# or always-eager-with-exceptions-propagated celery mode that