Diffstat (limited to 'mediagoblin')
-rw-r--r--  mediagoblin/config_spec.ini | 2
-rw-r--r--  mediagoblin/db/migrations.py | 18
-rw-r--r--  mediagoblin/db/models.py | 11
-rw-r--r--  mediagoblin/edit/forms.py | 2
-rw-r--r--  mediagoblin/edit/views.py | 4
-rw-r--r--  mediagoblin/gmg_commands/__init__.py | 4
-rw-r--r--  mediagoblin/gmg_commands/import_export.py | 2
-rw-r--r--  mediagoblin/gmg_commands/reprocess.py | 302
-rw-r--r--  mediagoblin/gmg_commands/util.py | 2
-rw-r--r--  mediagoblin/media_types/ascii/__init__.py | 4
-rw-r--r--  mediagoblin/media_types/ascii/processing.py | 291
-rw-r--r--  mediagoblin/media_types/audio/__init__.py | 5
-rw-r--r--  mediagoblin/media_types/audio/processing.py | 412
-rw-r--r--  mediagoblin/media_types/audio/transcoders.py | 3
-rw-r--r--  mediagoblin/media_types/image/__init__.py | 9
-rw-r--r--  mediagoblin/media_types/image/processing.py | 301
-rw-r--r--  mediagoblin/media_types/pdf/__init__.py | 4
-rw-r--r--  mediagoblin/media_types/pdf/processing.py | 248
-rw-r--r--  mediagoblin/media_types/stl/__init__.py | 4
-rw-r--r--  mediagoblin/media_types/stl/processing.py | 353
-rw-r--r--  mediagoblin/media_types/video/__init__.py | 6
-rw-r--r--  mediagoblin/media_types/video/migrations.py | 17
-rw-r--r--  mediagoblin/media_types/video/models.py | 6
-rw-r--r--  mediagoblin/media_types/video/processing.py | 421
-rw-r--r--  mediagoblin/media_types/video/util.py | 6
-rw-r--r--  mediagoblin/notifications/__init__.py | 18
-rw-r--r--  mediagoblin/notifications/routing.py | 4
-rw-r--r--  mediagoblin/notifications/views.py | 29
-rw-r--r--  mediagoblin/processing/__init__.py | 318
-rw-r--r--  mediagoblin/processing/task.py | 56
-rw-r--r--  mediagoblin/static/js/notifications.js | 13
-rw-r--r--  mediagoblin/submit/lib.py | 14
-rw-r--r--  mediagoblin/submit/views.py | 3
-rw-r--r--  mediagoblin/templates/mediagoblin/base.html | 9
-rw-r--r--  mediagoblin/templates/mediagoblin/fragments/header_notifications.html | 4
-rw-r--r--  mediagoblin/templates/mediagoblin/media_displays/video.html | 6
-rw-r--r--  mediagoblin/tests/test_celery_setup.py | 2
-rw-r--r--  mediagoblin/tests/test_mgoblin_app.ini | 2
-rw-r--r--  mediagoblin/tests/test_notifications.py | 53
-rw-r--r--  mediagoblin/tests/test_persona.py | 2
-rw-r--r--  mediagoblin/user_pages/views.py | 5
41 files changed, 2315 insertions, 660 deletions
diff --git a/mediagoblin/config_spec.ini b/mediagoblin/config_spec.ini
index 81dadd25..8f03509d 100644
--- a/mediagoblin/config_spec.ini
+++ b/mediagoblin/config_spec.ini
@@ -152,7 +152,7 @@ CELERY_RESULT_DBURI = string(default="sqlite:///%(here)s/celery.db")
# default kombu stuff
BROKER_TRANSPORT = string(default="sqlalchemy")
-BROKER_HOST = string(default="sqlite:///%(here)s/kombu.db")
+BROKER_URL = string(default="sqlite:///%(here)s/kombu.db")
# known booleans
CELERY_RESULT_PERSISTENT = boolean()
diff --git a/mediagoblin/db/migrations.py b/mediagoblin/db/migrations.py
index 374ab4c8..62fb7e8d 100644
--- a/mediagoblin/db/migrations.py
+++ b/mediagoblin/db/migrations.py
@@ -365,6 +365,8 @@ def add_new_notification_tables(db):
CommentNotification_v0.__table__.create(db.bind)
ProcessingNotification_v0.__table__.create(db.bind)
+ db.commit()
+
@RegisterMigration(13, MIGRATIONS)
def pw_hash_nullable(db):
@@ -425,7 +427,7 @@ class RequestToken_v0(declarative_base()):
callback = Column(Unicode, nullable=False, default=u"oob")
created = Column(DateTime, nullable=False, default=datetime.datetime.now)
updated = Column(DateTime, nullable=False, default=datetime.datetime.now)
-
+
class AccessToken_v0(declarative_base()):
"""
Model for representing the access tokens
@@ -438,7 +440,7 @@ class AccessToken_v0(declarative_base()):
request_token = Column(Unicode, ForeignKey(RequestToken_v0.token))
created = Column(DateTime, nullable=False, default=datetime.datetime.now)
updated = Column(DateTime, nullable=False, default=datetime.datetime.now)
-
+
class NonceTimestamp_v0(declarative_base()):
"""
@@ -460,3 +462,15 @@ def create_oauth1_tables(db):
NonceTimestamp_v0.__table__.create(db.bind)
db.commit()
+
+
+@RegisterMigration(15, MIGRATIONS)
+def wants_notifications(db):
+ """Add a wants_notifications field to User model"""
+ metadata = MetaData(bind=db.bind)
+ user_table = inspect_table(metadata, "core__users")
+
+ col = Column('wants_notifications', Boolean, default=True)
+ col.create(user_table)
+
+ db.commit()
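Once this change is deployed, migration 15 is applied through MediaGoblin's normal database-update command (a usage sketch; the path to gmg depends on your install):

    ./bin/gmg dbupdate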
diff --git a/mediagoblin/db/models.py b/mediagoblin/db/models.py
index f0cbce2a..4341e086 100644
--- a/mediagoblin/db/models.py
+++ b/mediagoblin/db/models.py
@@ -69,6 +69,7 @@ class User(Base, UserMixin):
# Intended to be nullable=False, but migrations would not work for it
# set to nullable=True implicitly.
wants_comment_notification = Column(Boolean, default=True)
+ wants_notifications = Column(Boolean, default=True)
license_preference = Column(Unicode)
is_admin = Column(Boolean, default=False, nullable=False)
url = Column(Unicode)
@@ -146,7 +147,7 @@ class RequestToken(Base):
callback = Column(Unicode, nullable=False, default=u"oob")
created = Column(DateTime, nullable=False, default=datetime.datetime.now)
updated = Column(DateTime, nullable=False, default=datetime.datetime.now)
-
+
class AccessToken(Base):
"""
Model for representing the access tokens
@@ -159,7 +160,7 @@ class AccessToken(Base):
request_token = Column(Unicode, ForeignKey(RequestToken.token))
created = Column(DateTime, nullable=False, default=datetime.datetime.now)
updated = Column(DateTime, nullable=False, default=datetime.datetime.now)
-
+
class NonceTimestamp(Base):
"""
@@ -646,13 +647,13 @@ with_polymorphic(
[ProcessingNotification, CommentNotification])
MODELS = [
- User, Client, RequestToken, AccessToken, NonceTimestamp, MediaEntry, Tag,
- MediaTag, MediaComment, Collection, CollectionItem, MediaFile, FileKeynames,
+ User, Client, RequestToken, AccessToken, NonceTimestamp, MediaEntry, Tag,
+ MediaTag, MediaComment, Collection, CollectionItem, MediaFile, FileKeynames,
MediaAttachmentFile, ProcessingMetaData, Notification, CommentNotification,
ProcessingNotification, CommentSubscription]
"""
- Foundations are the default rows that are created immediately after the tables
+ Foundations are the default rows that are created immediately after the tables
are initialized. Each entry to this dictionary should be in the format of:
ModelConstructorObject:List of Dictionaries
(Each Dictionary represents a row on the Table to be created, containing each
diff --git a/mediagoblin/edit/forms.py b/mediagoblin/edit/forms.py
index 85c243a0..5de1bf96 100644
--- a/mediagoblin/edit/forms.py
+++ b/mediagoblin/edit/forms.py
@@ -67,6 +67,8 @@ class EditAccountForm(wtforms.Form):
normalize_user_or_email_field(allow_user=False)])
wants_comment_notification = wtforms.BooleanField(
description=_("Email me when others comment on my media"))
+ wants_notifications = wtforms.BooleanField(
+ description=_("Enable/Disable insite notifications"))
license_preference = wtforms.SelectField(
_('License preference'),
[
diff --git a/mediagoblin/edit/views.py b/mediagoblin/edit/views.py
index 6aa2acd9..a11cb932 100644
--- a/mediagoblin/edit/views.py
+++ b/mediagoblin/edit/views.py
@@ -228,10 +228,12 @@ def edit_account(request):
user = request.user
form = forms.EditAccountForm(request.form,
wants_comment_notification=user.wants_comment_notification,
- license_preference=user.license_preference)
+ license_preference=user.license_preference,
+ wants_notifications=user.wants_notifications)
if request.method == 'POST' and form.validate():
user.wants_comment_notification = form.wants_comment_notification.data
+ user.wants_notifications = form.wants_notifications.data
user.license_preference = form.license_preference.data
diff --git a/mediagoblin/gmg_commands/__init__.py b/mediagoblin/gmg_commands/__init__.py
index d8156126..165a76fd 100644
--- a/mediagoblin/gmg_commands/__init__.py
+++ b/mediagoblin/gmg_commands/__init__.py
@@ -45,6 +45,10 @@ SUBCOMMAND_MAP = {
'setup': 'mediagoblin.gmg_commands.assetlink:assetlink_parser_setup',
'func': 'mediagoblin.gmg_commands.assetlink:assetlink',
'help': 'Link assets for themes and plugins for static serving'},
+ 'reprocess': {
+ 'setup': 'mediagoblin.gmg_commands.reprocess:reprocess_parser_setup',
+ 'func': 'mediagoblin.gmg_commands.reprocess:reprocess',
+ 'help': 'Reprocess media entries'},
# 'theme': {
# 'setup': 'mediagoblin.gmg_commands.theme:theme_parser_setup',
# 'func': 'mediagoblin.gmg_commands.theme:theme',
diff --git a/mediagoblin/gmg_commands/import_export.py b/mediagoblin/gmg_commands/import_export.py
index 98ec617d..fbac09f6 100644
--- a/mediagoblin/gmg_commands/import_export.py
+++ b/mediagoblin/gmg_commands/import_export.py
@@ -16,6 +16,7 @@
from mediagoblin import mg_globals
from mediagoblin.db.open import setup_connection_and_db_from_config
+from mediagoblin.gmg_commands import util as commands_util
from mediagoblin.storage.filestorage import BasicFileStorage
from mediagoblin.init import setup_storage, setup_global_and_app_config
@@ -223,6 +224,7 @@ def env_export(args):
'''
Export database and media files to a tar archive
'''
+ commands_util.check_unrecognized_args(args)
if args.cache_path:
if os.path.exists(args.cache_path):
_log.error('The cache directory must not exist '
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
new file mode 100644
index 00000000..e2f19ea3
--- /dev/null
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -0,0 +1,302 @@
+# GNU MediaGoblin -- federated, autonomous media hosting
+# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import argparse
+import os
+
+from mediagoblin import mg_globals
+from mediagoblin.db.models import MediaEntry
+from mediagoblin.gmg_commands import util as commands_util
+from mediagoblin.submit.lib import run_process_media
+from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
+from mediagoblin.tools.pluginapi import hook_handle
+from mediagoblin.processing import (
+ ProcessorDoesNotExist, ProcessorNotEligible,
+ get_entry_and_processing_manager, get_processing_manager_for_type,
+ ProcessingManagerDoesNotExist)
+
+
+def reprocess_parser_setup(subparser):
+ subparser.add_argument(
+ '--celery',
+ action='store_true',
+ help="Don't process eagerly, pass off to celery")
+
+ subparsers = subparser.add_subparsers(dest="reprocess_subcommand")
+
+ ###################
+ # available command
+ ###################
+ available_parser = subparsers.add_parser(
+ "available",
+ help="Find out what actions are available for this media")
+
+ available_parser.add_argument(
+ "id_or_type",
+ help="Media id or media type to check")
+
+ available_parser.add_argument(
+ "--action-help",
+ action="store_true",
+ help="List argument help for each action available")
+
+ available_parser.add_argument(
+ "--state",
+ help="The state of media you would like to reprocess")
+
+
+ #############
+ # run command
+ #############
+
+ run_parser = subparsers.add_parser(
+ "run",
+ help="Run a reprocessing on one or more media")
+
+ run_parser.add_argument(
+ 'media_id',
+ help="The media_entry id(s) you wish to reprocess.")
+
+ run_parser.add_argument(
+ 'reprocess_command',
+ help="The reprocess command you intend to run")
+
+ run_parser.add_argument(
+ 'reprocess_args',
+ nargs=argparse.REMAINDER,
+ help="rest of arguments to the reprocessing tool")
+
+
+ ################
+ # thumbs command
+ ################
+ thumbs = subparsers.add_parser(
+ 'thumbs',
+ help='Regenerate thumbs for all processed media')
+
+ thumbs.add_argument(
+ '--size',
+ nargs=2,
+ type=int,
+ metavar=('max_width', 'max_height'))
+
+ #################
+ # initial command
+ #################
+ subparsers.add_parser(
+ 'initial',
+ help='Reprocess all failed media')
+
+ ##################
+ # bulk_run command
+ ##################
+ bulk_run_parser = subparsers.add_parser(
+ 'bulk_run',
+ help='Run reprocessing on a given media type or state')
+
+ bulk_run_parser.add_argument(
+ 'type',
+ help='The type of media you would like to process')
+
+ bulk_run_parser.add_argument(
+ '--state',
+ default='processed',
+ nargs='?',
+ help='The state of the media you would like to process. Defaults to' \
+ " 'processed'")
+
+ bulk_run_parser.add_argument(
+ 'reprocess_command',
+ help='The reprocess command you intend to run')
+
+ bulk_run_parser.add_argument(
+ 'reprocess_args',
+ nargs=argparse.REMAINDER,
+ help='The rest of the arguments to the reprocessing tool')
+
+ ###############
+ # help command?
+ ###############
+
+
+def available(args):
+ # Get the media type, either by looking up media id, or by specific type
+ try:
+ media_id = int(args.id_or_type)
+ media_entry, manager = get_entry_and_processing_manager(media_id)
+ media_type = media_entry.media_type
+ except ValueError:
+ media_type = args.id_or_type
+ media_entry = None
+ manager = get_processing_manager_for_type(media_type)
+ except ProcessingManagerDoesNotExist:
+ entry = MediaEntry.query.filter_by(id=args.id_or_type).first()
+ print 'No such processing manager for {0}'.format(entry.media_type)
+
+ if args.state:
+ processors = manager.list_all_processors_by_state(args.state)
+ elif media_entry is None:
+ processors = manager.list_all_processors()
+ else:
+ processors = manager.list_eligible_processors(media_entry)
+
+ print "Available processors:"
+ print "====================="
+ print ""
+
+ if args.action_help:
+ for processor in processors:
+ print processor.name
+ print "-" * len(processor.name)
+
+ parser = processor.generate_parser()
+ parser.print_help()
+ print ""
+
+ else:
+ for processor in processors:
+ if processor.description:
+ print " - %s: %s" % (processor.name, processor.description)
+ else:
+ print " - %s" % processor.name
+
+
+def run(args, media_id=None):
+ if not media_id:
+ media_id = args.media_id
+ try:
+ media_entry, manager = get_entry_and_processing_manager(media_id)
+
+ # TODO: (maybe?) This could probably be handled entirely by the
+ # processor class...
+ try:
+ processor_class = manager.get_processor(
+ args.reprocess_command, media_entry)
+ except ProcessorDoesNotExist:
+ print 'No such processor "%s" for media with id "%s"' % (
+ args.reprocess_command, media_entry.id)
+ return
+ except ProcessorNotEligible:
+ print 'Processor "%s" exists but media "%s" is not eligible' % (
+ args.reprocess_command, media_entry.id)
+ return
+
+ reprocess_parser = processor_class.generate_parser()
+ reprocess_args = reprocess_parser.parse_args(args.reprocess_args)
+ reprocess_request = processor_class.args_to_request(reprocess_args)
+ run_process_media(
+ media_entry,
+ reprocess_action=args.reprocess_command,
+ reprocess_info=reprocess_request)
+
+ except ProcessingManagerDoesNotExist:
+ entry = MediaEntry.query.filter_by(id=media_id).first()
+ print 'No such processing manager for {0}'.format(entry.media_type)
+
+
+def bulk_run(args):
+ """
+ Bulk reprocessing of a given media_type
+ """
+ query = MediaEntry.query.filter_by(media_type=args.type,
+ state=args.state)
+
+ for entry in query:
+ run(args, entry.id)
+
+
+def thumbs(args):
+ """
+ Regenerate thumbs for all processed media
+ """
+ query = MediaEntry.query.filter_by(state='processed')
+
+ for entry in query:
+ try:
+ media_entry, manager = get_entry_and_processing_manager(entry.id)
+
+ # TODO: (maybe?) This could probably be handled entirely by the
+ # processor class...
+ try:
+ processor_class = manager.get_processor(
+ 'resize', media_entry)
+ except ProcessorDoesNotExist:
+ print 'No such processor "%s" for media with id "%s"' % (
+ 'resize', media_entry.id)
+ return
+ except ProcessorNotEligible:
+ print 'Processor "%s" exists but media "%s" is not eligible' % (
+ 'resize', media_entry.id)
+ return
+
+ reprocess_parser = processor_class.generate_parser()
+
+ # prepare filetype and size to be passed into reprocess_parser
+ if args.size:
+ extra_args = 'thumb --{0} {1} {2}'.format(
+ processor_class.thumb_size,
+ args.size[0],
+ args.size[1])
+ else:
+ extra_args = 'thumb'
+
+ reprocess_args = reprocess_parser.parse_args(extra_args.split())
+ reprocess_request = processor_class.args_to_request(reprocess_args)
+ run_process_media(
+ media_entry,
+ reprocess_action='resize',
+ reprocess_info=reprocess_request)
+
+ except ProcessingManagerDoesNotExist:
+ print 'No such processing manager for {0}'.format(entry.media_type)
+
+
+def initial(args):
+ """
+ Reprocess all failed media
+ """
+ query = MediaEntry.query.filter_by(state='failed')
+
+ for entry in query:
+ try:
+ media_entry, manager = get_entry_and_processing_manager(entry.id)
+ run_process_media(
+ media_entry,
+ reprocess_action='initial')
+ except ProcessingManagerDoesNotExist:
+ print 'No such processing manager for {0}'.format(entry.media_type)
+
+
+def reprocess(args):
+ # Run eagerly unless explicitly set not to
+ if not args.celery:
+ os.environ['CELERY_ALWAYS_EAGER'] = 'true'
+
+ commands_util.setup_app(args)
+
+ if args.reprocess_subcommand == "run":
+ run(args)
+
+ elif args.reprocess_subcommand == "available":
+ available(args)
+
+ elif args.reprocess_subcommand == "bulk_run":
+ bulk_run(args)
+
+ elif args.reprocess_subcommand == "thumbs":
+ thumbs(args)
+
+ elif args.reprocess_subcommand == "initial":
+ initial(args)
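For illustration, the programmatic path taken by the run subcommand above boils down to the following sketch (the media id 1 and the resize/thumb arguments are hypothetical; the processor classes are defined per media type below):

    media_entry, manager = get_entry_and_processing_manager(1)
    processor_class = manager.get_processor('resize', media_entry)
    parser = processor_class.generate_parser()
    reprocess_args = parser.parse_args(['thumb'])
    reprocess_request = processor_class.args_to_request(reprocess_args)
    run_process_media(media_entry,
                      reprocess_action='resize',
                      reprocess_info=reprocess_request)

With CELERY_ALWAYS_EAGER set (the default unless --celery is passed), the task runs in-process rather than being handed to a celery worker.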
diff --git a/mediagoblin/gmg_commands/util.py b/mediagoblin/gmg_commands/util.py
index 6a6853d5..63e39ca9 100644
--- a/mediagoblin/gmg_commands/util.py
+++ b/mediagoblin/gmg_commands/util.py
@@ -36,5 +36,5 @@ def prompt_if_not_set(variable, text, password=False):
variable=raw_input(text + u' ')
else:
variable=getpass.getpass(text + u' ')
-
+
return variable
diff --git a/mediagoblin/media_types/ascii/__init__.py b/mediagoblin/media_types/ascii/__init__.py
index 4baf8dd3..b0f7551d 100644
--- a/mediagoblin/media_types/ascii/__init__.py
+++ b/mediagoblin/media_types/ascii/__init__.py
@@ -15,7 +15,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from mediagoblin.media_types import MediaManagerBase
-from mediagoblin.media_types.ascii.processing import process_ascii, \
+from mediagoblin.media_types.ascii.processing import AsciiProcessingManager, \
sniff_handler
from mediagoblin.tools import pluginapi
@@ -29,7 +29,6 @@ def setup_plugin():
class ASCIIMediaManager(MediaManagerBase):
human_readable = "ASCII"
- processor = staticmethod(process_ascii)
display_template = "mediagoblin/media_displays/ascii.html"
default_thumb = "images/media_thumbs/ascii.jpg"
@@ -43,5 +42,6 @@ hooks = {
'setup': setup_plugin,
'get_media_type_and_manager': get_media_type_and_manager,
('media_manager', MEDIA_TYPE): lambda: ASCIIMediaManager,
+ ('reprocess_manager', MEDIA_TYPE): lambda: AsciiProcessingManager,
'sniff_handler': sniff_handler,
}
diff --git a/mediagoblin/media_types/ascii/processing.py b/mediagoblin/media_types/ascii/processing.py
index aca784e8..9b6b3ad4 100644
--- a/mediagoblin/media_types/ascii/processing.py
+++ b/mediagoblin/media_types/ascii/processing.py
@@ -13,6 +13,7 @@
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import argparse
import chardet
import os
try:
@@ -22,7 +23,11 @@ except ImportError:
import logging
from mediagoblin import mg_globals as mgg
-from mediagoblin.processing import create_pub_filepath
+from mediagoblin.processing import (
+ create_pub_filepath, FilenameBuilder,
+ MediaProcessor, ProcessingManager,
+ get_process_filename, copy_original,
+ store_public, request_from_args)
from mediagoblin.media_types.ascii import asciitoimage
_log = logging.getLogger(__name__)
@@ -43,106 +48,202 @@ def sniff_handler(media_file, **kw):
return None
-def process_ascii(proc_state):
- """Code to process a txt file. Will be run by celery.
-
- A Workbench() represents a local tempory dir. It is automatically
- cleaned up when this function exits.
+class CommonAsciiProcessor(MediaProcessor):
"""
- entry = proc_state.entry
- workbench = proc_state.workbench
- ascii_config = mgg.global_config['media_type:mediagoblin.media_types.ascii']
- # Conversions subdirectory to avoid collisions
- conversions_subdir = os.path.join(
- workbench.dir, 'conversions')
- os.mkdir(conversions_subdir)
-
- queued_filepath = entry.queued_media_file
- queued_filename = workbench.localized_file(
- mgg.queue_store, queued_filepath,
- 'source')
-
- queued_file = file(queued_filename, 'rb')
-
- with queued_file:
- queued_file_charset = chardet.detect(queued_file.read())
-
- # Only select a non-utf-8 charset if chardet is *really* sure
- # Tested with "Feli\x0109an superjaron", which was detecte
- if queued_file_charset['confidence'] < 0.9:
- interpreted_charset = 'utf-8'
- else:
- interpreted_charset = queued_file_charset['encoding']
-
- _log.info('Charset detected: {0}\nWill interpret as: {1}'.format(
- queued_file_charset,
- interpreted_charset))
-
- queued_file.seek(0) # Rewind the queued file
-
- thumb_filepath = create_pub_filepath(
- entry, 'thumbnail.png')
-
- tmp_thumb_filename = os.path.join(
- conversions_subdir, thumb_filepath[-1])
-
- ascii_converter_args = {}
-
- if ascii_config['thumbnail_font']:
- ascii_converter_args.update(
- {'font': ascii_config['thumbnail_font']})
-
- converter = asciitoimage.AsciiToImage(
- **ascii_converter_args)
-
- thumb = converter._create_image(
- queued_file.read())
-
- with file(tmp_thumb_filename, 'w') as thumb_file:
- thumb.thumbnail(
- (mgg.global_config['media:thumb']['max_width'],
- mgg.global_config['media:thumb']['max_height']),
- Image.ANTIALIAS)
- thumb.save(thumb_file)
-
- _log.debug('Copying local file to public storage')
- mgg.public_store.copy_local_to_storage(
- tmp_thumb_filename, thumb_filepath)
-
- queued_file.seek(0)
+ Provides a base for various ascii processing steps
+ """
+ acceptable_files = ['original', 'unicode']
- original_filepath = create_pub_filepath(entry, queued_filepath[-1])
+ def common_setup(self):
+ self.ascii_config = mgg.global_config[
+ 'media_type:mediagoblin.media_types.ascii']
- with mgg.public_store.get_file(original_filepath, 'wb') \
- as original_file:
- original_file.write(queued_file.read())
+ # Conversions subdirectory to avoid collisions
+ self.conversions_subdir = os.path.join(
+ self.workbench.dir, 'conversions')
+ os.mkdir(self.conversions_subdir)
- queued_file.seek(0) # Rewind *again*
+ # Pull down and set up the processing file
+ self.process_filename = get_process_filename(
+ self.entry, self.workbench, self.acceptable_files)
+ self.name_builder = FilenameBuilder(self.process_filename)
- unicode_filepath = create_pub_filepath(entry, 'ascii-portable.txt')
+ self.charset = None
- with mgg.public_store.get_file(unicode_filepath, 'wb') \
- as unicode_file:
- # Decode the original file from its detected charset (or UTF8)
- # Encode the unicode instance to ASCII and replace any non-ASCII
- # with an HTML entity (&#
- unicode_file.write(
- unicode(queued_file.read().decode(
- interpreted_charset)).encode(
- 'ascii',
- 'xmlcharrefreplace'))
+ def copy_original(self):
+ copy_original(
+ self.entry, self.process_filename,
+ self.name_builder.fill('{basename}{ext}'))
- # Remove queued media file from storage and database.
- # queued_filepath is in the task_id directory which should
- # be removed too, but fail if the directory is not empty to be on
- # the super-safe side.
- mgg.queue_store.delete_file(queued_filepath) # rm file
- mgg.queue_store.delete_dir(queued_filepath[:-1]) # rm dir
- entry.queued_media_file = []
+ def _detect_charset(self, orig_file):
+ d_charset = chardet.detect(orig_file.read())
- media_files_dict = entry.setdefault('media_files', {})
- media_files_dict['thumb'] = thumb_filepath
- media_files_dict['unicode'] = unicode_filepath
- media_files_dict['original'] = original_filepath
+ # Only select a non-utf-8 charset if chardet is *really* sure
+ # Tested with "Feli\x0109an superjaron", which was detected
+ if d_charset['confidence'] < 0.9:
+ self.charset = 'utf-8'
+ else:
+ self.charset = d_charset['encoding']
- entry.save()
+ _log.info('Charset detected: {0}\nWill interpret as: {1}'.format(
+ d_charset,
+ self.charset))
+
+ # Rewind the file
+ orig_file.seek(0)
+
+ def store_unicode_file(self):
+ with file(self.process_filename, 'rb') as orig_file:
+ self._detect_charset(orig_file)
+ unicode_filepath = create_pub_filepath(self.entry,
+ 'ascii-portable.txt')
+
+ with mgg.public_store.get_file(unicode_filepath, 'wb') \
+ as unicode_file:
+ # Decode the original file from its detected charset (or UTF8)
+ # Encode the unicode instance to ASCII and replace any
+ # non-ASCII with an HTML entity (&#
+ unicode_file.write(
+ unicode(orig_file.read().decode(
+ self.charset)).encode(
+ 'ascii',
+ 'xmlcharrefreplace'))
+
+ self.entry.media_files['unicode'] = unicode_filepath
+
+ def generate_thumb(self, font=None, thumb_size=None):
+ with file(self.process_filename, 'rb') as orig_file:
+ # If no font kwarg, check config
+ if not font:
+ font = self.ascii_config.get('thumbnail_font', None)
+ if not thumb_size:
+ thumb_size = (mgg.global_config['media:thumb']['max_width'],
+ mgg.global_config['media:thumb']['max_height'])
+
+ tmp_thumb = os.path.join(
+ self.conversions_subdir,
+ self.name_builder.fill('{basename}.thumbnail.png'))
+
+ ascii_converter_args = {}
+
+ # If there is a font from either the config or kwarg, update
+ # ascii_converter_args
+ if font:
+ ascii_converter_args.update(
+ {'font': self.ascii_config['thumbnail_font']})
+
+ converter = asciitoimage.AsciiToImage(
+ **ascii_converter_args)
+
+ thumb = converter._create_image(
+ orig_file.read())
+
+ with file(tmp_thumb, 'w') as thumb_file:
+ thumb.thumbnail(
+ thumb_size,
+ Image.ANTIALIAS)
+ thumb.save(thumb_file)
+
+ _log.debug('Copying local file to public storage')
+ store_public(self.entry, 'thumb', tmp_thumb,
+ self.name_builder.fill('{basename}.thumbnail.jpg'))
+
+
+class InitialProcessor(CommonAsciiProcessor):
+ """
+ Initial processing step for new ascii media
+ """
+ name = "initial"
+ description = "Initial processing"
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ if not state:
+ state = entry.state
+ return state in (
+ "unprocessed", "failed")
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--thumb_size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ parser.add_argument(
+ '--font',
+ help='the thumbnail font')
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['thumb_size', 'font'])
+
+ def process(self, thumb_size=None, font=None):
+ self.common_setup()
+ self.store_unicode_file()
+ self.generate_thumb(thumb_size=thumb_size, font=font)
+ self.copy_original()
+ self.delete_queue_file()
+
+
+class Resizer(CommonAsciiProcessor):
+ """
+ Resizing process steps for processed media
+ """
+ name = 'resize'
+ description = 'Resize thumbnail'
+ thumb_size = 'thumb_size'
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ """
+ Determine if this media type is eligible for processing
+ """
+ if not state:
+ state = entry.state
+ return state in 'processed'
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--thumb_size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ # Needed for gmg reprocess thumbs to work
+ parser.add_argument(
+ 'file',
+ nargs='?',
+ default='thumb',
+ choices=['thumb'])
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['thumb_size', 'file'])
+
+ def process(self, thumb_size=None, file=None):
+ self.common_setup()
+ self.generate_thumb(thumb_size=thumb_size)
+
+
+class AsciiProcessingManager(ProcessingManager):
+ def __init__(self):
+ super(self.__class__, self).__init__()
+ self.add_processor(InitialProcessor)
+ self.add_processor(Resizer)
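Each processor exposes a small CLI contract: generate_parser() describes its options and args_to_request() reduces the parsed arguments to the keyword values handed to process(). A minimal sketch with hypothetical values (the font name is illustrative):

    parser = InitialProcessor.generate_parser()
    args = parser.parse_args(['--thumb_size', '100', '100',
                              '--font', 'Courier'])
    request = InitialProcessor.args_to_request(args)
    # request now carries thumb_size and font; the processing task
    # eventually calls process() with these values.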
diff --git a/mediagoblin/media_types/audio/__init__.py b/mediagoblin/media_types/audio/__init__.py
index c7ed8d2d..6ad473c8 100644
--- a/mediagoblin/media_types/audio/__init__.py
+++ b/mediagoblin/media_types/audio/__init__.py
@@ -15,7 +15,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from mediagoblin.media_types import MediaManagerBase
-from mediagoblin.media_types.audio.processing import process_audio, \
+from mediagoblin.media_types.audio.processing import AudioProcessingManager, \
sniff_handler
from mediagoblin.tools import pluginapi
@@ -32,8 +32,8 @@ def setup_plugin():
class AudioMediaManager(MediaManagerBase):
human_readable = "Audio"
- processor = staticmethod(process_audio)
display_template = "mediagoblin/media_displays/audio.html"
+ default_thumb = "images/media_thumbs/image.png"
def get_media_type_and_manager(ext):
@@ -45,4 +45,5 @@ hooks = {
'get_media_type_and_manager': get_media_type_and_manager,
'sniff_handler': sniff_handler,
('media_manager', MEDIA_TYPE): lambda: AudioMediaManager,
+ ('reprocess_manager', MEDIA_TYPE): lambda: AudioProcessingManager,
}
diff --git a/mediagoblin/media_types/audio/processing.py b/mediagoblin/media_types/audio/processing.py
index 22383bc1..6a506741 100644
--- a/mediagoblin/media_types/audio/processing.py
+++ b/mediagoblin/media_types/audio/processing.py
@@ -14,16 +14,19 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import argparse
import logging
-from tempfile import NamedTemporaryFile
import os
from mediagoblin import mg_globals as mgg
-from mediagoblin.processing import (create_pub_filepath, BadMediaFail,
- FilenameBuilder, ProgressCallback)
+from mediagoblin.processing import (
+ BadMediaFail, FilenameBuilder,
+ ProgressCallback, MediaProcessor, ProcessingManager,
+ request_from_args, get_process_filename,
+ store_public, copy_original)
-from mediagoblin.media_types.audio.transcoders import (AudioTranscoder,
- AudioThumbnailer)
+from mediagoblin.media_types.audio.transcoders import (
+ AudioTranscoder, AudioThumbnailer)
_log = logging.getLogger(__name__)
@@ -39,121 +42,304 @@ def sniff_handler(media_file, **kw):
_log.debug('Audio discovery raised BadMediaFail')
return None
- if data.is_audio == True and data.is_video == False:
+ if data.is_audio is True and data.is_video is False:
return MEDIA_TYPE
return None
-def process_audio(proc_state):
- """Code to process uploaded audio. Will be run by celery.
-
- A Workbench() represents a local tempory dir. It is automatically
- cleaned up when this function exits.
+class CommonAudioProcessor(MediaProcessor):
+ """
+ Provides a base for various audio processing steps
"""
- entry = proc_state.entry
- workbench = proc_state.workbench
- audio_config = mgg.global_config['media_type:mediagoblin.media_types.audio']
-
- queued_filepath = entry.queued_media_file
- queued_filename = workbench.localized_file(
- mgg.queue_store, queued_filepath,
- 'source')
- name_builder = FilenameBuilder(queued_filename)
-
- webm_audio_filepath = create_pub_filepath(
- entry,
- '{original}.webm'.format(
- original=os.path.splitext(
- queued_filepath[-1])[0]))
-
- if audio_config['keep_original']:
- with open(queued_filename, 'rb') as queued_file:
- original_filepath = create_pub_filepath(
- entry, name_builder.fill('{basename}{ext}'))
-
- with mgg.public_store.get_file(original_filepath, 'wb') as \
- original_file:
- _log.debug('Saving original...')
- original_file.write(queued_file.read())
-
- entry.media_files['original'] = original_filepath
-
- transcoder = AudioTranscoder()
-
- with NamedTemporaryFile(dir=workbench.dir) as webm_audio_tmp:
- progress_callback = ProgressCallback(entry)
-
- transcoder.transcode(
- queued_filename,
- webm_audio_tmp.name,
- quality=audio_config['quality'],
+ acceptable_files = ['original', 'best_quality', 'webm_audio']
+
+ def common_setup(self):
+ """
+ Set up the workbench directory and pull down the original file, add
+ the audio_config, transcoder, thumbnailer and spectrogram_tmp path
+ """
+ self.audio_config = mgg \
+ .global_config['media_type:mediagoblin.media_types.audio']
+
+ # Pull down and set up the processing file
+ self.process_filename = get_process_filename(
+ self.entry, self.workbench, self.acceptable_files)
+ self.name_builder = FilenameBuilder(self.process_filename)
+
+ self.transcoder = AudioTranscoder()
+ self.thumbnailer = AudioThumbnailer()
+
+ def copy_original(self):
+ if self.audio_config['keep_original']:
+ copy_original(
+ self.entry, self.process_filename,
+ self.name_builder.fill('{basename}{ext}'))
+
+ def _keep_best(self):
+ """
+ If there is no original, keep the best file that we have
+ """
+ if not self.entry.media_files.get('best_quality'):
+ # Save the best quality file if no original?
+ if not self.entry.media_files.get('original') and \
+ self.entry.media_files.get('webm_audio'):
+ self.entry.media_files['best_quality'] = self.entry \
+ .media_files['webm_audio']
+
+ def transcode(self, quality=None):
+ if not quality:
+ quality = self.audio_config['quality']
+
+ progress_callback = ProgressCallback(self.entry)
+ webm_audio_tmp = os.path.join(self.workbench.dir,
+ self.name_builder.fill(
+ '{basename}{ext}'))
+
+ self.transcoder.transcode(
+ self.process_filename,
+ webm_audio_tmp,
+ quality=quality,
progress_callback=progress_callback)
- transcoder.discover(webm_audio_tmp.name)
+ self.transcoder.discover(webm_audio_tmp)
+
+ self._keep_best()
_log.debug('Saving medium...')
- mgg.public_store.get_file(webm_audio_filepath, 'wb').write(
- webm_audio_tmp.read())
-
- entry.media_files['webm_audio'] = webm_audio_filepath
-
- # entry.media_data_init(length=int(data.audiolength))
-
- if audio_config['create_spectrogram']:
- spectrogram_filepath = create_pub_filepath(
- entry,
- '{original}-spectrogram.jpg'.format(
- original=os.path.splitext(
- queued_filepath[-1])[0]))
-
- with NamedTemporaryFile(dir=workbench.dir, suffix='.ogg') as wav_tmp:
- _log.info('Creating OGG source for spectrogram')
- transcoder.transcode(
- queued_filename,
- wav_tmp.name,
- mux_string='vorbisenc quality={0} ! oggmux'.format(
- audio_config['quality']))
-
- thumbnailer = AudioThumbnailer()
-
- with NamedTemporaryFile(dir=workbench.dir, suffix='.jpg') as spectrogram_tmp:
- thumbnailer.spectrogram(
- wav_tmp.name,
- spectrogram_tmp.name,
- width=mgg.global_config['media:medium']['max_width'],
- fft_size=audio_config['spectrogram_fft_size'])
-
- _log.debug('Saving spectrogram...')
- mgg.public_store.get_file(spectrogram_filepath, 'wb').write(
- spectrogram_tmp.read())
-
- entry.media_files['spectrogram'] = spectrogram_filepath
-
- with NamedTemporaryFile(dir=workbench.dir, suffix='.jpg') as thumb_tmp:
- thumbnailer.thumbnail_spectrogram(
- spectrogram_tmp.name,
- thumb_tmp.name,
- (mgg.global_config['media:thumb']['max_width'],
- mgg.global_config['media:thumb']['max_height']))
-
- thumb_filepath = create_pub_filepath(
- entry,
- '{original}-thumbnail.jpg'.format(
- original=os.path.splitext(
- queued_filepath[-1])[0]))
-
- mgg.public_store.get_file(thumb_filepath, 'wb').write(
- thumb_tmp.read())
-
- entry.media_files['thumb'] = thumb_filepath
- else:
- entry.media_files['thumb'] = ['fake', 'thumb', 'path.jpg']
-
- # Remove queued media file from storage and database.
- # queued_filepath is in the task_id directory which should
- # be removed too, but fail if the directory is not empty to be on
- # the super-safe side.
- mgg.queue_store.delete_file(queued_filepath) # rm file
- mgg.queue_store.delete_dir(queued_filepath[:-1]) # rm dir
- entry.queued_media_file = []
+ store_public(self.entry, 'webm_audio', webm_audio_tmp,
+ self.name_builder.fill('{basename}.medium.webm'))
+
+ def create_spectrogram(self, max_width=None, fft_size=None):
+ if not max_width:
+ max_width = mgg.global_config['media:medium']['max_width']
+ if not fft_size:
+ fft_size = self.audio_config['spectrogram_fft_size']
+
+ wav_tmp = os.path.join(self.workbench.dir, self.name_builder.fill(
+ '{basename}.ogg'))
+
+ _log.info('Creating OGG source for spectrogram')
+ self.transcoder.transcode(
+ self.process_filename,
+ wav_tmp,
+ mux_string='vorbisenc quality={0} ! oggmux'.format(
+ self.audio_config['quality']))
+
+ spectrogram_tmp = os.path.join(self.workbench.dir,
+ self.name_builder.fill(
+ '{basename}-spectrogram.jpg'))
+
+ self.thumbnailer.spectrogram(
+ wav_tmp,
+ spectrogram_tmp,
+ width=max_width,
+ fft_size=fft_size)
+
+ _log.debug('Saving spectrogram...')
+ store_public(self.entry, 'spectrogram', spectrogram_tmp,
+ self.name_builder.fill('{basename}.spectrogram.jpg'))
+
+ def generate_thumb(self, size=None):
+ if not size:
+ max_width = mgg.global_config['media:thumb']['max_width']
+ max_height = mgg.global_config['media:thumb']['max_height']
+ size = (max_width, max_height)
+
+ thumb_tmp = os.path.join(self.workbench.dir, self.name_builder.fill(
+ '{basename}-thumbnail.jpg'))
+
+ # We need the spectrogram to create a thumbnail
+ spectrogram = self.entry.media_files.get('spectrogram')
+ if not spectrogram:
+ _log.info('No spectrogram found, we will create one.')
+ self.create_spectrogram()
+ spectrogram = self.entry.media_files['spectrogram']
+
+ spectrogram_filepath = mgg.public_store.get_local_path(spectrogram)
+
+ self.thumbnailer.thumbnail_spectrogram(
+ spectrogram_filepath,
+ thumb_tmp,
+ tuple(size))
+
+ store_public(self.entry, 'thumb', thumb_tmp,
+ self.name_builder.fill('{basename}.thumbnail.jpg'))
+
+
+class InitialProcessor(CommonAudioProcessor):
+ """
+ Initial processing steps for new audio
+ """
+ name = "initial"
+ description = "Initial processing"
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ """
+ Determine if this media type is eligible for processing
+ """
+ if not state:
+ state = entry.state
+ return state in (
+ "unprocessed", "failed")
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--quality',
+ type=float,
+ help='vorbisenc quality. Range: -0.1..1')
+
+ parser.add_argument(
+ '--fft_size',
+ type=int,
+ help='spectrogram fft size')
+
+ parser.add_argument(
+ '--thumb_size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int,
+ help='minimum size is 100 x 100')
+
+ parser.add_argument(
+ '--medium_width',
+ type=int,
+ help='The width of the spectrogram')
+
+ parser.add_argument(
+ '--create_spectrogram',
+ action='store_true',
+ help='Create spectrogram and thumbnail, will default to config')
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['create_spectrogram', 'quality', 'fft_size',
+ 'thumb_size', 'medium_width'])
+
+ def process(self, quality=None, fft_size=None, thumb_size=None,
+ create_spectrogram=None, medium_width=None):
+ self.common_setup()
+
+ if not create_spectrogram:
+ create_spectrogram = self.audio_config['create_spectrogram']
+
+ self.transcode(quality=quality)
+ self.copy_original()
+
+ if create_spectrogram:
+ self.create_spectrogram(max_width=medium_width, fft_size=fft_size)
+ self.generate_thumb(size=thumb_size)
+ self.delete_queue_file()
+
+
+class Resizer(CommonAudioProcessor):
+ """
+ Thumbnail and spectrogram resizing process steps for processed audio
+ """
+ name = 'resize'
+ description = 'Resize thumbnail or spectrogram'
+ thumb_size = 'thumb_size'
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ """
+ Determine if this media entry is eligible for processing
+ """
+ if not state:
+ state = entry.state
+ return state in 'processed'
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--fft_size',
+ type=int,
+ help='spectrogram fft size')
+
+ parser.add_argument(
+ '--thumb_size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int,
+ help='minimum size is 100 x 100')
+
+ parser.add_argument(
+ '--medium_width',
+ type=int,
+ help='The width of the spectrogram')
+
+ parser.add_argument(
+ 'file',
+ choices=['thumb', 'spectrogram'])
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['thumb_size', 'file', 'fft_size', 'medium_width'])
+
+ def process(self, file, thumb_size=None, fft_size=None,
+ medium_width=None):
+ self.common_setup()
+
+ if file == 'thumb':
+ self.generate_thumb(size=thumb_size)
+ elif file == 'spectrogram':
+ self.create_spectrogram(max_width=medium_width, fft_size=fft_size)
+
+
+class Transcoder(CommonAudioProcessor):
+ """
+ Transcoding processing steps for processed audio
+ """
+ name = 'transcode'
+ description = 'Re-transcode audio'
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ if not state:
+ state = entry.state
+ return state in 'processed'
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--quality',
+ help='vorbisenc quality. Range: -0.1..1')
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['quality'])
+
+ def process(self, quality=None):
+ self.common_setup()
+ self.transcode(quality=quality)
+
+
+class AudioProcessingManager(ProcessingManager):
+ def __init__(self):
+ super(self.__class__, self).__init__()
+ self.add_processor(InitialProcessor)
+ self.add_processor(Resizer)
+ self.add_processor(Transcoder)
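Mirroring the available subcommand, a manager can be asked which processors apply to a given state; a sketch (Python 2, matching the codebase):

    manager = AudioProcessingManager()
    for processor in manager.list_all_processors_by_state('processed'):
        print processor.name
    # expected to list 'resize' and 'transcode'; 'initial' only matches
    # unprocessed or failed entries.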
diff --git a/mediagoblin/media_types/audio/transcoders.py b/mediagoblin/media_types/audio/transcoders.py
index 84e6af7e..150dad8e 100644
--- a/mediagoblin/media_types/audio/transcoders.py
+++ b/mediagoblin/media_types/audio/transcoders.py
@@ -122,8 +122,7 @@ class AudioThumbnailer(object):
int(start_x), 0,
int(stop_x), int(im_h)))
- if th.size[0] > th_w or th.size[1] > th_h:
- th.thumbnail(thumb_size, Image.ANTIALIAS)
+ th.thumbnail(thumb_size, Image.ANTIALIAS)
th.save(dst)
diff --git a/mediagoblin/media_types/image/__init__.py b/mediagoblin/media_types/image/__init__.py
index bf42e0b3..380304e6 100644
--- a/mediagoblin/media_types/image/__init__.py
+++ b/mediagoblin/media_types/image/__init__.py
@@ -14,12 +14,15 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
+import logging
from mediagoblin.media_types import MediaManagerBase
-from mediagoblin.media_types.image.processing import process_image, \
- sniff_handler
+from mediagoblin.media_types.image.processing import sniff_handler, \
+ ImageProcessingManager
from mediagoblin.tools import pluginapi
+_log = logging.getLogger(__name__)
+
ACCEPTED_EXTENSIONS = ["jpg", "jpeg", "png", "gif", "tiff"]
MEDIA_TYPE = 'mediagoblin.media_types.image'
@@ -31,7 +34,6 @@ def setup_plugin():
class ImageMediaManager(MediaManagerBase):
human_readable = "Image"
- processor = staticmethod(process_image)
display_template = "mediagoblin/media_displays/image.html"
default_thumb = "images/media_thumbs/image.png"
@@ -69,4 +71,5 @@ hooks = {
'get_media_type_and_manager': get_media_type_and_manager,
'sniff_handler': sniff_handler,
('media_manager', MEDIA_TYPE): lambda: ImageMediaManager,
+ ('reprocess_manager', MEDIA_TYPE): lambda: ImageProcessingManager,
}
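The ('reprocess_manager', MEDIA_TYPE) hook is what the reprocessing machinery looks up to find this class; a sketch of that lookup, assuming hook_handle() from mediagoblin.tools.pluginapi accepts the same tuple key used in the hooks dict:

    from mediagoblin.tools.pluginapi import hook_handle

    manager_class = hook_handle(
        ('reprocess_manager', 'mediagoblin.media_types.image'))
    manager = manager_class()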
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index baf2ac7e..088979bc 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -20,9 +20,14 @@ except ImportError:
import Image
import os
import logging
+import argparse
from mediagoblin import mg_globals as mgg
-from mediagoblin.processing import BadMediaFail, FilenameBuilder
+from mediagoblin.processing import (
+ BadMediaFail, FilenameBuilder,
+ MediaProcessor, ProcessingManager,
+ request_from_args, get_process_filename,
+ store_public, copy_original)
from mediagoblin.tools.exif import exif_fix_image_orientation, \
extract_exif, clean_exif, get_gps_data, get_useful, \
exif_image_needs_rotation
@@ -38,8 +43,8 @@ PIL_FILTERS = {
MEDIA_TYPE = 'mediagoblin.media_types.image'
-def resize_image(proc_state, resized, keyname, target_name, new_size,
- exif_tags, workdir):
+def resize_image(entry, resized, keyname, target_name, new_size,
+ exif_tags, workdir, quality, filter):
"""
Store a resized version of an image and return its pathname.
@@ -51,17 +56,16 @@ def resize_image(proc_state, resized, keyname, target_name, new_size,
exif_tags -- EXIF data for the original image
workdir -- directory path for storing converted image files
new_size -- 2-tuple size for the resized image
+ quality -- level of compression used when resizing images
+ filter -- One of BICUBIC, BILINEAR, NEAREST, ANTIALIAS
"""
- config = mgg.global_config['media_type:mediagoblin.media_types.image']
-
resized = exif_fix_image_orientation(resized, exif_tags) # Fix orientation
- filter_config = config['resize_filter']
try:
- resize_filter = PIL_FILTERS[filter_config.upper()]
+ resize_filter = PIL_FILTERS[filter.upper()]
except KeyError:
raise Exception('Filter "{0}" not found, choose one of {1}'.format(
- unicode(filter_config),
+ unicode(filter),
u', '.join(PIL_FILTERS.keys())))
resized.thumbnail(new_size, resize_filter)
@@ -69,32 +73,36 @@ def resize_image(proc_state, resized, keyname, target_name, new_size,
# Copy the new file to the conversion subdir, then remotely.
tmp_resized_filename = os.path.join(workdir, target_name)
with file(tmp_resized_filename, 'w') as resized_file:
- resized.save(resized_file, quality=config['quality'])
- proc_state.store_public(keyname, tmp_resized_filename, target_name)
+ resized.save(resized_file, quality=quality)
+ store_public(entry, keyname, tmp_resized_filename, target_name)
+
+def resize_tool(entry,
+ force, keyname, orig_file, target_name,
+ conversions_subdir, exif_tags, quality, filter, new_size=None):
+ # Use the default size if new_size was not given
+ if not new_size:
+ max_width = mgg.global_config['media:' + keyname]['max_width']
+ max_height = mgg.global_config['media:' + keyname]['max_height']
+ new_size = (max_width, max_height)
-def resize_tool(proc_state, force, keyname, target_name,
- conversions_subdir, exif_tags):
- # filename -- the filename of the original image being resized
- filename = proc_state.get_queued_filename()
- max_width = mgg.global_config['media:' + keyname]['max_width']
- max_height = mgg.global_config['media:' + keyname]['max_height']
# If the size of the original file exceeds the specified size for the desired
# file, a target_name file is created and later associated with the media
# entry.
# Also created if the file needs rotation, or if forced.
try:
- im = Image.open(filename)
+ im = Image.open(orig_file)
except IOError:
raise BadMediaFail()
if force \
- or im.size[0] > max_width \
- or im.size[1] > max_height \
+ or im.size[0] > new_size[0]\
+ or im.size[1] > new_size[1]\
or exif_image_needs_rotation(exif_tags):
resize_image(
- proc_state, im, unicode(keyname), target_name,
- (max_width, max_height),
- exif_tags, conversions_subdir)
+ entry, im, unicode(keyname), target_name,
+ tuple(new_size),
+ exif_tags, conversions_subdir,
+ quality, filter)
SUPPORTED_FILETYPES = ['png', 'gif', 'jpg', 'jpeg', 'tiff']
@@ -119,53 +127,210 @@ def sniff_handler(media_file, **kw):
return None
-def process_image(proc_state):
- """Code to process an image. Will be run by celery.
-
- A Workbench() represents a local tempory dir. It is automatically
- cleaned up when this function exits.
+class CommonImageProcessor(MediaProcessor):
"""
- entry = proc_state.entry
- workbench = proc_state.workbench
-
- # Conversions subdirectory to avoid collisions
- conversions_subdir = os.path.join(
- workbench.dir, 'conversions')
- os.mkdir(conversions_subdir)
-
- queued_filename = proc_state.get_queued_filename()
- name_builder = FilenameBuilder(queued_filename)
-
- # EXIF extraction
- exif_tags = extract_exif(queued_filename)
- gps_data = get_gps_data(exif_tags)
-
- # Always create a small thumbnail
- resize_tool(proc_state, True, 'thumb',
- name_builder.fill('{basename}.thumbnail{ext}'),
- conversions_subdir, exif_tags)
-
- # Possibly create a medium
- resize_tool(proc_state, False, 'medium',
- name_builder.fill('{basename}.medium{ext}'),
- conversions_subdir, exif_tags)
-
- # Copy our queued local workbench to its final destination
- proc_state.copy_original(name_builder.fill('{basename}{ext}'))
-
- # Remove queued media file from storage and database
- proc_state.delete_queue_file()
-
- # Insert exif data into database
- exif_all = clean_exif(exif_tags)
-
- if len(exif_all):
- entry.media_data_init(exif_all=exif_all)
-
- if len(gps_data):
- for key in list(gps_data.keys()):
- gps_data['gps_' + key] = gps_data.pop(key)
- entry.media_data_init(**gps_data)
+ Provides a base for various media processing steps
+ """
+ # list of acceptable file keys in order of preference for reprocessing
+ acceptable_files = ['original', 'medium']
+
+ def common_setup(self):
+ """
+ Set up the workbench directory and pull down the original file
+ """
+ self.image_config = mgg.global_config[
+ 'media_type:mediagoblin.media_types.image']
+
+ ## @@: Should this be two functions?
+ # Conversions subdirectory to avoid collisions
+ self.conversions_subdir = os.path.join(
+ self.workbench.dir, 'conversions')
+ os.mkdir(self.conversions_subdir)
+
+ # Pull down and set up the processing file
+ self.process_filename = get_process_filename(
+ self.entry, self.workbench, self.acceptable_files)
+ self.name_builder = FilenameBuilder(self.process_filename)
+
+ # Exif extraction
+ self.exif_tags = extract_exif(self.process_filename)
+
+ def generate_medium_if_applicable(self, size=None, quality=None,
+ filter=None):
+ if not quality:
+ quality = self.image_config['quality']
+ if not filter:
+ filter = self.image_config['resize_filter']
+
+ resize_tool(self.entry, False, 'medium', self.process_filename,
+ self.name_builder.fill('{basename}.medium{ext}'),
+ self.conversions_subdir, self.exif_tags, quality,
+ filter, size)
+
+ def generate_thumb(self, size=None, quality=None, filter=None):
+ if not quality:
+ quality = self.image_config['quality']
+ if not filter:
+ filter = self.image_config['resize_filter']
+
+ resize_tool(self.entry, True, 'thumb', self.process_filename,
+ self.name_builder.fill('{basename}.thumbnail{ext}'),
+ self.conversions_subdir, self.exif_tags, quality,
+ filter, size)
+
+ def copy_original(self):
+ copy_original(
+ self.entry, self.process_filename,
+ self.name_builder.fill('{basename}{ext}'))
+
+ def extract_metadata(self):
+ # Is there any GPS data
+ gps_data = get_gps_data(self.exif_tags)
+
+ # Insert exif data into database
+ exif_all = clean_exif(self.exif_tags)
+
+ if len(exif_all):
+ self.entry.media_data_init(exif_all=exif_all)
+
+ if len(gps_data):
+ for key in list(gps_data.keys()):
+ gps_data['gps_' + key] = gps_data.pop(key)
+ self.entry.media_data_init(**gps_data)
+
+
+class InitialProcessor(CommonImageProcessor):
+ """
+ Initial processing step for new images
+ """
+ name = "initial"
+ description = "Initial processing"
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ """
+ Determine if this media type is eligible for processing
+ """
+ if not state:
+ state = entry.state
+ return state in (
+ "unprocessed", "failed")
+
+ ###############################
+ # Command line interface things
+ ###############################
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ parser.add_argument(
+ '--thumb-size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ parser.add_argument(
+ '--filter',
+ choices=['BICUBIC', 'BILINEAR', 'NEAREST', 'ANTIALIAS'])
+
+ parser.add_argument(
+ '--quality',
+ type=int,
+ help='level of compression used when resizing images')
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['size', 'thumb_size', 'filter', 'quality'])
+
+ def process(self, size=None, thumb_size=None, quality=None, filter=None):
+ self.common_setup()
+ self.generate_medium_if_applicable(size=size, filter=filter,
+ quality=quality)
+ self.generate_thumb(size=thumb_size, filter=filter, quality=quality)
+ self.copy_original()
+ self.extract_metadata()
+ self.delete_queue_file()
+
+
+class Resizer(CommonImageProcessor):
+ """
+ Resizing process steps for processed media
+ """
+ name = 'resize'
+ description = 'Resize image'
+ thumb_size = 'size'
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ """
+ Determine if this media type is eligible for processing
+ """
+ if not state:
+ state = entry.state
+ return state in 'processed'
+
+ ###############################
+ # Command line interface things
+ ###############################
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ parser.add_argument(
+ '--filter',
+ choices=['BICUBIC', 'BILINEAR', 'NEAREST', 'ANTIALIAS'])
+
+ parser.add_argument(
+ '--quality',
+ type=int,
+ help='level of compression used when resizing images')
+
+ parser.add_argument(
+ 'file',
+ choices=['medium', 'thumb'])
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['size', 'file', 'quality', 'filter'])
+
+ def process(self, file, size=None, filter=None, quality=None):
+ self.common_setup()
+ if file == 'medium':
+ self.generate_medium_if_applicable(size=size, filter=filter,
+ quality=quality)
+ elif file == 'thumb':
+ self.generate_thumb(size=size, filter=filter, quality=quality)
+
+
+class ImageProcessingManager(ProcessingManager):
+ def __init__(self):
+ super(self.__class__, self).__init__()
+ self.add_processor(InitialProcessor)
+ self.add_processor(Resizer)
if __name__ == '__main__':
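For reference, the reworked resize_tool() now receives quality and filter explicitly instead of reading the config itself; a hedged sketch of a call (entry and exif_tags are assumed to be in scope, and the paths and values are hypothetical):

    resize_tool(entry, True, 'thumb',
                '/tmp/work/photo.jpg', 'photo.thumbnail.jpg',
                '/tmp/work/conversions', exif_tags,
                quality=90, filter='ANTIALIAS', new_size=(180, 180))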
diff --git a/mediagoblin/media_types/pdf/__init__.py b/mediagoblin/media_types/pdf/__init__.py
index 67509ddc..bc5c373b 100644
--- a/mediagoblin/media_types/pdf/__init__.py
+++ b/mediagoblin/media_types/pdf/__init__.py
@@ -15,7 +15,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from mediagoblin.media_types import MediaManagerBase
-from mediagoblin.media_types.pdf.processing import process_pdf, \
+from mediagoblin.media_types.pdf.processing import PdfProcessingManager, \
sniff_handler
from mediagoblin.tools import pluginapi
@@ -29,7 +29,6 @@ def setup_plugin():
class PDFMediaManager(MediaManagerBase):
human_readable = "PDF"
- processor = staticmethod(process_pdf)
display_template = "mediagoblin/media_displays/pdf.html"
default_thumb = "images/media_thumbs/pdf.jpg"
@@ -44,4 +43,5 @@ hooks = {
'get_media_type_and_manager': get_media_type_and_manager,
'sniff_handler': sniff_handler,
('media_manager', MEDIA_TYPE): lambda: PDFMediaManager,
+ ('reprocess_manager', MEDIA_TYPE): lambda: PdfProcessingManager,
}
diff --git a/mediagoblin/media_types/pdf/processing.py b/mediagoblin/media_types/pdf/processing.py
index f35b4376..549def69 100644
--- a/mediagoblin/media_types/pdf/processing.py
+++ b/mediagoblin/media_types/pdf/processing.py
@@ -13,14 +13,18 @@
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import argparse
import os
import logging
import dateutil.parser
from subprocess import PIPE, Popen
from mediagoblin import mg_globals as mgg
-from mediagoblin.processing import (create_pub_filepath,
- FilenameBuilder, BadMediaFail)
+from mediagoblin.processing import (
+ FilenameBuilder, BadMediaFail,
+ MediaProcessor, ProcessingManager,
+ request_from_args, get_process_filename,
+ store_public, copy_original)
from mediagoblin.tools.translate import fake_ugettext_passthrough as _
_log = logging.getLogger(__name__)
@@ -230,51 +234,207 @@ def pdf_info(original):
return ret_dict
-def process_pdf(proc_state):
- """Code to process a pdf file. Will be run by celery.
- A Workbench() represents a local tempory dir. It is automatically
- cleaned up when this function exits.
+class CommonPdfProcessor(MediaProcessor):
"""
- entry = proc_state.entry
- workbench = proc_state.workbench
-
- queued_filename = proc_state.get_queued_filename()
- name_builder = FilenameBuilder(queued_filename)
-
- # Copy our queued local workbench to its final destination
- original_dest = name_builder.fill('{basename}{ext}')
- proc_state.copy_original(original_dest)
-
- # Create a pdf if this is a different doc, store pdf for viewer
- ext = queued_filename.rsplit('.', 1)[-1].lower()
- if ext == 'pdf':
- pdf_filename = queued_filename
- else:
- pdf_filename = queued_filename.rsplit('.', 1)[0] + '.pdf'
+ Provides a base for various pdf processing steps
+ """
+ acceptable_files = ['original', 'pdf']
+
+ def common_setup(self):
+ """
+ Set up common pdf processing steps
+ """
+ # Pull down and set up the processing file
+ self.process_filename = get_process_filename(
+ self.entry, self.workbench, self.acceptable_files)
+ self.name_builder = FilenameBuilder(self.process_filename)
+
+ self._set_pdf_filename()
+
+ def _set_pdf_filename(self):
+ if self.name_builder.ext == '.pdf':
+ self.pdf_filename = self.process_filename
+ elif self.entry.media_files.get('pdf'):
+ self.pdf_filename = self.workbench.localized_file(
+ mgg.public_store, self.entry.media_files['pdf'])
+ else:
+ self.pdf_filename = self._generate_pdf()
+
+ def copy_original(self):
+ copy_original(
+ self.entry, self.process_filename,
+ self.name_builder.fill('{basename}{ext}'))
+
+ def generate_thumb(self, thumb_size=None):
+ if not thumb_size:
+ thumb_size = (mgg.global_config['media:thumb']['max_width'],
+ mgg.global_config['media:thumb']['max_height'])
+
+ # Note: pdftocairo adds '.png', so don't include an ext
+ thumb_filename = os.path.join(self.workbench.dir,
+ self.name_builder.fill(
+ '{basename}.thumbnail'))
+
+ executable = where('pdftocairo')
+ args = [executable, '-scale-to', str(min(thumb_size)),
+ '-singlefile', '-png', self.pdf_filename, thumb_filename]
+
+ _log.debug('calling {0}'.format(repr(' '.join(args))))
+ Popen(executable=executable, args=args).wait()
+
+ # since pdftocairo added '.png', we need to include it with the
+ # filename
+ store_public(self.entry, 'thumb', thumb_filename + '.png',
+ self.name_builder.fill('{basename}.thumbnail.png'))
+
+ def _generate_pdf(self):
+ """
+ Store the pdf. If the file is not a pdf, make it a pdf
+ """
+        # unoconv writes its output alongside the source file, with the
+        # original extension replaced by .pdf
+        tmp_pdf = self.process_filename.rsplit('.', 1)[0] + '.pdf'
+
unoconv = where('unoconv')
Popen(executable=unoconv,
- args=[unoconv, '-v', '-f', 'pdf', queued_filename]).wait()
- if not os.path.exists(pdf_filename):
+ args=[unoconv, '-v', '-f', 'pdf', self.process_filename]).wait()
+
+ if not os.path.exists(tmp_pdf):
_log.debug('unoconv failed to convert file to pdf')
raise BadMediaFail()
- proc_state.store_public(keyname=u'pdf', local_file=pdf_filename)
-
- pdf_info_dict = pdf_info(pdf_filename)
-
- for name, width, height in [
- (u'thumb', mgg.global_config['media:thumb']['max_width'],
- mgg.global_config['media:thumb']['max_height']),
- (u'medium', mgg.global_config['media:medium']['max_width'],
- mgg.global_config['media:medium']['max_height']),
- ]:
- filename = name_builder.fill('{basename}.%s.png' % name)
- path = workbench.joinpath(filename)
- create_pdf_thumb(pdf_filename, path, width, height)
- assert(os.path.exists(path))
- proc_state.store_public(keyname=name, local_file=path)
-
- proc_state.delete_queue_file()
-
- entry.media_data_init(**pdf_info_dict)
- entry.save()
+
+ store_public(self.entry, 'pdf', tmp_pdf,
+ self.name_builder.fill('{basename}.pdf'))
+
+ return self.workbench.localized_file(
+ mgg.public_store, self.entry.media_files['pdf'])
+
+ def extract_pdf_info(self):
+ pdf_info_dict = pdf_info(self.pdf_filename)
+ self.entry.media_data_init(**pdf_info_dict)
+
+ def generate_medium(self, size=None):
+ if not size:
+ size = (mgg.global_config['media:medium']['max_width'],
+ mgg.global_config['media:medium']['max_height'])
+
+ # Note: pdftocairo adds '.png', so don't include an ext
+ filename = os.path.join(self.workbench.dir,
+ self.name_builder.fill('{basename}.medium'))
+
+ executable = where('pdftocairo')
+ args = [executable, '-scale-to', str(min(size)),
+ '-singlefile', '-png', self.pdf_filename, filename]
+
+ _log.debug('calling {0}'.format(repr(' '.join(args))))
+ Popen(executable=executable, args=args).wait()
+
+ # since pdftocairo added '.png', we need to include it with the
+ # filename
+ store_public(self.entry, 'medium', filename + '.png',
+ self.name_builder.fill('{basename}.medium.png'))
+
+
+class InitialProcessor(CommonPdfProcessor):
+ """
+ Initial processing step for new pdfs
+ """
+ name = "initial"
+ description = "Initial processing"
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ """
+ Determine if this media type is eligible for processing
+ """
+ if not state:
+ state = entry.state
+ return state in (
+ "unprocessed", "failed")
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ parser.add_argument(
+ '--thumb-size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['size', 'thumb_size'])
+
+ def process(self, size=None, thumb_size=None):
+ self.common_setup()
+ self.extract_pdf_info()
+ self.copy_original()
+ self.generate_medium(size=size)
+ self.generate_thumb(thumb_size=thumb_size)
+ self.delete_queue_file()
+
+
+class Resizer(CommonPdfProcessor):
+ """
+ Resizing process steps for processed pdfs
+ """
+ name = 'resize'
+ description = 'Resize thumbnail and medium'
+ thumb_size = 'size'
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ """
+ Determine if this media type is eligible for processing
+ """
+ if not state:
+ state = entry.state
+        return state == 'processed'
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ parser.add_argument(
+ 'file',
+ choices=['medium', 'thumb'])
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['size', 'file'])
+
+ def process(self, file, size=None):
+ self.common_setup()
+ if file == 'medium':
+ self.generate_medium(size=size)
+ elif file == 'thumb':
+ self.generate_thumb(thumb_size=size)
+
+
+class PdfProcessingManager(ProcessingManager):
+ def __init__(self):
+        super(PdfProcessingManager, self).__init__()
+ self.add_processor(InitialProcessor)
+ self.add_processor(Resizer)
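+
+
+# A sketch of what this manager exposes for a given entry (the entry
+# variable here is hypothetical):
+#
+#   manager = PdfProcessingManager()
+#   manager.list_eligible_processors(entry)   # e.g. [Resizer]
+#   manager.get_processor(u'resize', entry)   # -> Resizer, if eligible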
diff --git a/mediagoblin/media_types/stl/__init__.py b/mediagoblin/media_types/stl/__init__.py
index 1d2a8478..7170a45b 100644
--- a/mediagoblin/media_types/stl/__init__.py
+++ b/mediagoblin/media_types/stl/__init__.py
@@ -15,7 +15,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from mediagoblin.media_types import MediaManagerBase
-from mediagoblin.media_types.stl.processing import process_stl, \
+from mediagoblin.media_types.stl.processing import StlProcessingManager, \
sniff_handler
from mediagoblin.tools import pluginapi
@@ -29,7 +29,6 @@ def setup_plugin():
class STLMediaManager(MediaManagerBase):
human_readable = "stereo lithographics"
- processor = staticmethod(process_stl)
display_template = "mediagoblin/media_displays/stl.html"
default_thumb = "images/media_thumbs/video.jpg"
@@ -43,4 +42,5 @@ hooks = {
'get_media_type_and_manager': get_media_type_and_manager,
'sniff_handler': sniff_handler,
('media_manager', MEDIA_TYPE): lambda: STLMediaManager,
+ ('reprocess_manager', MEDIA_TYPE): lambda: StlProcessingManager,
}
diff --git a/mediagoblin/media_types/stl/processing.py b/mediagoblin/media_types/stl/processing.py
index 53751416..77d3d86e 100644
--- a/mediagoblin/media_types/stl/processing.py
+++ b/mediagoblin/media_types/stl/processing.py
@@ -14,6 +14,7 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import argparse
import os
import json
import logging
@@ -21,8 +22,11 @@ import subprocess
import pkg_resources
from mediagoblin import mg_globals as mgg
-from mediagoblin.processing import create_pub_filepath, \
- FilenameBuilder
+from mediagoblin.processing import (
+ FilenameBuilder, MediaProcessor,
+ ProcessingManager, request_from_args,
+ get_process_filename, store_public,
+ copy_original)
from mediagoblin.media_types.stl import model_loader
@@ -75,49 +79,61 @@ def blender_render(config):
env=env)
-def process_stl(proc_state):
- """Code to process an stl or obj model. Will be run by celery.
-
- A Workbench() represents a local tempory dir. It is automatically
- cleaned up when this function exits.
+class CommonStlProcessor(MediaProcessor):
"""
- entry = proc_state.entry
- workbench = proc_state.workbench
-
- queued_filepath = entry.queued_media_file
- queued_filename = workbench.localized_file(
- mgg.queue_store, queued_filepath, 'source')
- name_builder = FilenameBuilder(queued_filename)
-
- ext = queued_filename.lower().strip()[-4:]
- if ext.startswith("."):
- ext = ext[1:]
- else:
- ext = None
-
- # Attempt to parse the model file and divine some useful
- # information about it.
- with open(queued_filename, 'rb') as model_file:
- model = model_loader.auto_detect(model_file, ext)
-
- # generate preview images
- greatest = [model.width, model.height, model.depth]
- greatest.sort()
- greatest = greatest[-1]
-
- def snap(name, camera, width=640, height=640, project="ORTHO"):
- filename = name_builder.fill(name)
- workbench_path = workbench.joinpath(filename)
+ Provides a common base for various stl processing steps
+ """
+ acceptable_files = ['original']
+
+ def common_setup(self):
+ # Pull down and set up the processing file
+ self.process_filename = get_process_filename(
+ self.entry, self.workbench, self.acceptable_files)
+ self.name_builder = FilenameBuilder(self.process_filename)
+
+ self._set_ext()
+ self._set_model()
+ self._set_greatest()
+
+ def _set_ext(self):
+ ext = self.name_builder.ext[1:]
+
+ if not ext:
+ ext = None
+
+ self.ext = ext
+
+ def _set_model(self):
+ """
+ Attempt to parse the model file and divine some useful
+ information about it.
+ """
+ with open(self.process_filename, 'rb') as model_file:
+ self.model = model_loader.auto_detect(model_file, self.ext)
+
+ def _set_greatest(self):
+ greatest = [self.model.width, self.model.height, self.model.depth]
+ greatest.sort()
+ self.greatest = greatest[-1]
+
+ def copy_original(self):
+ copy_original(
+ self.entry, self.process_filename,
+ self.name_builder.fill('{basename}{ext}'))
+
+ def _snap(self, keyname, name, camera, size, project="ORTHO"):
+ filename = self.name_builder.fill(name)
+ workbench_path = self.workbench.joinpath(filename)
shot = {
- "model_path": queued_filename,
- "model_ext": ext,
+ "model_path": self.process_filename,
+ "model_ext": self.ext,
"camera_coord": camera,
- "camera_focus": model.average,
- "camera_clip": greatest*10,
- "greatest": greatest,
+ "camera_focus": self.model.average,
+ "camera_clip": self.greatest*10,
+ "greatest": self.greatest,
"projection": project,
- "width": width,
- "height": height,
+ "width": size[0],
+ "height": size[1],
"out_file": workbench_path,
}
blender_render(shot)
@@ -126,70 +142,191 @@ def process_stl(proc_state):
assert os.path.exists(workbench_path)
# copy it up!
- with open(workbench_path, 'rb') as rendered_file:
- public_path = create_pub_filepath(entry, filename)
-
- with mgg.public_store.get_file(public_path, "wb") as public_file:
- public_file.write(rendered_file.read())
-
- return public_path
-
- thumb_path = snap(
- "{basename}.thumb.jpg",
- [0, greatest*-1.5, greatest],
- mgg.global_config['media:thumb']['max_width'],
- mgg.global_config['media:thumb']['max_height'],
- project="PERSP")
-
- perspective_path = snap(
- "{basename}.perspective.jpg",
- [0, greatest*-1.5, greatest], project="PERSP")
-
- topview_path = snap(
- "{basename}.top.jpg",
- [model.average[0], model.average[1], greatest*2])
-
- frontview_path = snap(
- "{basename}.front.jpg",
- [model.average[0], greatest*-2, model.average[2]])
-
- sideview_path = snap(
- "{basename}.side.jpg",
- [greatest*-2, model.average[1], model.average[2]])
-
- ## Save the public file stuffs
- model_filepath = create_pub_filepath(
- entry, name_builder.fill('{basename}{ext}'))
-
- with mgg.public_store.get_file(model_filepath, 'wb') as model_file:
- with open(queued_filename, 'rb') as queued_file:
- model_file.write(queued_file.read())
-
- # Remove queued media file from storage and database.
- # queued_filepath is in the task_id directory which should
- # be removed too, but fail if the directory is not empty to be on
- # the super-safe side.
- mgg.queue_store.delete_file(queued_filepath) # rm file
- mgg.queue_store.delete_dir(queued_filepath[:-1]) # rm dir
- entry.queued_media_file = []
-
- # Insert media file information into database
- media_files_dict = entry.setdefault('media_files', {})
- media_files_dict[u'original'] = model_filepath
- media_files_dict[u'thumb'] = thumb_path
- media_files_dict[u'perspective'] = perspective_path
- media_files_dict[u'top'] = topview_path
- media_files_dict[u'side'] = sideview_path
- media_files_dict[u'front'] = frontview_path
-
- # Put model dimensions into the database
- dimensions = {
- "center_x" : model.average[0],
- "center_y" : model.average[1],
- "center_z" : model.average[2],
- "width" : model.width,
- "height" : model.height,
- "depth" : model.depth,
- "file_type" : ext,
- }
- entry.media_data_init(**dimensions)
+ store_public(self.entry, keyname, workbench_path, filename)
+
+ def generate_thumb(self, thumb_size=None):
+ if not thumb_size:
+ thumb_size = (mgg.global_config['media:thumb']['max_width'],
+ mgg.global_config['media:thumb']['max_height'])
+
+ self._snap(
+ "thumb",
+ "{basename}.thumb.jpg",
+ [0, self.greatest*-1.5, self.greatest],
+ thumb_size,
+ project="PERSP")
+
+ def generate_perspective(self, size=None):
+ if not size:
+ size = (mgg.global_config['media:medium']['max_width'],
+ mgg.global_config['media:medium']['max_height'])
+
+ self._snap(
+ "perspective",
+ "{basename}.perspective.jpg",
+ [0, self.greatest*-1.5, self.greatest],
+ size,
+ project="PERSP")
+
+ def generate_topview(self, size=None):
+ if not size:
+ size = (mgg.global_config['media:medium']['max_width'],
+ mgg.global_config['media:medium']['max_height'])
+
+ self._snap(
+ "top",
+ "{basename}.top.jpg",
+ [self.model.average[0], self.model.average[1],
+ self.greatest*2],
+ size)
+
+ def generate_frontview(self, size=None):
+ if not size:
+ size = (mgg.global_config['media:medium']['max_width'],
+ mgg.global_config['media:medium']['max_height'])
+
+ self._snap(
+ "front",
+ "{basename}.front.jpg",
+ [self.model.average[0], self.greatest*-2,
+ self.model.average[2]],
+ size)
+
+ def generate_sideview(self, size=None):
+ if not size:
+ size = (mgg.global_config['media:medium']['max_width'],
+ mgg.global_config['media:medium']['max_height'])
+
+ self._snap(
+ "side",
+ "{basename}.side.jpg",
+ [self.greatest*-2, self.model.average[1],
+ self.model.average[2]],
+ size)
+
+ def store_dimensions(self):
+ """
+ Put model dimensions into the database
+ """
+ dimensions = {
+ "center_x": self.model.average[0],
+ "center_y": self.model.average[1],
+ "center_z": self.model.average[2],
+ "width": self.model.width,
+ "height": self.model.height,
+ "depth": self.model.depth,
+ "file_type": self.ext,
+ }
+ self.entry.media_data_init(**dimensions)
+
+
+class InitialProcessor(CommonStlProcessor):
+ """
+ Initial processing step for new stls
+ """
+ name = "initial"
+ description = "Initial processing"
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ """
+ Determine if this media type is eligible for processing
+ """
+ if not state:
+ state = entry.state
+ return state in (
+ "unprocessed", "failed")
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ parser.add_argument(
+ '--thumb_size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['size', 'thumb_size'])
+
+ def process(self, size=None, thumb_size=None):
+ self.common_setup()
+ self.generate_thumb(thumb_size=thumb_size)
+ self.generate_perspective(size=size)
+ self.generate_topview(size=size)
+ self.generate_frontview(size=size)
+ self.generate_sideview(size=size)
+ self.store_dimensions()
+ self.copy_original()
+ self.delete_queue_file()
+
+
+class Resizer(CommonStlProcessor):
+ """
+ Resizing process steps for processed stls
+ """
+ name = 'resize'
+    description = 'Resize thumbnail and medium views'
+ thumb_size = 'size'
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ """
+ Determine if this media type is eligible for processing
+ """
+ if not state:
+ state = entry.state
+        return state == 'processed'
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ parser.add_argument(
+ 'file',
+ choices=['medium', 'thumb'])
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['size', 'file'])
+
+ def process(self, file, size=None):
+ self.common_setup()
+ if file == 'medium':
+ self.generate_perspective(size=size)
+ self.generate_topview(size=size)
+ self.generate_frontview(size=size)
+ self.generate_sideview(size=size)
+ elif file == 'thumb':
+ self.generate_thumb(thumb_size=size)
+
+
+class StlProcessingManager(ProcessingManager):
+ def __init__(self):
+        super(StlProcessingManager, self).__init__()
+ self.add_processor(InitialProcessor)
+ self.add_processor(Resizer)
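+
+
+# Illustrative only: regenerating the medium-sized views for entry 1
+# would look roughly like this (see Resizer.generate_parser above; the
+# id value is hypothetical):
+#
+#   ./bin/gmg reprocess run 1 resize medium --size 640 640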
diff --git a/mediagoblin/media_types/video/__init__.py b/mediagoblin/media_types/video/__init__.py
index e8a4308b..0ed19d1b 100644
--- a/mediagoblin/media_types/video/__init__.py
+++ b/mediagoblin/media_types/video/__init__.py
@@ -15,7 +15,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from mediagoblin.media_types import MediaManagerBase
-from mediagoblin.media_types.video.processing import process_video, \
+from mediagoblin.media_types.video.processing import VideoProcessingManager, \
sniff_handler
from mediagoblin.tools import pluginapi
@@ -30,12 +30,11 @@ def setup_plugin():
class VideoMediaManager(MediaManagerBase):
human_readable = "Video"
- processor = staticmethod(process_video)
display_template = "mediagoblin/media_displays/video.html"
default_thumb = "images/media_thumbs/video.jpg"
# Used by the media_entry.get_display_media method
- media_fetch_order = [u'webm_640', u'original']
+ media_fetch_order = [u'webm_video', u'original']
default_webm_type = 'video/webm; codecs="vp8, vorbis"'
@@ -48,4 +47,5 @@ hooks = {
'get_media_type_and_manager': get_media_type_and_manager,
'sniff_handler': sniff_handler,
('media_manager', MEDIA_TYPE): lambda: VideoMediaManager,
+ ('reprocess_manager', MEDIA_TYPE): lambda: VideoProcessingManager,
}
diff --git a/mediagoblin/media_types/video/migrations.py b/mediagoblin/media_types/video/migrations.py
index 442bbd8d..d68e2933 100644
--- a/mediagoblin/media_types/video/migrations.py
+++ b/mediagoblin/media_types/video/migrations.py
@@ -20,6 +20,7 @@ from sqlalchemy import MetaData, Column, Unicode
MIGRATIONS = {}
+
@RegisterMigration(1, MIGRATIONS)
def add_orig_metadata_column(db_conn):
metadata = MetaData(bind=db_conn.bind)
@@ -30,3 +31,19 @@ def add_orig_metadata_column(db_conn):
default=None, nullable=True)
col.create(vid_data)
db_conn.commit()
+
+
+@RegisterMigration(2, MIGRATIONS)
+def webm_640_to_webm_video(db):
+ metadata = MetaData(bind=db.bind)
+
+ file_keynames = inspect_table(metadata, 'core__file_keynames')
+
+ for row in db.execute(file_keynames.select()):
+ if row.name == 'webm_640':
+            db.execute(
+                file_keynames.update().
+                where(file_keynames.c.id == row.id).
+                values(name='webm_video'))
+
+ db.commit()
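+
+# Roughly the equivalent SQL (table name as created by the core
+# migrations):
+#
+#   UPDATE core__file_keynames SET name = 'webm_video'
+#   WHERE name = 'webm_640';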
diff --git a/mediagoblin/media_types/video/models.py b/mediagoblin/media_types/video/models.py
index 0b52c53f..be9d258f 100644
--- a/mediagoblin/media_types/video/models.py
+++ b/mediagoblin/media_types/video/models.py
@@ -36,12 +36,12 @@ class VideoData(Base):
- orig_metadata: A loose json structure containing metadata gstreamer
pulled from the original video.
This field is NOT GUARANTEED to exist!
-
+
Likely metadata extracted:
"videoheight", "videolength", "videowidth",
"audiorate", "audiolength", "audiochannels", "audiowidth",
"mimetype", "tags"
-
+
TODO: document the above better.
"""
__tablename__ = "video__mediadata"
@@ -68,7 +68,7 @@ class VideoData(Base):
"""
orig_metadata = self.orig_metadata or {}
- if "webm_640" not in self.get_media_entry.media_files \
+ if "webm_video" not in self.get_media_entry.media_files \
and "mimetype" in orig_metadata \
and "tags" in orig_metadata \
and "audio-codec" in orig_metadata["tags"] \
diff --git a/mediagoblin/media_types/video/processing.py b/mediagoblin/media_types/video/processing.py
index 857c1647..ed224251 100644
--- a/mediagoblin/media_types/video/processing.py
+++ b/mediagoblin/media_types/video/processing.py
@@ -14,13 +14,18 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import argparse
import os.path
import logging
import datetime
from mediagoblin import mg_globals as mgg
-from mediagoblin.processing import \
- create_pub_filepath, FilenameBuilder, BaseProcessingFail, ProgressCallback
+from mediagoblin.processing import (
+ FilenameBuilder, BaseProcessingFail,
+ ProgressCallback, MediaProcessor,
+ ProcessingManager, request_from_args,
+ get_process_filename, store_public,
+ copy_original)
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
from . import transcoders
@@ -48,115 +53,15 @@ def sniff_handler(media_file, **kw):
if not data:
_log.error('Could not discover {0}'.format(
- kw.get('media')))
+ kw.get('media')))
return None
- if data['is_video'] == True:
+ if data['is_video'] is True:
return MEDIA_TYPE
return None
-def process_video(proc_state):
- """
- Process a video entry, transcode the queued media files (originals) and
- create a thumbnail for the entry.
-
- A Workbench() represents a local tempory dir. It is automatically
- cleaned up when this function exits.
- """
- entry = proc_state.entry
- workbench = proc_state.workbench
- video_config = mgg.global_config['media_type:mediagoblin.media_types.video']
-
- queued_filepath = entry.queued_media_file
- queued_filename = proc_state.get_queued_filename()
- name_builder = FilenameBuilder(queued_filename)
-
- medium_basename = name_builder.fill('{basename}-640p.webm')
- medium_filepath = create_pub_filepath(entry, medium_basename)
-
- thumbnail_basename = name_builder.fill('{basename}.thumbnail.jpg')
- thumbnail_filepath = create_pub_filepath(entry, thumbnail_basename)
-
- # Create a temporary file for the video destination (cleaned up with workbench)
- tmp_dst = os.path.join(workbench.dir, medium_basename)
- # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square
- progress_callback = ProgressCallback(entry)
-
- dimensions = (
- mgg.global_config['media:medium']['max_width'],
- mgg.global_config['media:medium']['max_height'])
-
- # Extract metadata and keep a record of it
- metadata = transcoders.VideoTranscoder().discover(queued_filename)
- store_metadata(entry, metadata)
-
- # Figure out whether or not we need to transcode this video or
- # if we can skip it
- if skip_transcode(metadata):
- _log.debug('Skipping transcoding')
-
- dst_dimensions = metadata['videowidth'], metadata['videoheight']
-
- # Push original file to public storage
- _log.debug('Saving original...')
- proc_state.copy_original(queued_filepath[-1])
-
- did_transcode = False
- else:
- transcoder = transcoders.VideoTranscoder()
-
- transcoder.transcode(queued_filename, tmp_dst,
- vp8_quality=video_config['vp8_quality'],
- vp8_threads=video_config['vp8_threads'],
- vorbis_quality=video_config['vorbis_quality'],
- progress_callback=progress_callback,
- dimensions=dimensions)
-
- dst_dimensions = transcoder.dst_data.videowidth,\
- transcoder.dst_data.videoheight
-
- # Push transcoded video to public storage
- _log.debug('Saving medium...')
- mgg.public_store.copy_local_to_storage(tmp_dst, medium_filepath)
- _log.debug('Saved medium')
-
- entry.media_files['webm_640'] = medium_filepath
-
- did_transcode = True
-
- # Save the width and height of the transcoded video
- entry.media_data_init(
- width=dst_dimensions[0],
- height=dst_dimensions[1])
-
- # Temporary file for the video thumbnail (cleaned up with workbench)
- tmp_thumb = os.path.join(workbench.dir, thumbnail_basename)
-
- # Create a thumbnail.jpg that fits in a 180x180 square
- transcoders.VideoThumbnailerMarkII(
- queued_filename,
- tmp_thumb,
- 180)
-
- # Push the thumbnail to public storage
- _log.debug('Saving thumbnail...')
- mgg.public_store.copy_local_to_storage(tmp_thumb, thumbnail_filepath)
- entry.media_files['thumb'] = thumbnail_filepath
-
- # save the original... but only if we did a transcoding
- # (if we skipped transcoding and just kept the original anyway as the main
- # media, then why would we save the original twice?)
- if video_config['keep_original'] and did_transcode:
- # Push original file to public storage
- _log.debug('Saving original...')
- proc_state.copy_original(queued_filepath[-1])
-
- # Remove queued media file from storage and database
- proc_state.delete_queue_file()
-
-
def store_metadata(media_entry, metadata):
"""
Store metadata from this video for this media entry.
@@ -165,9 +70,9 @@ def store_metadata(media_entry, metadata):
stored_metadata = dict(
[(key, metadata[key])
for key in [
- "videoheight", "videolength", "videowidth",
- "audiorate", "audiolength", "audiochannels", "audiowidth",
- "mimetype"]
+ "videoheight", "videolength", "videowidth",
+ "audiorate", "audiolength", "audiochannels", "audiowidth",
+ "mimetype"]
if key in metadata])
# We have to convert videorate into a sequence because it's a
@@ -186,10 +91,10 @@ def store_metadata(media_entry, metadata):
tags = dict(
[(key, tags_metadata[key])
for key in [
- "application-name", "artist", "audio-codec", "bitrate",
- "container-format", "copyright", "encoder",
- "encoder-version", "license", "nominal-bitrate", "title",
- "video-codec"]
+ "application-name", "artist", "audio-codec", "bitrate",
+ "container-format", "copyright", "encoder",
+ "encoder-version", "license", "nominal-bitrate", "title",
+ "video-codec"]
if key in tags_metadata])
if 'date' in tags_metadata:
date = tags_metadata['date']
@@ -211,3 +116,297 @@ def store_metadata(media_entry, metadata):
if len(stored_metadata):
media_entry.media_data_init(
orig_metadata=stored_metadata)
+
+
+class CommonVideoProcessor(MediaProcessor):
+ """
+ Provides a base for various video processing steps
+ """
+ acceptable_files = ['original', 'best_quality', 'webm_video']
+
+ def common_setup(self):
+        self.video_config = mgg.global_config[
+            'media_type:mediagoblin.media_types.video']
+
+ # Pull down and set up the processing file
+ self.process_filename = get_process_filename(
+ self.entry, self.workbench, self.acceptable_files)
+ self.name_builder = FilenameBuilder(self.process_filename)
+
+ self.transcoder = transcoders.VideoTranscoder()
+ self.did_transcode = False
+
+    def copy_original(self):
+        # Keep the original if we skipped transcoding, or if we are
+        # configured to keep it even after a transcode
+        if not self.did_transcode or self.video_config['keep_original']:
+            copy_original(
+                self.entry, self.process_filename,
+                self.name_builder.fill('{basename}{ext}'))
+
+ def _keep_best(self):
+ """
+ If there is no original, keep the best file that we have
+ """
+ if not self.entry.media_files.get('best_quality'):
+ # Save the best quality file if no original?
+ if not self.entry.media_files.get('original') and \
+ self.entry.media_files.get('webm_video'):
+ self.entry.media_files['best_quality'] = self.entry \
+ .media_files['webm_video']
+
+ def transcode(self, medium_size=None, vp8_quality=None, vp8_threads=None,
+ vorbis_quality=None):
+ progress_callback = ProgressCallback(self.entry)
+ tmp_dst = os.path.join(self.workbench.dir,
+ self.name_builder.fill('{basename}.medium.webm'))
+
+ if not medium_size:
+ medium_size = (
+ mgg.global_config['media:medium']['max_width'],
+ mgg.global_config['media:medium']['max_height'])
+ if not vp8_quality:
+ vp8_quality = self.video_config['vp8_quality']
+ if not vp8_threads:
+ vp8_threads = self.video_config['vp8_threads']
+ if not vorbis_quality:
+ vorbis_quality = self.video_config['vorbis_quality']
+
+ # Extract metadata and keep a record of it
+ metadata = self.transcoder.discover(self.process_filename)
+ store_metadata(self.entry, metadata)
+
+ # Figure out whether or not we need to transcode this video or
+ # if we can skip it
+ if skip_transcode(metadata, medium_size):
+ _log.debug('Skipping transcoding')
+
+ dst_dimensions = metadata['videowidth'], metadata['videoheight']
+
+            # If there is an original and a transcoded file, delete the
+            # transcoded one, since it must be of lower quality than the
+            # original
+            if self.entry.media_files.get('original') and \
+                    self.entry.media_files.get('webm_video'):
+                mgg.public_store.delete_file(
+                    self.entry.media_files['webm_video'])
+
+ else:
+ self.transcoder.transcode(self.process_filename, tmp_dst,
+ vp8_quality=vp8_quality,
+ vp8_threads=vp8_threads,
+ vorbis_quality=vorbis_quality,
+ progress_callback=progress_callback,
+ dimensions=tuple(medium_size))
+
+ dst_dimensions = self.transcoder.dst_data.videowidth,\
+ self.transcoder.dst_data.videoheight
+
+ self._keep_best()
+
+ # Push transcoded video to public storage
+ _log.debug('Saving medium...')
+ store_public(self.entry, 'webm_video', tmp_dst,
+ self.name_builder.fill('{basename}.medium.webm'))
+ _log.debug('Saved medium')
+
+ self.did_transcode = True
+
+ # Save the width and height of the transcoded video
+ self.entry.media_data_init(
+ width=dst_dimensions[0],
+ height=dst_dimensions[1])
+
+ def generate_thumb(self, thumb_size=None):
+ # Temporary file for the video thumbnail (cleaned up with workbench)
+ tmp_thumb = os.path.join(self.workbench.dir,
+ self.name_builder.fill(
+ '{basename}.thumbnail.jpg'))
+
+ if not thumb_size:
+            # note the trailing comma: thumb_size must be a sequence,
+            # since only its first element (the width) is used below
+            thumb_size = (mgg.global_config['media:thumb']['max_width'],)
+
+ # We will only use the width so that the correct scale is kept
+ transcoders.VideoThumbnailerMarkII(
+ self.process_filename,
+ tmp_thumb,
+ thumb_size[0])
+
+ # Push the thumbnail to public storage
+ _log.debug('Saving thumbnail...')
+ store_public(self.entry, 'thumb', tmp_thumb,
+ self.name_builder.fill('{basename}.thumbnail.jpg'))
+
+
+class InitialProcessor(CommonVideoProcessor):
+ """
+ Initial processing steps for new video
+ """
+ name = "initial"
+ description = "Initial processing"
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ if not state:
+ state = entry.state
+ return state in (
+ "unprocessed", "failed")
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--medium_size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ parser.add_argument(
+ '--vp8_quality',
+ type=int,
+ help='Range 0..10')
+
+ parser.add_argument(
+ '--vp8_threads',
+ type=int,
+ help='0 means number_of_CPUs - 1')
+
+ parser.add_argument(
+ '--vorbis_quality',
+ type=float,
+ help='Range -0.1..1')
+
+ parser.add_argument(
+ '--thumb_size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['medium_size', 'vp8_quality', 'vp8_threads',
+ 'vorbis_quality', 'thumb_size'])
+
+ def process(self, medium_size=None, vp8_threads=None, vp8_quality=None,
+ vorbis_quality=None, thumb_size=None):
+ self.common_setup()
+
+ self.transcode(medium_size=medium_size, vp8_quality=vp8_quality,
+ vp8_threads=vp8_threads, vorbis_quality=vorbis_quality)
+
+ self.copy_original()
+ self.generate_thumb(thumb_size=thumb_size)
+ self.delete_queue_file()
+
+
+class Resizer(CommonVideoProcessor):
+ """
+ Video thumbnail resizing process steps for processed media
+ """
+ name = 'resize'
+ description = 'Resize thumbnail'
+ thumb_size = 'thumb_size'
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ if not state:
+ state = entry.state
+        return state == 'processed'
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--thumb_size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ # Needed for gmg reprocess thumbs to work
+ parser.add_argument(
+ 'file',
+ nargs='?',
+ default='thumb',
+ choices=['thumb'])
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['thumb_size', 'file'])
+
+ def process(self, thumb_size=None, file=None):
+ self.common_setup()
+ self.generate_thumb(thumb_size=thumb_size)
+
+
+class Transcoder(CommonVideoProcessor):
+ """
+ Transcoding processing steps for processed video
+ """
+ name = 'transcode'
+ description = 'Re-transcode video'
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ if not state:
+ state = entry.state
+        return state == 'processed'
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--medium_size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ parser.add_argument(
+ '--vp8_quality',
+ type=int,
+ help='Range 0..10')
+
+ parser.add_argument(
+ '--vp8_threads',
+ type=int,
+ help='0 means number_of_CPUs - 1')
+
+ parser.add_argument(
+ '--vorbis_quality',
+ type=float,
+ help='Range -0.1..1')
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['medium_size', 'vp8_threads', 'vp8_quality',
+ 'vorbis_quality'])
+
+ def process(self, medium_size=None, vp8_quality=None, vp8_threads=None,
+ vorbis_quality=None):
+ self.common_setup()
+ self.transcode(medium_size=medium_size, vp8_threads=vp8_threads,
+ vp8_quality=vp8_quality, vorbis_quality=vorbis_quality)
+
+
+class VideoProcessingManager(ProcessingManager):
+ def __init__(self):
+        super(VideoProcessingManager, self).__init__()
+ self.add_processor(InitialProcessor)
+ self.add_processor(Resizer)
+ self.add_processor(Transcoder)
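+
+
+# Illustrative only: re-transcoding entry 1 from the command line would
+# look roughly like this (flags map onto Transcoder.generate_parser; the
+# id and sizes are hypothetical):
+#
+#   ./bin/gmg reprocess run 1 transcode --medium_size 640 360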
diff --git a/mediagoblin/media_types/video/util.py b/mediagoblin/media_types/video/util.py
index 5765ecfb..c33cce5a 100644
--- a/mediagoblin/media_types/video/util.py
+++ b/mediagoblin/media_types/video/util.py
@@ -21,7 +21,7 @@ from mediagoblin import mg_globals as mgg
_log = logging.getLogger(__name__)
-def skip_transcode(metadata):
+def skip_transcode(metadata, size):
'''
Checks video metadata against configuration values for skip_transcode.
@@ -51,9 +51,9 @@ def skip_transcode(metadata):
return False
if config['dimensions_match']:
- if not metadata['videoheight'] <= medium_config['max_height']:
+ if not metadata['videoheight'] <= size[1]:
return False
- if not metadata['videowidth'] <= medium_config['max_width']:
+ if not metadata['videowidth'] <= size[0]:
return False
return True
diff --git a/mediagoblin/notifications/__init__.py b/mediagoblin/notifications/__init__.py
index ed9f8d78..b6f9f478 100644
--- a/mediagoblin/notifications/__init__.py
+++ b/mediagoblin/notifications/__init__.py
@@ -17,7 +17,8 @@
import logging
from mediagoblin.db.models import Notification, \
- CommentNotification, CommentSubscription
+ CommentNotification, CommentSubscription, User
+from mediagoblin.notifications.task import email_notification_task
from mediagoblin.notifications.tools import generate_comment_message
_log = logging.getLogger(__name__)
@@ -121,6 +122,12 @@ NOTIFICATION_FETCH_LIMIT = 100
def get_notifications(user_id, only_unseen=True):
query = Notification.query.filter_by(user_id=user_id)
+ wants_notifications = User.query.filter_by(id=user_id).first()\
+ .wants_notifications
+
+ # If the user does not want notifications, don't return any
+ if not wants_notifications:
+ return None
if only_unseen:
query = query.filter_by(seen=False)
@@ -130,12 +137,19 @@ def get_notifications(user_id, only_unseen=True):
return notifications
+
def get_notification_count(user_id, only_unseen=True):
query = Notification.query.filter_by(user_id=user_id)
+ wants_notifications = User.query.filter_by(id=user_id).first()\
+ .wants_notifications
if only_unseen:
query = query.filter_by(seen=False)
- count = query.count()
+ # If the user doesn't want notifications, don't show any
+ if not wants_notifications:
+ count = None
+ else:
+ count = query.count()
return count
diff --git a/mediagoblin/notifications/routing.py b/mediagoblin/notifications/routing.py
index e57956d3..cd7bbc21 100644
--- a/mediagoblin/notifications/routing.py
+++ b/mediagoblin/notifications/routing.py
@@ -23,3 +23,7 @@ add_route('mediagoblin.notifications.subscribe_comments',
add_route('mediagoblin.notifications.silence_comments',
'/u/<string:user>/m/<string:media>/notifications/silence/',
'mediagoblin.notifications.views:silence_comments')
+
+add_route('mediagoblin.notifications.mark_all_comment_notifications_seen',
+ '/notifications/comments/mark_all_seen/',
+ 'mediagoblin.notifications.views:mark_all_comment_notifications_seen')
diff --git a/mediagoblin/notifications/views.py b/mediagoblin/notifications/views.py
index d275bc92..cfe66b2e 100644
--- a/mediagoblin/notifications/views.py
+++ b/mediagoblin/notifications/views.py
@@ -14,19 +14,15 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from mediagoblin.tools.response import render_to_response, render_404, redirect
+from mediagoblin.tools.response import redirect
from mediagoblin.tools.translate import pass_to_ugettext as _
-from mediagoblin.decorators import (uses_pagination, get_user_media_entry,
- get_media_entry_by_id,
- require_active_login, user_may_delete_media, user_may_alter_collection,
- get_user_collection, get_user_collection_item, active_user_from_url)
-
+from mediagoblin.decorators import get_user_media_entry, require_active_login
from mediagoblin import messages
-from mediagoblin.notifications import add_comment_subscription, \
- silence_comment_subscription
+from mediagoblin.notifications import (add_comment_subscription,
+ silence_comment_subscription, mark_comment_notification_seen,
+ get_notifications)
-from werkzeug.exceptions import BadRequest
@get_user_media_entry
@require_active_login
@@ -41,6 +37,7 @@ def subscribe_comments(request, media):
return redirect(request, location=media.url_for_self(request.urlgen))
+
@get_user_media_entry
@require_active_login
def silence_comments(request, media):
@@ -52,3 +49,17 @@ def silence_comments(request, media):
' %s.') % media.title)
return redirect(request, location=media.url_for_self(request.urlgen))
+
+
+@require_active_login
+def mark_all_comment_notifications_seen(request):
+ """
+ Marks all comment notifications seen.
+ """
+    # get_notifications() returns None when the user has opted out
+    for comment in get_notifications(request.user.id) or []:
+ mark_comment_notification_seen(comment.subject_id, request.user)
+
+ if request.GET.get('next'):
+ return redirect(request, location=request.GET.get('next'))
+ else:
+ return redirect(request, 'index')
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index 27d89895..a4744e14 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -14,12 +14,14 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+from collections import OrderedDict
import logging
import os
-from mediagoblin.db.util import atomic_update
from mediagoblin import mg_globals as mgg
-
+from mediagoblin.db.util import atomic_update
+from mediagoblin.db.models import MediaEntry
+from mediagoblin.tools.pluginapi import hook_handle
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
_log = logging.getLogger(__name__)
@@ -74,49 +76,89 @@ class FilenameBuilder(object):
ext=self.ext)
-class ProcessingState(object):
- """
- The first and only argument to the "processor" of a media type
- This could be thought of as a "request" to the processor
- function. It has the main info for the request (media entry)
- and a bunch of tools for the request on it.
- It can get more fancy without impacting old media types.
+class MediaProcessor(object):
+ """A particular processor for this media type.
+
+ While the ProcessingManager handles all types of MediaProcessing
+ possible for a particular media type, a MediaProcessor can be
+ thought of as a *particular* processing action for a media type.
+ For example, you may have separate MediaProcessors for:
+
+    - initial_processing: the initial processing of a media entry
+ - gen_thumb: generate a thumbnail
+ - resize: resize an image
+ - transcode: transcode a video
+
+ ... etc.
+
+ Some information on producing a new MediaProcessor for your media type:
+
+    - You *must* supply a name attribute. This must be a class level
+      attribute, and a string. This will be used to determine the
+      subcommand of your processor.
+    - It's recommended that you supply a class level description
+      attribute.
+    - Supply a media_is_eligible classmethod. This will be used to
+      determine whether or not a media entry is eligible to use this
+      processor type. See the method documentation for details.
+    - To give "./bin/gmg reprocess run" abilities to this media type,
+      supply both generate_parser and args_to_request classmethods.
+ - The process method will be what actually processes your media.
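+
+    A minimal sketch of a subclass (purely illustrative; the class and
+    method names here are not prescribed by this module):
+
+        class Thumbnailer(MediaProcessor):
+            name = "gen_thumb"
+            description = "Regenerate the thumbnail"
+
+            @classmethod
+            def media_is_eligible(cls, entry=None, state=None):
+                if not state:
+                    state = entry.state
+                return state == "processed"
+
+            def process(self, thumb_size=None):
+                # assumes the media type provides a generate_thumb()
+                self.generate_thumb(thumb_size=thumb_size)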
"""
- def __init__(self, entry):
+ # You MUST override this in the child MediaProcessor!
+ name = None
+
+ # Optional, but will be used in various places to describe the
+ # action this MediaProcessor provides
+ description = None
+
+ def __init__(self, manager, entry):
+ self.manager = manager
self.entry = entry
+ self.entry_orig_state = entry.state
+
+ # Should be initialized at time of processing, at least
self.workbench = None
- self.queued_filename = None
- def set_workbench(self, wb):
- self.workbench = wb
+ def __enter__(self):
+ self.workbench = mgg.workbench_manager.create()
+ return self
+
+ def __exit__(self, *args):
+ self.workbench.destroy()
+ self.workbench = None
- def get_queued_filename(self):
+ # @with_workbench
+ def process(self, **kwargs):
"""
- Get the a filename for the original, on local storage
+ Actually process this media entry.
"""
- if self.queued_filename is not None:
- return self.queued_filename
- queued_filepath = self.entry.queued_media_file
- queued_filename = self.workbench.localized_file(
- mgg.queue_store, queued_filepath,
- 'source')
- self.queued_filename = queued_filename
- return queued_filename
-
- def copy_original(self, target_name, keyname=u"original"):
- self.store_public(keyname, self.get_queued_filename(), target_name)
-
- def store_public(self, keyname, local_file, target_name=None):
- if target_name is None:
- target_name = os.path.basename(local_file)
- target_filepath = create_pub_filepath(self.entry, target_name)
- if keyname in self.entry.media_files:
- _log.warn("store_public: keyname %r already used for file %r, "
- "replacing with %r", keyname,
- self.entry.media_files[keyname], target_filepath)
- mgg.public_store.copy_local_to_storage(local_file, target_filepath)
- self.entry.media_files[keyname] = target_filepath
+ raise NotImplementedError
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ raise NotImplementedError
+
+ ###############################
+ # Command line interface things
+ ###############################
+
+ @classmethod
+ def generate_parser(cls):
+ raise NotImplementedError
+
+ @classmethod
+ def args_to_request(cls, args):
+ raise NotImplementedError
+
+ ##########################################
+ # THE FUTURE: web interface things here :)
+ ##########################################
+
+ #####################
+ # Some common "steps"
+ #####################
def delete_queue_file(self):
# Remove queued media file from storage and database.
@@ -124,9 +166,129 @@ class ProcessingState(object):
# be removed too, but fail if the directory is not empty to be on
# the super-safe side.
queued_filepath = self.entry.queued_media_file
- mgg.queue_store.delete_file(queued_filepath) # rm file
- mgg.queue_store.delete_dir(queued_filepath[:-1]) # rm dir
- self.entry.queued_media_file = []
+ if queued_filepath:
+ mgg.queue_store.delete_file(queued_filepath) # rm file
+ mgg.queue_store.delete_dir(queued_filepath[:-1]) # rm dir
+ self.entry.queued_media_file = []
+
+
+class ProcessingKeyError(Exception): pass
+class ProcessorDoesNotExist(ProcessingKeyError): pass
+class ProcessorNotEligible(ProcessingKeyError): pass
+class ProcessingManagerDoesNotExist(ProcessingKeyError): pass
+
+
+class ProcessingManager(object):
+ """Manages all the processing actions available for a media type
+
+ Specific processing actions, MediaProcessor subclasses, are added
+ to the ProcessingManager.
+ """
+ def __init__(self):
+ # Dict of all MediaProcessors of this media type
+ self.processors = OrderedDict()
+
+ def add_processor(self, processor):
+ """
+ Add a processor class to this media type
+ """
+ name = processor.name
+ if name is None:
+ raise AttributeError("Processor class's .name attribute not set")
+
+ self.processors[name] = processor
+
+ def list_eligible_processors(self, entry):
+ """
+ List all processors that this media entry is eligible to be processed
+ for.
+ """
+ return [
+ processor
+ for processor in self.processors.values()
+ if processor.media_is_eligible(entry=entry)]
+
+ def list_all_processors_by_state(self, state):
+ """
+ List all processors that this media state is eligible to be processed
+ for.
+ """
+ return [
+ processor
+ for processor in self.processors.values()
+ if processor.media_is_eligible(state=state)]
+
+ def list_all_processors(self):
+ return self.processors.values()
+
+    def gen_process_request_via_cli(self, subparser):
+        # TODO: figure out what actually goes here before this can be
+        # written properly
+        pass
+
+ def get_processor(self, key, entry=None):
+ """
+ Get the processor with this key.
+
+ If entry supplied, make sure this entry is actually compatible;
+ otherwise raise error.
+ """
+ try:
+ processor = self.processors[key]
+ except KeyError:
+ raise ProcessorDoesNotExist(
+ "'%s' processor does not exist for this media type" % key)
+
+ if entry and not processor.media_is_eligible(entry):
+ raise ProcessorNotEligible(
+ "This entry is not eligible for processor with name '%s'" % key)
+
+ return processor
+
+
+def request_from_args(args, which_args):
+ """
+ Generate a request from the values of some argparse parsed args
+ """
+ request = {}
+ for arg in which_args:
+ request[arg] = getattr(args, arg)
+
+ return request
+
+
+class MediaEntryNotFound(Exception): pass
+
+
+def get_processing_manager_for_type(media_type):
+ """
+ Get the appropriate media manager for this type
+ """
+ manager_class = hook_handle(('reprocess_manager', media_type))
+ if not manager_class:
+ raise ProcessingManagerDoesNotExist(
+ "A processing manager does not exist for {0}".format(media_type))
+ manager = manager_class()
+
+ return manager
+
+
+def get_entry_and_processing_manager(media_id):
+ """
+    Get a MediaEntry and its processing manager in one go.
+
+    Returns a tuple of: `(entry, manager)`
+ """
+ entry = MediaEntry.query.filter_by(id=media_id).first()
+ if entry is None:
+ raise MediaEntryNotFound("Can't find media with id '%s'" % media_id)
+
+ manager = get_processing_manager_for_type(entry.media_type)
+
+ return entry, manager
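+
+
+# A sketch of the intended calling pattern (this mirrors what
+# ProcessMedia.run in processing/task.py does; the media id value is
+# hypothetical):
+#
+#   entry, manager = get_entry_and_processing_manager(media_id)
+#   processor_class = manager.get_processor(u'initial', entry)
+#   with processor_class(manager, entry) as processor:
+#       processor.process()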
def mark_entry_failed(entry_id, exc):
@@ -165,6 +327,66 @@ def mark_entry_failed(entry_id, exc):
u'fail_metadata': {}})
+def get_process_filename(entry, workbench, acceptable_files):
+ """
+ Try and get the queued file if available, otherwise return the first file
+ in the acceptable_files that we have.
+
+    If there is no queued file and none of the acceptable_files are
+    available, raise ProcessFileNotFound.
+ """
+ if entry.queued_media_file:
+ filepath = entry.queued_media_file
+ storage = mgg.queue_store
+    else:
+        filepath = None
+        for keyname in acceptable_files:
+ if entry.media_files.get(keyname):
+ filepath = entry.media_files[keyname]
+ storage = mgg.public_store
+ break
+
+ if not filepath:
+ raise ProcessFileNotFound()
+
+ filename = workbench.localized_file(
+ storage, filepath,
+ 'source')
+
+ if not os.path.exists(filename):
+ raise ProcessFileNotFound()
+
+ return filename
+
+
+def store_public(entry, keyname, local_file, target_name=None,
+ delete_if_exists=True):
+ if target_name is None:
+ target_name = os.path.basename(local_file)
+ target_filepath = create_pub_filepath(entry, target_name)
+
+ if keyname in entry.media_files:
+ _log.warn("store_public: keyname %r already used for file %r, "
+ "replacing with %r", keyname,
+ entry.media_files[keyname], target_filepath)
+ if delete_if_exists:
+ mgg.public_store.delete_file(entry.media_files[keyname])
+
+ try:
+ mgg.public_store.copy_local_to_storage(local_file, target_filepath)
+    except Exception:
+ raise PublicStoreFail(keyname=keyname)
+
+ # raise an error if the file failed to copy
+ copied_filepath = mgg.public_store.get_local_path(target_filepath)
+ if not os.path.exists(copied_filepath):
+ raise PublicStoreFail(keyname=keyname)
+
+ entry.media_files[keyname] = target_filepath
+
+
+def copy_original(entry, orig_filename, target_name, keyname=u"original"):
+ store_public(entry, keyname, orig_filename, target_name)
+
+
class BaseProcessingFail(Exception):
"""
Base exception that all other processing failure messages should
@@ -184,10 +406,24 @@ class BaseProcessingFail(Exception):
def __init__(self, **metadata):
self.metadata = metadata or {}
-
class BadMediaFail(BaseProcessingFail):
"""
Error that should be raised when an inappropriate file was given
for the media type specified.
"""
general_message = _(u'Invalid file given for media type.')
+
+
+class PublicStoreFail(BaseProcessingFail):
+ """
+ Error that should be raised when copying to public store fails
+ """
+ general_message = _('Copying to public storage failed.')
+
+
+class ProcessFileNotFound(BaseProcessingFail):
+ """
+ Error that should be raised when an acceptable file for processing
+ is not found.
+ """
+ general_message = _(u'An acceptable processing file was not found')
diff --git a/mediagoblin/processing/task.py b/mediagoblin/processing/task.py
index 9af192ed..7f683485 100644
--- a/mediagoblin/processing/task.py
+++ b/mediagoblin/processing/task.py
@@ -18,19 +18,20 @@ import logging
import urllib
import urllib2
-from celery import registry, task
+import celery
+from celery.registry import tasks
from mediagoblin import mg_globals as mgg
-from mediagoblin.db.models import MediaEntry
-from . import mark_entry_failed, BaseProcessingFail, ProcessingState
+from . import mark_entry_failed, BaseProcessingFail
from mediagoblin.tools.processing import json_processing_callback
+from mediagoblin.processing import get_entry_and_processing_manager
_log = logging.getLogger(__name__)
logging.basicConfig()
_log.setLevel(logging.DEBUG)
-@task.task(default_retry_delay=2 * 60)
+@celery.task(default_retry_delay=2 * 60)
def handle_push_urls(feed_url):
"""Subtask, notifying the PuSH servers of new content
@@ -60,36 +61,51 @@ def handle_push_urls(feed_url):
'Giving up.'.format(feed_url))
return False
+
################################
# Media processing initial steps
################################
-
-class ProcessMedia(task.Task):
+class ProcessMedia(celery.Task):
"""
Pass this entry off for processing.
"""
- def run(self, media_id, feed_url):
+ def run(self, media_id, feed_url, reprocess_action, reprocess_info=None):
"""
Pass the media entry off to the appropriate processing function
(for now just process_image...)
:param feed_url: The feed URL that the PuSH server needs to be
updated for.
+        :param reprocess_action: The name of the processing action to run
+            (e.g. u'initial').
+        :param reprocess_info: A dict containing all of the necessary
+            reprocessing info for the media_type.
"""
- entry = MediaEntry.query.get(media_id)
+ reprocess_info = reprocess_info or {}
+ entry, manager = get_entry_and_processing_manager(media_id)
# Try to process, and handle expected errors.
try:
- entry.state = u'processing'
- entry.save()
-
- _log.debug('Processing {0}'.format(entry))
-
- proc_state = ProcessingState(entry)
- with mgg.workbench_manager.create() as workbench:
- proc_state.set_workbench(workbench)
- # run the processing code
- entry.media_manager.processor(proc_state)
+ processor_class = manager.get_processor(reprocess_action, entry)
+
+ with processor_class(manager, entry) as processor:
+ # Initial state change has to be here because
+ # the entry.state gets recorded on processor_class init
+ entry.state = u'processing'
+ entry.save()
+
+ _log.debug('Processing {0}'.format(entry))
+
+ try:
+ processor.process(**reprocess_info)
+ except Exception as exc:
+ if processor.entry_orig_state == 'processed':
+ _log.error(
+ 'Entry {0} failed to process due to the following'
+ ' error: {1}'.format(entry.id, exc))
+                    _log.info(
+                        'Setting entry.state back to "processed"')
+ else:
+ raise
# We set the state to processed and save the entry here so there's
# no need to save at the end of the processing stage, probably ;)
@@ -140,6 +156,4 @@ class ProcessMedia(task.Task):
entry = mgg.database.MediaEntry.query.filter_by(id=entry_id).first()
json_processing_callback(entry)
-# Register the task
-process_media = registry.tasks[ProcessMedia.name]
-
+tasks.register(ProcessMedia)
diff --git a/mediagoblin/static/js/notifications.js b/mediagoblin/static/js/notifications.js
index 0153463a..78694f59 100644
--- a/mediagoblin/static/js/notifications.js
+++ b/mediagoblin/static/js/notifications.js
@@ -33,4 +33,17 @@ var notifications = {};
$(document).ready(function () {
notifications.init();
+
+ var mark_all_comments_seen = document.getElementById('mark_all_comments_seen');
+
+ if (mark_all_comments_seen) {
+ mark_all_comments_seen.href = '#';
+ mark_all_comments_seen.onclick = function() {
+ $.ajax({
+ type: 'GET',
+ url: mark_all_comments_seen_url,
+                success: function(res, status, xhr) { window.location.reload(); }
+            });
+        };
+ }
});
diff --git a/mediagoblin/submit/lib.py b/mediagoblin/submit/lib.py
index 7e85696b..1bbf2cb8 100644
--- a/mediagoblin/submit/lib.py
+++ b/mediagoblin/submit/lib.py
@@ -21,7 +21,7 @@ from werkzeug.datastructures import FileStorage
from mediagoblin.db.models import MediaEntry
from mediagoblin.processing import mark_entry_failed
-from mediagoblin.processing.task import process_media
+from mediagoblin.processing.task import ProcessMedia
_log = logging.getLogger(__name__)
@@ -76,17 +76,21 @@ def prepare_queue_task(app, entry, filename):
return queue_file
-def run_process_media(entry, feed_url=None):
+def run_process_media(entry, feed_url=None,
+ reprocess_action="initial", reprocess_info=None):
"""Process the media asynchronously
:param entry: MediaEntry() instance to be processed.
:param feed_url: A string indicating the feed_url that the PuSH servers
should be notified of. This will be sth like: `request.urlgen(
'mediagoblin.user_pages.atom_feed',qualified=True,
- user=request.user.username)`"""
+ user=request.user.username)`
+ :param reprocess_action: What particular action should be run.
+ :param reprocess_info: A dict containing all of the necessary reprocessing
+ info for the given media_type"""
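+    # For example, to regenerate a thumbnail asynchronously (values here
+    # are hypothetical and depend on the media type's processors):
+    #
+    #   run_process_media(entry, feed_url,
+    #                     reprocess_action=u'resize',
+    #                     reprocess_info={u'thumb_size': [100, 100]})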
try:
- process_media.apply_async(
- [entry.id, feed_url], {},
+ ProcessMedia().apply_async(
+ [entry.id, feed_url, reprocess_action, reprocess_info], {},
task_id=entry.queued_task_id)
except BaseException as exc:
# The purpose of this section is because when running in "lazy"
diff --git a/mediagoblin/submit/views.py b/mediagoblin/submit/views.py
index 3f9d5b2d..6bb95ecb 100644
--- a/mediagoblin/submit/views.py
+++ b/mediagoblin/submit/views.py
@@ -89,7 +89,7 @@ def submit_start(request):
# Save now so we have this data before kicking off processing
entry.save()
- # Pass off to processing
+ # Pass off to async processing
#
# (... don't change entry after this point to avoid race
# conditions with changes to the document via processing code)
@@ -97,6 +97,7 @@ def submit_start(request):
'mediagoblin.user_pages.atom_feed',
qualified=True, user=request.user.username)
run_process_media(entry, feed_url)
+
add_message(request, SUCCESS, _('Woohoo! Submitted!'))
add_comment_subscription(request.user, entry)
diff --git a/mediagoblin/templates/mediagoblin/base.html b/mediagoblin/templates/mediagoblin/base.html
index b1fc658e..bd26e707 100644
--- a/mediagoblin/templates/mediagoblin/base.html
+++ b/mediagoblin/templates/mediagoblin/base.html
@@ -37,6 +37,9 @@
src="{{ request.staticdirect('/js/header_dropdown.js') }}"></script>
<script type="text/javascript"
src="{{ request.staticdirect('/js/notifications.js') }}"></script>
+ <script>
+      var mark_all_comments_seen_url = "{{ request.urlgen('mediagoblin.notifications.mark_all_comment_notifications_seen') }}";
+ </script>
{# For clarification, the difference between the extra_head.html template
# and the head template hook is that the former should be used by
@@ -63,11 +66,11 @@
{% set notification_count = get_notification_count(request.user.id) %}
{% if notification_count %}
- <a href="#notifications" class="notification-gem button_action" title="Notifications">
+ <a href="javascript:;" class="notification-gem button_action" title="Notifications">
{{ notification_count }}</a>
{% endif %}
- <a href="#header" class="button_action header_dropdown_down">&#9660;</a>
- <a href="#no_header" class="button_action header_dropdown_up">&#9650;</a>
+ <a href="javascript:;" class="button_action header_dropdown_down">&#9660;</a>
+ <a href="javascript:;" class="button_action header_dropdown_up">&#9650;</a>
{% elif request.user and request.user.status == "needs_email_verification" %}
{# the following link should only appear when verification is needed #}
<a href="{{ request.urlgen('mediagoblin.user_pages.user_home',
diff --git a/mediagoblin/templates/mediagoblin/fragments/header_notifications.html b/mediagoblin/templates/mediagoblin/fragments/header_notifications.html
index 70d7935a..55759a39 100644
--- a/mediagoblin/templates/mediagoblin/fragments/header_notifications.html
+++ b/mediagoblin/templates/mediagoblin/fragments/header_notifications.html
@@ -36,5 +36,9 @@
</li>
{% endfor %}
</ul>
+ <a href="{{ request.urlgen('mediagoblin.notifications.mark_all_comment_notifications_seen') }}?next={{
+ request.base_url|urlencode }}" id="mark_all_comments_seen">
+ {% trans %}Mark all read{% endtrans %}
+ </a>
</div>
{% endif %}
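The link above hands the current page to the view through ?next=, and the test added further down expects a redirect to "/" when that parameter is absent. The view itself lives in mediagoblin/notifications/views.py, which this section does not show; a plausible sketch, assuming only the Notification fields the test exercises (user_id, seen), would be:

    from mediagoblin.db.models import Notification
    from mediagoblin.tools.response import redirect

    def mark_all_comment_notifications_seen(request):
        # Hypothetical sketch; the real view is in
        # mediagoblin/notifications/views.py (not shown in this section).
        if request.user:
            for notification in Notification.query.filter_by(
                    user_id=request.user.id, seen=False):
                notification.seen = True
                notification.save()
        # Return to the page the link was clicked on, or to the home page.
        return redirect(request, location=request.GET.get('next', '/'))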
diff --git a/mediagoblin/templates/mediagoblin/media_displays/video.html b/mediagoblin/templates/mediagoblin/media_displays/video.html
index b0854c9f..5c52f9f0 100644
--- a/mediagoblin/templates/mediagoblin/media_displays/video.html
+++ b/mediagoblin/templates/mediagoblin/media_displays/video.html
@@ -62,11 +62,11 @@
</a>
</li>
{% endif %}
- {% if 'webm_640' in media.media_files %}
+ {% if 'webm_video' in media.media_files %}
<li>
<a href="{{ request.app.public_store.file_url(
- media.media_files.webm_640) }}">
- {%- trans %}WebM file (640p; VP8/Vorbis){% endtrans -%}
+ media.media_files.webm_video) }}">
+ {%- trans %}WebM file (VP8/Vorbis){% endtrans -%}
</a>
</li>
{% endif %}
diff --git a/mediagoblin/tests/test_celery_setup.py b/mediagoblin/tests/test_celery_setup.py
index 0184436a..d60293f9 100644
--- a/mediagoblin/tests/test_celery_setup.py
+++ b/mediagoblin/tests/test_celery_setup.py
@@ -55,6 +55,6 @@ def test_setup_celery_from_config():
pkg_resources.resource_filename('mediagoblin.tests', 'celery.db'))
assert fake_celery_module.BROKER_TRANSPORT == 'sqlalchemy'
- assert fake_celery_module.BROKER_HOST == (
+ assert fake_celery_module.BROKER_URL == (
'sqlite:///' +
pkg_resources.resource_filename('mediagoblin.tests', 'kombu.db'))
diff --git a/mediagoblin/tests/test_mgoblin_app.ini b/mediagoblin/tests/test_mgoblin_app.ini
index 535cf1c1..da0dffb9 100644
--- a/mediagoblin/tests/test_mgoblin_app.ini
+++ b/mediagoblin/tests/test_mgoblin_app.ini
@@ -23,7 +23,7 @@ base_dir = %(here)s/user_dev/media/queue
[celery]
CELERY_ALWAYS_EAGER = true
CELERY_RESULT_DBURI = "sqlite:///%(here)s/user_dev/celery.db"
-BROKER_HOST = "sqlite:///%(here)s/user_dev/kombu.db"
+BROKER_URL = "sqlite:///%(here)s/user_dev/kombu.db"
[plugins]
[[mediagoblin.plugins.api]]
diff --git a/mediagoblin/tests/test_notifications.py b/mediagoblin/tests/test_notifications.py
index d52b8d5a..e075d475 100644
--- a/mediagoblin/tests/test_notifications.py
+++ b/mediagoblin/tests/test_notifications.py
@@ -149,3 +149,56 @@ otherperson@example.com\n\nSGkgb3RoZXJwZXJzb24sCmNocmlzIGNvbW1lbnRlZCBvbiB5b3VyI
# User should not have been notified
assert len(notifications) == 1
+
+ def test_mark_all_comment_notifications_seen(self):
+ """ Test that mark_all_comments_seen works"""
+
+ user = fixture_add_user('otherperson', password='nosreprehto')
+
+ media_entry = fixture_media_entry(uploader=user.id, state=u'processed')
+
+ fixture_comment_subscription(media_entry)
+
+ media_uri_id = '/u/{0}/m/{1}/'.format(user.username,
+ media_entry.id)
+
+ # add 2 comments
+ self.test_app.post(
+ media_uri_id + 'comment/add/',
+ {
+ 'comment_content': u'Test comment #43'
+ }
+ )
+
+ self.test_app.post(
+ media_uri_id + 'comment/add/',
+ {
+ 'comment_content': u'Test comment #44'
+ }
+ )
+
+ notifications = Notification.query.filter_by(
+ user_id=user.id).all()
+
+ assert len(notifications) == 2
+
+        # neither notification should be marked seen yet
+        assert not notifications[0].seen
+        assert not notifications[1].seen
+
+ # login with other user to mark notifications seen
+ self.logout()
+ self.login('otherperson', 'nosreprehto')
+
+ # mark all comment notifications seen
+ res = self.test_app.get('/notifications/comments/mark_all_seen/')
+ res.follow()
+
+ assert urlparse.urlsplit(res.location)[2] == '/'
+
+ notifications = Notification.query.filter_by(
+ user_id=user.id).all()
+
+ # both notifications should be marked seen
+        assert notifications[0].seen
+        assert notifications[1].seen
diff --git a/mediagoblin/tests/test_persona.py b/mediagoblin/tests/test_persona.py
index ce795258..919877c9 100644
--- a/mediagoblin/tests/test_persona.py
+++ b/mediagoblin/tests/test_persona.py
@@ -18,6 +18,8 @@ import pkg_resources
import pytest
import mock
+pytest.importorskip("requests")
+
from mediagoblin import mg_globals
from mediagoblin.db.base import Session
from mediagoblin.tests.tools import get_app
diff --git a/mediagoblin/user_pages/views.py b/mediagoblin/user_pages/views.py
index 87d0034e..e5646faa 100644
--- a/mediagoblin/user_pages/views.py
+++ b/mediagoblin/user_pages/views.py
@@ -323,8 +323,9 @@ def media_confirm_delete(request):
if not location:
location=media.url_to_prev(request.urlgen)
if not location:
- location="mediagoblin.user_pages.user_home"
- return redirect(request, location=location, user=username)
+ location=request.urlgen("mediagoblin.user_pages.user_home",
+ user=username)
+ return redirect(request, location=location)
else:
messages.add_message(
request, messages.ERROR,