author     Christopher Allan Webber <cwebber@dustycloud.org>   2013-03-03 10:36:37 -0600
committer  Christopher Allan Webber <cwebber@dustycloud.org>   2013-03-03 10:36:37 -0600
commit     29adab4651cd2485441a08bb97ae0165e5be0017 (patch)
tree       a44c01ac75db5f1cf069a09a2d07a1ca52b4ab27
parent     ddbf6af1e20222882a7ce559804ed48f4ad31a92 (diff)
Now store metadata info from processing into the media type.
This comes in several parts:
 - Store the metadata from gstreamer during processing
 - Add a new JSONEncoded field to the VideoData table
 - And, of course, add a migration for that field!

This commit sponsored by Julius Tuomisto.  Thank you, Julius!
-rw-r--r--  mediagoblin/media_types/video/migrations.py  | 15
-rw-r--r--  mediagoblin/media_types/video/models.py      | 19
-rw-r--r--  mediagoblin/media_types/video/processing.py  | 28
3 files changed, 62 insertions(+), 0 deletions(-)
diff --git a/mediagoblin/media_types/video/migrations.py b/mediagoblin/media_types/video/migrations.py
index f54c23ea..442bbd8d 100644
--- a/mediagoblin/media_types/video/migrations.py
+++ b/mediagoblin/media_types/video/migrations.py
@@ -14,4 +14,19 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
+from mediagoblin.db.migration_tools import RegisterMigration, inspect_table
+
+from sqlalchemy import MetaData, Column, Unicode
+
 MIGRATIONS = {}
+
+
+@RegisterMigration(1, MIGRATIONS)
+def add_orig_metadata_column(db_conn):
+    metadata = MetaData(bind=db_conn.bind)
+
+    vid_data = inspect_table(metadata, "video__mediadata")
+
+    col = Column('orig_metadata', Unicode,
+                 default=None, nullable=True)
+    col.create(vid_data)
+    db_conn.commit()
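For readers unfamiliar with the migration_tools helpers: RegisterMigration(1, MIGRATIONS) appears to record the function in the MIGRATIONS dict under schema version 1, and col.create() looks like sqlalchemy-migrate's changeset column API. As a rough, standalone illustration only (this is not MediaGoblin's migration runner, and the database URL is a placeholder), the same schema change could be expressed directly against an SQLAlchemy engine:

from sqlalchemy import create_engine, text

# Placeholder URL; MediaGoblin reads the real connection string from its config.
engine = create_engine("sqlite:///mediagoblin.db")

with engine.begin() as conn:
    # Equivalent effect of the migration above: add a nullable text column
    # that the model maps to JSON-encoded data.
    conn.execute(text(
        "ALTER TABLE video__mediadata ADD COLUMN orig_metadata VARCHAR"))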
diff --git a/mediagoblin/media_types/video/models.py b/mediagoblin/media_types/video/models.py
index a771352c..e0043718 100644
--- a/mediagoblin/media_types/video/models.py
+++ b/mediagoblin/media_types/video/models.py
@@ -20,12 +20,29 @@ from mediagoblin.db.base import Base
 from sqlalchemy import (
     Column, Integer, SmallInteger, ForeignKey)
 from sqlalchemy.orm import relationship, backref
+from mediagoblin.db.extratypes import JSONEncoded
 
 
 BACKREF_NAME = "video__media_data"
 
 
 class VideoData(Base):
+    """
+    Attributes:
+     - media_data: the originating media entry (of course)
+     - width: width of the transcoded video
+     - height: height of the transcoded video
+     - orig_metadata: a loose JSON structure containing metadata that
+       gstreamer pulled from the original video.
+       This field is NOT GUARANTEED to exist!
+
+       Likely metadata extracted:
+         "videoheight", "videolength", "videowidth",
+         "audiorate", "audiolength", "audiochannels", "audiowidth",
+         "mimetype", "tags"
+
+       TODO: document the above better.
+    """
     __tablename__ = "video__mediadata"
 
     # The primary key *and* reference to the main media_entry
@@ -38,6 +55,8 @@ class VideoData(Base):
     width = Column(SmallInteger)
     height = Column(SmallInteger)
 
+    orig_metadata = Column(JSONEncoded)
+
 
 DATA_MODEL = VideoData
 MODELS = [VideoData]
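The new column uses JSONEncoded from mediagoblin.db.extratypes, whose definition is not shown in this diff. A sketch of a column type with this kind of behaviour, built as a SQLAlchemy TypeDecorator that serializes to JSON text on write and parses it back on read (the actual MediaGoblin type may differ in name and storage details):

import json

from sqlalchemy.types import TypeDecorator, VARCHAR


class JSONEncodedSketch(TypeDecorator):
    """Illustrative stand-in for JSONEncoded: JSON stored in a VARCHAR column."""
    impl = VARCHAR

    def process_bind_param(self, value, dialect):
        # Python structure -> JSON text on the way into the database
        return json.dumps(value) if value is not None else None

    def process_result_value(self, value, dialect):
        # JSON text -> Python structure on the way back out
        return json.loads(value) if value is not None else None

Storing the value as text would also square with the migration above, which adds the column to the database as a plain Unicode column.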
diff --git a/mediagoblin/media_types/video/processing.py b/mediagoblin/media_types/video/processing.py
index 32313be7..ec9ff225 100644
--- a/mediagoblin/media_types/video/processing.py
+++ b/mediagoblin/media_types/video/processing.py
@@ -86,8 +86,12 @@ def process_video(proc_state):
         mgg.global_config['media:medium']['max_width'],
         mgg.global_config['media:medium']['max_height'])
 
+    # Extract metadata and keep a record of it
     metadata = transcoders.VideoTranscoder().discover(queued_filename)
+    store_metadata(entry, metadata)
 
+    # Figure out whether or not we need to transcode this video or
+    # if we can skip it
     if skip_transcode(metadata):
         _log.debug('Skipping transcoding')
         # Just push the submitted file to the tmp_dst
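skip_transcode() is defined elsewhere in this media type and is not shown in this diff. Purely as a hypothetical illustration of the kind of check it performs (the function name, config values, and dict-style metadata access below are all assumptions, not MediaGoblin's actual implementation):

def might_skip_transcode(metadata, max_width, max_height):
    """Hypothetical check: is the original already a servable WebM that fits?"""
    width = metadata.get("videowidth", 0)
    height = metadata.get("videoheight", 0)
    already_webm = metadata.get("mimetype") == "video/webm"
    return already_webm and width <= max_width and height <= max_height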
@@ -152,3 +156,27 @@ def process_video(proc_state):
 
     # Remove queued media file from storage and database
     proc_state.delete_queue_file()
+
+
+def store_metadata(media_entry, metadata):
+    """
+    Store metadata from this video for this media entry.
+    """
+    # Pull out the easy values first (the ones that need no conversion)
+    stored_metadata = dict(
+        [(key, metadata[key])
+         for key in [
+             "videoheight", "videolength", "videowidth",
+             "audiorate", "audiolength", "audiochannels", "audiowidth",
+             "mimetype", "tags"]
+         if key in metadata])
+
+    # videorate is normally a special fraction type, so convert it into
+    # a plain [numerator, denominator] sequence before storing
+    if "videorate" in metadata:
+        videorate = metadata["videorate"]
+        stored_metadata["videorate"] = [videorate.num, videorate.denom]
+
+    media_entry.media_data_init(
+        orig_metadata=stored_metadata)
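With the field in place, later code (templates, API views) can presumably read the stored values back through the media entry's media data. A hedged sketch of such a read, assuming `entry` is a processed video MediaEntry and that its VideoData row is reachable via a media_data attribute (an assumption; only the backref name is visible in this diff):

# Assumed names: `entry` is a processed video MediaEntry, and entry.media_data
# resolves to the VideoData row defined above. orig_metadata may be absent,
# as the docstring warns, so default defensively.
orig = entry.media_data.orig_metadata or {}

width = orig.get("videowidth")
height = orig.get("videoheight")
length = orig.get("videolength")

# videorate was stored as a [numerator, denominator] pair by store_metadata()
videorate = orig.get("videorate")
if videorate:
    fps = float(videorate[0]) / videorate[1]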