Diffstat (limited to 'mediagoblin/db')
-rw-r--r--   mediagoblin/db/mongo/models.py   153
1 file changed, 106 insertions, 47 deletions
diff --git a/mediagoblin/db/mongo/models.py b/mediagoblin/db/mongo/models.py
index 906d2849..e085840e 100644
--- a/mediagoblin/db/mongo/models.py
+++ b/mediagoblin/db/mongo/models.py
@@ -14,16 +14,17 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import datetime
+import datetime, uuid
from mongokit import Document
+from mediagoblin.auth import lib as auth_lib
from mediagoblin import mg_globals
-from mediagoblin.db.mongo import migrations
-from mediagoblin.db.mongo.util import ASCENDING, DESCENDING, ObjectId
+from mediagoblin.db import migrations
+from mediagoblin.db.util import ASCENDING, DESCENDING, ObjectId
from mediagoblin.tools.pagination import Pagination
-from mediagoblin.tools import url
-from mediagoblin.db.mixin import UserMixin, MediaEntryMixin
+from mediagoblin.tools import url, common
+from mediagoblin.tools import licenses
###################
# Custom validators
@@ -34,7 +35,7 @@ from mediagoblin.db.mixin import UserMixin, MediaEntryMixin
########
-class User(Document, UserMixin):
+class User(Document):
"""
A user of MediaGoblin.
@@ -62,23 +63,22 @@ class User(Document, UserMixin):
- bio_html: biography of the user converted to proper HTML.
"""
__collection__ = 'users'
- use_dot_notation = True
structure = {
'username': unicode,
'email': unicode,
'created': datetime.datetime,
- 'plugin_data': dict, # plugins can dump stuff here.
+ 'plugin_data': dict, # plugins can dump stuff here.
'pw_hash': unicode,
'email_verified': bool,
'status': unicode,
'verification_key': unicode,
'is_admin': bool,
- 'url': unicode,
- 'bio': unicode, # May contain markdown
- 'bio_html': unicode, # May contain plaintext, or HTML
- 'fp_verification_key': unicode, # forgotten password verification key
- 'fp_token_expire': datetime.datetime,
+ 'url' : unicode,
+ 'bio' : unicode, # May contain markdown
+ 'bio_html': unicode, # May contain plaintext, or HTML
+ 'fp_verification_key': unicode, # forgotten password verification key
+ 'fp_token_expire': datetime.datetime
}
required_fields = ['username', 'created', 'pw_hash', 'email']
@@ -87,10 +87,18 @@ class User(Document, UserMixin):
'created': datetime.datetime.utcnow,
'email_verified': False,
'status': u'needs_email_verification',
+ 'verification_key': lambda: unicode(uuid.uuid4()),
'is_admin': False}
+ def check_login(self, password):
+ """
+ See if a user can login with this password
+ """
+ return auth_lib.bcrypt_check_password(
+ password, self['pw_hash'])
+
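The check_login helper added above delegates password checking to auth_lib.bcrypt_check_password against the stored pw_hash. A minimal usage sketch follows; the attempt_login helper and the surrounding query are illustrative assumptions, not part of this commit:

# Sketch only: `db` is a registered mongokit connection exposing the User
# model above (see register_models at the bottom of this file).
def attempt_login(db, username, password):
    user = db.User.find_one({'username': username})
    if user is None:
        return None
    # check_login compares the plaintext password against the stored
    # bcrypt hash via auth_lib.bcrypt_check_password.
    return user if user.check_login(password) else None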
-class MediaEntry(Document, MediaEntryMixin):
+class MediaEntry(Document):
"""
Record of a piece of media.
@@ -122,7 +130,7 @@ class MediaEntry(Document, MediaEntryMixin):
For example, images might contain some EXIF data that's not appropriate
to other formats. You might store it like:
- mediaentry.media_data['exif'] = {
+ mediaentry['media_data']['exif'] = {
'manufacturer': 'CASIO',
'model': 'QV-4000',
'exposure_time': .659}
@@ -130,7 +138,7 @@ class MediaEntry(Document, MediaEntryMixin):
Alternately for video you might store:
# play length in seconds
- mediaentry.media_data['play_length'] = 340
+ mediaentry['media_data']['play_length'] = 340
... so what's appropriate here really depends on the media type.
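Because this commit drops mongokit's use_dot_notation, media_data is read and written with plain dict indexing, as the updated docstring shows. A short sketch, assuming `entry` is a saved MediaEntry document (the EXIF keys are the illustrative ones from the docstring):

# Store per-media-type extras under media_data and persist them.
entry['media_data']['exif'] = {
    'manufacturer': 'CASIO',
    'model': 'QV-4000',
    'exposure_time': .659}
entry.save()

# Read it back later; .get() guards media types that never set 'exif'.
exif = entry['media_data'].get('exif', {})
camera_model = exif.get('model')  # e.g. 'QV-4000'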
@@ -151,6 +159,8 @@ class MediaEntry(Document, MediaEntryMixin):
"unprocessed": uploaded but needs to go through processing for display
"processed": processed and able to be displayed
+ - license: URI for entry's license
+
- queued_media_file: storage interface style filepath describing a file
queued for processing. This is stored in the mg_globals.queue_store
storage system.
@@ -165,24 +175,25 @@ class MediaEntry(Document, MediaEntryMixin):
critical to this piece of media but may be usefully relevant to people
viewing the work. (currently unused.)
- - fail_error: path to the exception raised
- - fail_metadata:
+ - fail_error: path to the exception raised
+ - fail_metadata:
+
"""
__collection__ = 'media_entries'
- use_dot_notation = True
structure = {
'uploader': ObjectId,
'title': unicode,
'slug': unicode,
'created': datetime.datetime,
- 'description': unicode, # May contain markdown/up
- 'description_html': unicode, # May contain plaintext, or HTML
+ 'description': unicode, # May contain markdown/up
+ 'description_html': unicode, # May contain plaintext, or HTML
'media_type': unicode,
- 'media_data': dict, # extra data relevant to this media_type
- 'plugin_data': dict, # plugins can dump stuff here.
+ 'media_data': dict, # extra data relevant to this media_type
+ 'plugin_data': dict, # plugins can dump stuff here.
'tags': [dict],
'state': unicode,
+ 'license': unicode, # License URI
# For now let's assume there can only be one main file queued
# at a time
@@ -208,50 +219,99 @@ class MediaEntry(Document, MediaEntryMixin):
'created': datetime.datetime.utcnow,
'state': u'unprocessed'}
- def get_comments(self, ascending=False):
- if ascending:
- order = ASCENDING
- else:
- order = DESCENDING
-
+ def get_comments(self):
return self.db.MediaComment.find({
- 'media_entry': self._id}).sort('created', order)
+ 'media_entry': self['_id']}).sort('created', DESCENDING)
+
+ def get_display_media(self, media_map, fetch_order=common.DISPLAY_IMAGE_FETCHING_ORDER):
+ """
+ Find the best media for display.
+
+ Args:
+ - media_map: a dict like
+ {u'image_size': [u'dir1', u'dir2', u'image.jpg']}
+ - fetch_order: the order we should try fetching images in
+
+ Returns:
+ the media_path for the first size found in fetch order, or None if none match
+ """
+ media_sizes = media_map.keys()
+
+ for media_size in fetch_order:
+ if media_size in media_sizes:
+ return media_map[media_size]
+
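A usage sketch for get_display_media; the size keys below are illustrative (only u'image_size' appears in the docstring), and `entry` is assumed to be a MediaEntry document:

# Sketch: the keys here are hypothetical size names, not values this commit defines.
media_map = {
    u'thumb': [u'dir1', u'dir2', u'image.thumbnail.jpg'],
    u'image_size': [u'dir1', u'dir2', u'image.jpg'],
}
# Walks the fetch order (common.DISPLAY_IMAGE_FETCHING_ORDER by default) and
# returns the storage filepath for the first size present, or None.
best_path = entry.get_display_media(media_map)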
+ def main_mediafile(self):
+ pass
def generate_slug(self):
- self.slug = url.slugify(self.title)
+ self['slug'] = url.slugify(self['title'])
duplicate = mg_globals.database.media_entries.find_one(
- {'slug': self.slug})
+ {'slug': self['slug']})
if duplicate:
- self.slug = "%s-%s" % (self._id, self.slug)
+ self['slug'] = "%s-%s" % (self['_id'], self['slug'])
+
+ def url_for_self(self, urlgen):
+ """
+ Generate an appropriate url for ourselves
+
+ Use a slug if we have one, else use our '_id'.
+ """
+ uploader = self.uploader()
+
+ if self.get('slug'):
+ return urlgen(
+ 'mediagoblin.user_pages.media_home',
+ user=uploader['username'],
+ media=self['slug'])
+ else:
+ return urlgen(
+ 'mediagoblin.user_pages.media_home',
+ user=uploader['username'],
+ media=unicode(self['_id']))
def url_to_prev(self, urlgen):
"""
Provide a url to the previous entry from this user, if there is one
"""
- cursor = self.db.MediaEntry.find({'_id': {"$gt": self._id},
- 'uploader': self.uploader,
+ cursor = self.db.MediaEntry.find({'_id' : {"$gt": self['_id']},
+ 'uploader': self['uploader'],
'state': 'processed'}).sort(
'_id', ASCENDING).limit(1)
- for media in cursor:
- return media.url_for_self(urlgen)
+ if cursor.count():
+ return urlgen('mediagoblin.user_pages.media_home',
+ user=self.uploader()['username'],
+ media=unicode(cursor[0]['slug']))
def url_to_next(self, urlgen):
"""
Provide a url to the next entry from this user, if there is one
"""
- cursor = self.db.MediaEntry.find({'_id': {"$lt": self._id},
- 'uploader': self.uploader,
+ cursor = self.db.MediaEntry.find({'_id' : {"$lt": self['_id']},
+ 'uploader': self['uploader'],
'state': 'processed'}).sort(
'_id', DESCENDING).limit(1)
- for media in cursor:
- return media.url_for_self(urlgen)
+ if cursor.count():
+ return urlgen('mediagoblin.user_pages.media_home',
+ user=self.uploader()['username'],
+ media=unicode(cursor[0]['slug']))
+
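url_to_prev and url_to_next rely on ObjectId ordering: the neighbouring entry by the same uploader is the first 'processed' entry with the next-greater (prev) or next-smaller (next) _id. The same query expressed in plain pymongo terms, as a standalone sketch (the adjacent_entry helper is hypothetical):

from pymongo import ASCENDING, DESCENDING

def adjacent_entry(media_entries, entry, direction):
    """Return the neighbouring processed entry by the same uploader, or None.

    direction='prev' looks for the next-greater _id (ASCENDING),
    direction='next' for the next-smaller _id (DESCENDING), matching
    url_to_prev / url_to_next above.
    """
    op, order = ('$gt', ASCENDING) if direction == 'prev' else ('$lt', DESCENDING)
    cursor = media_entries.find({
        '_id': {op: entry['_id']},
        'uploader': entry['uploader'],
        'state': 'processed'}).sort('_id', order).limit(1)
    results = list(cursor)
    return results[0] if results else None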
+ def uploader(self):
+ return self.db.User.find_one({'_id': self['uploader']})
+
+ def get_fail_exception(self):
+ """
+ Get the exception that's appropriate for this error
+ """
+ if self['fail_error']:
+ return common.import_component(self['fail_error'])
- @property
- def get_uploader(self):
- return self.db.User.find_one({'_id': self.uploader})
+ def get_license_data(self):
+ """Return license dict for requested license"""
+ return licenses.SUPPORTED_LICENSES[self['license']]
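get_license_data resolves the entry's license URI through licenses.SUPPORTED_LICENSES. The shape of that mapping is not shown in this diff; the sketch below only assumes it is a dict keyed by license URI (the example URI is illustrative):

# Assumes licenses.SUPPORTED_LICENSES maps a license URI to its metadata.
entry['license'] = u'http://creativecommons.org/licenses/by-sa/3.0/'
license_info = entry.get_license_data()
# license_info can then be handed to a template for display; a KeyError
# here means the stored URI is not one of the supported licenses.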
class MediaComment(Document):
@@ -268,7 +328,6 @@ class MediaComment(Document):
"""
__collection__ = 'media_comments'
- use_dot_notation = True
structure = {
'media_entry': ObjectId,
@@ -286,8 +345,7 @@ class MediaComment(Document):
def media_entry(self):
return self.db.MediaEntry.find_one({'_id': self['media_entry']})
- @property
- def get_author(self):
+ def author(self):
return self.db.User.find_one({'_id': self['author']})
@@ -302,3 +360,4 @@ def register_models(connection):
Register all models in REGISTER_MODELS with this connection.
"""
connection.register(REGISTER_MODELS)
+