aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authortilly-Q <nattilypigeonfowl@gmail.com>2014-05-13 16:53:28 -0400
committertilly-Q <nattilypigeonfowl@gmail.com>2014-05-13 16:57:12 -0400
commitc0ea2bad04c7c0ce28659a73bd63ca409c847519 (patch)
tree58494d46d21878aa1f71e63ada5d5277eed9d434
parent1d09e8b4f1be938a3415329f4fd93e4f6936b16a (diff)
downloadmediagoblin-c0ea2bad04c7c0ce28659a73bd63ca409c847519.tar.lz
mediagoblin-c0ea2bad04c7c0ce28659a73bd63ca409c847519.tar.xz
mediagoblin-c0ea2bad04c7c0ce28659a73bd63ca409c847519.zip
Prepared for input without an 'id' column and made all of the internal nodes
into free floating nodes so that compact_and_validate will remove them.
-rw-r--r--mediagoblin/gmg_commands/batchaddmedia.py11
1 file changed, 4 insertions, 7 deletions
diff --git a/mediagoblin/gmg_commands/batchaddmedia.py b/mediagoblin/gmg_commands/batchaddmedia.py
index 75e7b7c5..58ca7e74 100644
--- a/mediagoblin/gmg_commands/batchaddmedia.py
+++ b/mediagoblin/gmg_commands/batchaddmedia.py
@@ -99,10 +99,7 @@ def batchaddmedia(args):
# Get all metadata entries starting with 'media' as variables and then
# delete them because those are for internal use only.
- original_location = file_metadata['media:location']
- file_metadata = dict([(key, value)
- for key, value in file_metadata.iteritems() if
- key.split(":")[0] != 'media'])
+ original_location = file_metadata['location']
try:
json_ld_metadata = compact_and_validate(file_metadata)
except ValidationError, exc:
@@ -175,7 +172,7 @@ u"FAIL: This file is larger than the upload limits for this site.")
def parse_csv_file(file_contents):
"""
The helper function which converts the csv file into a dictionary where each
- item's key is the provided value 'media:id' and each item's value is another
+ item's key is the provided value 'id' and each item's value is another
dictionary.
"""
list_of_contents = file_contents.split('\n')
@@ -184,12 +181,12 @@ def parse_csv_file(file_contents):
objects_dict = {}
# Build a dictionary
- for line in lines:
+ for index, line in enumerate(lines):
if line.isspace() or line == '': continue
values = csv_reader([line]).next()
line_dict = dict([(key[i], val)
for i, val in enumerate(values)])
- media_id = line_dict['media:id']
+ media_id = line_dict.get('id') or index
objects_dict[media_id] = (line_dict)
return objects_dict