diff --git a/drupal2spip_lal/base/convert.py b/drupal2spip_lal/base/convert.py
index ca5ab3d..cfaf83c 100644
--- a/drupal2spip_lal/base/convert.py
+++ b/drupal2spip_lal/base/convert.py
@@ -53,7 +53,7 @@ def strong_to_dl(html):
     return '\n'.join(r)
 
 
-def fetch_and_remove_logo(article):
+def fetch_and_remove_logo(article, force_download):
     def fetch_logo(src):
         """
         SPIP gère les logos à la façon d'un hack : un fichier dans IMG nommé
@@ -63,11 +63,11 @@ def fetch_and_remove_logo(article):
         filename = 'arton{}.{}'.format(article.pk, ext)
         path = os.path.join(settings.SPIP_LOGO_DIR, filename)
 
-        r = request.get(src, stream=True)
-
-        with open(path, 'wb') as fd:
-            for chunk in r.iter_content(chunk_size=128):
-                fd.write(chunk)
+        if not os.access(path, os.R_OK) or force_download:
+            r = request.get(src, stream=True)
+            with open(path, 'wb') as fd:
+                for chunk in r.iter_content(chunk_size=128):
+                    fd.write(chunk)
 
     def remove_img(img):
         has_siblings = [
@@ -100,12 +100,15 @@ def sanitarize_html(html):
     return soup.prettify(formatter="html5")
 
 
-def convert_node(node, update=False):
+def convert_node(node, options):
     """
     Le point d'entrée fonctionnel c'est les Urls. On se base donc là dessus
     pour vérifier si l'import est à faire ou pas ou encore à upgrader.
     """
 
+    update = options.get('update', False)
+    force_download = options.get('force_download', False)
+
     node_urls = drupal.UrlAlias.objects.filter(src='node/{}'.format(node.pk))
 
     spip_urls = spip.Urls.objects.filter(
@@ -169,7 +172,7 @@ def convert_node(node, update=False):
             auteur=auteur, id_objet=article.pk, objet='article'
         )
 
-    fetch_and_remove_logo(article)
+    fetch_and_remove_logo(article, force_download)
 
     for term_node in node.termnode_set.all():
         groupe, _ = spip.GroupesMots.objects.get_or_create(
diff --git a/drupal2spip_lal/base/management/commands/import.py b/drupal2spip_lal/base/management/commands/import.py
index 9cedb68..83a243b 100644
--- a/drupal2spip_lal/base/management/commands/import.py
+++ b/drupal2spip_lal/base/management/commands/import.py
@@ -19,9 +19,14 @@ class Command(BaseCommand):
             action='store_true',
             help='Force existing articles to be updated. Default is skip.',
         )
+        parser.add_argument(
+            '--force-download',
+            action='store_true',
+            help='Force existing resources to be downloaded. Default is skip.',
+        )
 
     def handle(self, **options):
         [
-            convert_node(n, update=options.get('update', False))
+            convert_node(n, options)
             for n in Node.objects.filter(pk__in=options.get('node', []))
         ]
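
Usage sketch: with this patch, fetch_logo skips the HTTP request whenever the
arton file is already readable on disk, and the new --force-download flag
restores the previous unconditional download. A minimal invocation sketch,
assuming the command is registered as "import" from its module name and that
node ids are accepted through the existing "node" argument, which is defined
outside this diff:

    # Hedged sketch: 'node' and the example pk 42 are assumptions; only
    # '--force-download' (dest 'force_download') is introduced by this patch.
    from django.core.management import call_command

    call_command('import', node=[42], update=True, force_download=True)

Since handle() now forwards the whole options dict, convert_node() reads both
'update' and 'force_download' from it with safe defaults, so callers that omit
either flag keep the skip behaviour.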