diff --git a/ietf/secr/proceedings/utils.py b/ietf/secr/proceedings/utils.py
index ea4a5f114..8e9dc6ef4 100644
--- a/ietf/secr/proceedings/utils.py
+++ b/ietf/secr/proceedings/utils.py
@@ -36,9 +36,16 @@ def handle_upload_file(file,filename,meeting,subdir, request=None):
destination = open(os.path.join(path,filename), 'wb+')
if extension in settings.MEETING_VALID_MIME_TYPE_EXTENSIONS['text/html']:
file.open()
- text = file.read()
+ text = file.read().decode('utf-8')
             # Whole file sanitization; add back '<html>' (sanitize will remove it)
-            clean = u"<html>\n%s\n</html>\n" % sanitize_html(text)
+            clean = u"""<html>
+            <head>
+            <title>%s</title>
+            </head>
+            <body>
+            %s
+            </body>
+            </html>""" % (filename, sanitize_html(text))
destination.write(clean.encode('utf8'))
if request and clean != text:
messages.warning(request, "Uploaded html content is sanitized to prevent unsafe content. "
diff --git a/ietf/utils/html.py b/ietf/utils/html.py
index 9236a8e33..1715fec69 100644
--- a/ietf/utils/html.py
+++ b/ietf/utils/html.py
@@ -3,19 +3,38 @@
"""Utilities for working with HTML."""
import bleach
+from html5lib.filters.base import Filter
+
import debug # pyflakes:ignore
from django.utils.functional import keep_lazy
from django.utils import six
-acceptable_elements = ('a', 'abbr', 'acronym', 'address', 'b', 'big',
+acceptable_tags = ('a', 'abbr', 'acronym', 'address', 'b', 'big',
'blockquote', 'br', 'caption', 'center', 'cite', 'code', 'col',
'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'font',
- 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'ins', 'kbd',
+ 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'ins', 'kbd',
'li', 'ol', 'p', 'pre', 'q', 's', 'samp', 'small', 'span', 'strike',
'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'tfoot', 'th', 'thead',
'tr', 'tt', 'u', 'ul', 'var')
+strip_completely = ['style', 'script', ]
+
+class StripFilter(Filter):
+ def __iter__(self):
+ open_tags = []
+ for token in Filter.__iter__(self):
+ if token["type"] in ["EmptyTag", "StartTag"]:
+ open_tags.append(token["name"])
+ if not set(strip_completely) & set(open_tags):
+ yield token
+ if token["type"] in ["EmptyTag", "EndTag"]:
+ open_tags.pop()
+
+# Leave the stripping of the strip_completely tags to StripFilter
+bleach_tags = list(set(acceptable_tags) | set(strip_completely))
+cleaner = bleach.sanitizer.Cleaner(tags=bleach_tags, filters=[StripFilter], strip=True)
+
def unescape(text):
"""
Returns the given text with ampersands, quotes and angle brackets decoded
@@ -27,13 +46,10 @@ def unescape(text):
def remove_tags(html, tags):
"""Returns the given HTML sanitized, and with the given tags removed."""
- allowed = set(acceptable_elements) - set([ t.lower() for t in tags ])
+ allowed = set(acceptable_tags) - set([ t.lower() for t in tags ])
return bleach.clean(html, tags=allowed)
remove_tags = keep_lazy(remove_tags, six.text_type)
-def sanitize_html(html, tags=acceptable_elements, extra=[], remove=[], strip=True):
- tags = list(set(tags) | set(t.lower() for t in extra) ^ set(t.lower for t in remove))
- return bleach.clean(html, tags=tags, strip=strip)
+def sanitize_html(html):
+ return cleaner.clean(html)
-def clean_html(html):
- return bleach.clean(html)