# -*- encoding: utf-8 -*-

from __future__ import unicode_literals

import operator
import pkg_resources

from distutils.version import LooseVersion as Version

HTML5LIB_VERSION = Version(pkg_resources.get_distribution("html5lib").version)
HTML5LIB_SIMPLETREE = Version("0.95")
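# Newer html5lib releases no longer ship the "simpletree" tree builder;
# sanitize() below compares these two versions to pick a builder that the
# installed html5lib actually provides.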

from isso.compat import reduce

import html5lib
from html5lib.sanitizer import HTMLSanitizer
from html5lib.serializer import HTMLSerializer

import misaka


def Sanitizer(elements, attributes):

    class Inner(HTMLSanitizer):

        # HTML elements found in Sundown's HTML serializer [1], except for the
        # <img> tag, because images are not generated anyway.
        #
        # [1] https://github.com/vmg/sundown/blob/master/html/html.c
        allowed_elements = ["a", "p", "hr", "br", "ol", "ul", "li",
                            "pre", "code", "blockquote",
                            "del", "ins", "strong", "em",
                            "h1", "h2", "h3", "h4", "h5", "h6",
                            "table", "thead", "tbody", "th", "td"] + elements

        # href for <a> and align for <table>
        allowed_attributes = ["align", "href"] + attributes

        # remove disallowed tokens from the output
        def disallowed_token(self, token, token_type):
            return None

    return Inner
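
# Usage sketch (hedged; the element and attribute values below are purely
# illustrative): the class returned by Sanitizer() is handed to html5lib as
# its ``tokenizer`` argument, which is how sanitize() below uses it:
#
#   Inner = Sanitizer(elements=["section"], attributes=["title"])
#   html5lib.HTMLParser(tokenizer=Inner).parseFragment("<p>hi</p>")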


def sanitize(tokenizer, document):

    parser = html5lib.HTMLParser(tokenizer=tokenizer)
    domtree = parser.parseFragment(document)

    if HTML5LIB_VERSION > HTML5LIB_SIMPLETREE:
        builder = "etree"
    else:
        builder = "simpletree"

    stream = html5lib.treewalkers.getTreeWalker(builder)(domtree)
    serializer = HTMLSerializer(quote_attr_values=True, omit_optional_tags=False)

    return serializer.render(stream)
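
# For example (hedged; the exact serialization can vary across html5lib
# versions): the onclick handler is dropped because it is not listed in
# allowed_attributes, while the allowed <a href> survives:
#
#   sanitize(Sanitizer([], []), '<a href="/" onclick="alert(1)">hi</a>')
#   # -> roughly '<a href="/">hi</a>'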


def Markdown(extensions=("strikethrough", "superscript", "autolink")):

    flags = reduce(operator.xor, map(
        lambda ext: getattr(misaka, 'EXT_' + ext.upper()), extensions), 0)
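    # misaka's EXT_* constants are distinct bit flags, so XOR-folding them as
    # above yields the same bitmask as OR-ing them together.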
    md = misaka.Markdown(Unofficial(), extensions=flags)

    def inner(text):
        rv = md.render(text).rstrip("\n")
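        # Wrap bare inline output in a paragraph; anything that already opens
        # with <p> or closes with </p> is returned as-is.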
        if rv.startswith("<p>") or rv.endswith("</p>"):
            return rv
        return "<p>" + rv + "</p>"

    return inner
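
# For example (hedged; exact whitespace depends on the misaka version):
#
#   Markdown()("Hello, *World*")
#   # -> '<p>Hello, <em>World</em></p>'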


class Unofficial(misaka.HtmlRenderer):
    """A few modifications to process "common" Markdown.

    For instance, fenced code blocks (~~~ or ```) are otherwise just wrapped
    in <code>, which does not preserve line breaks. If a language is given,
    it is emitted as <code class="$lang">, compatible with Highlight.js.
    """

    def block_code(self, text, lang):
        lang = ' class="{0}"'.format(lang) if lang else ''
        return "<pre><code{1}>{0}</code></pre>\n".format(text, lang)


class Markup(object):

    def __init__(self, conf):

        parser = Markdown(conf.getlist("options"))
        sanitizer = Sanitizer(
            conf.getlist("allowed-elements"),
            conf.getlist("allowed-attributes"))

        self._render = lambda text: sanitize(sanitizer, parser(text))

    def render(self, text):
        return self._render(text)
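

if __name__ == "__main__":  # pragma: no cover
    # Minimal, hedged demo (not part of isso itself): Markup normally receives
    # the [markup] section of isso's configuration, which exposes getlist().
    # _FakeSection is a stand-in invented here purely for this demo.

    class _FakeSection(object):

        def __init__(self, options):
            self._options = options

        def getlist(self, key):
            return self._options.get(key, [])

    conf = _FakeSection({
        "options": ["strikethrough", "superscript", "autolink"],
        "allowed-elements": [],
        "allowed-attributes": []})

    print(Markup(conf).render('Hello, *World* <script>alert("xss")</script>'))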