escape.py 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400
  1. #
  2. # Copyright 2009 Facebook
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License"); you may
  5. # not use this file except in compliance with the License. You may obtain
  6. # a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  12. # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  13. # License for the specific language governing permissions and limitations
  14. # under the License.
  15. """Escaping/unescaping methods for HTML, JSON, URLs, and others.
  16. Also includes a few other miscellaneous string manipulation functions that
  17. have crept in over time.
  18. """
  19. import html.entities
  20. import json
  21. import re
  22. import urllib.parse
  23. from tornado.util import unicode_type
  24. import typing
  25. from typing import Union, Any, Optional, Dict, List, Callable
# Matches each character that must be escaped in HTML/XML text or
# quoted attribute values.
_XHTML_ESCAPE_RE = re.compile("[&<>\"']")
# Replacement entity for each character matched by _XHTML_ESCAPE_RE.
_XHTML_ESCAPE_DICT = {
    "&": "&amp;",
    "<": "&lt;",
    ">": "&gt;",
    '"': "&quot;",
    "'": "&#39;",
}
  34. def xhtml_escape(value: Union[str, bytes]) -> str:
  35. """Escapes a string so it is valid within HTML or XML.
  36. Escapes the characters ``<``, ``>``, ``"``, ``'``, and ``&``.
  37. When used in attribute values the escaped strings must be enclosed
  38. in quotes.
  39. .. versionchanged:: 3.2
  40. Added the single quote to the list of escaped characters.
  41. """
  42. return _XHTML_ESCAPE_RE.sub(
  43. lambda match: _XHTML_ESCAPE_DICT[match.group(0)], to_basestring(value)
  44. )
  45. def xhtml_unescape(value: Union[str, bytes]) -> str:
  46. """Un-escapes an XML-escaped string."""
  47. return re.sub(r"&(#?)(\w+?);", _convert_entity, _unicode(value))
  48. # The fact that json_encode wraps json.dumps is an implementation detail.
  49. # Please see https://github.com/tornadoweb/tornado/pull/706
  50. # before sending a pull request that adds **kwargs to this function.
  51. def json_encode(value: Any) -> str:
  52. """JSON-encodes the given Python object."""
  53. # JSON permits but does not require forward slashes to be escaped.
  54. # This is useful when json data is emitted in a <script> tag
  55. # in HTML, as it prevents </script> tags from prematurely terminating
  56. # the javascript. Some json libraries do this escaping by default,
  57. # although python's standard library does not, so we do it here.
  58. # http://stackoverflow.com/questions/1580647/json-why-are-forward-slashes-escaped
  59. return json.dumps(value).replace("</", "<\\/")
  60. def json_decode(value: Union[str, bytes]) -> Any:
  61. """Returns Python objects for the given JSON string.
  62. Supports both `str` and `bytes` inputs.
  63. """
  64. return json.loads(to_basestring(value))
  65. def squeeze(value: str) -> str:
  66. """Replace all sequences of whitespace chars with a single space."""
  67. return re.sub(r"[\x00-\x20]+", " ", value).strip()
  68. def url_escape(value: Union[str, bytes], plus: bool = True) -> str:
  69. """Returns a URL-encoded version of the given value.
  70. If ``plus`` is true (the default), spaces will be represented
  71. as "+" instead of "%20". This is appropriate for query strings
  72. but not for the path component of a URL. Note that this default
  73. is the reverse of Python's urllib module.
  74. .. versionadded:: 3.1
  75. The ``plus`` argument
  76. """
  77. quote = urllib.parse.quote_plus if plus else urllib.parse.quote
  78. return quote(utf8(value))
  79. @typing.overload
  80. def url_unescape(value: Union[str, bytes], encoding: None, plus: bool = True) -> bytes:
  81. pass
  82. @typing.overload # noqa: F811
  83. def url_unescape(
  84. value: Union[str, bytes], encoding: str = "utf-8", plus: bool = True
  85. ) -> str:
  86. pass
  87. def url_unescape( # noqa: F811
  88. value: Union[str, bytes], encoding: Optional[str] = "utf-8", plus: bool = True
  89. ) -> Union[str, bytes]:
  90. """Decodes the given value from a URL.
  91. The argument may be either a byte or unicode string.
  92. If encoding is None, the result will be a byte string. Otherwise,
  93. the result is a unicode string in the specified encoding.
  94. If ``plus`` is true (the default), plus signs will be interpreted
  95. as spaces (literal plus signs must be represented as "%2B"). This
  96. is appropriate for query strings and form-encoded values but not
  97. for the path component of a URL. Note that this default is the
  98. reverse of Python's urllib module.
  99. .. versionadded:: 3.1
  100. The ``plus`` argument
  101. """
  102. if encoding is None:
  103. if plus:
  104. # unquote_to_bytes doesn't have a _plus variant
  105. value = to_basestring(value).replace("+", " ")
  106. return urllib.parse.unquote_to_bytes(value)
  107. else:
  108. unquote = urllib.parse.unquote_plus if plus else urllib.parse.unquote
  109. return unquote(to_basestring(value), encoding=encoding)
  110. def parse_qs_bytes(
  111. qs: str, keep_blank_values: bool = False, strict_parsing: bool = False
  112. ) -> Dict[str, List[bytes]]:
  113. """Parses a query string like urlparse.parse_qs, but returns the
  114. values as byte strings.
  115. Keys still become type str (interpreted as latin1 in python3!)
  116. because it's too painful to keep them as byte strings in
  117. python3 and in practice they're nearly always ascii anyway.
  118. """
  119. # This is gross, but python3 doesn't give us another way.
  120. # Latin1 is the universal donor of character encodings.
  121. result = urllib.parse.parse_qs(
  122. qs, keep_blank_values, strict_parsing, encoding="latin1", errors="strict"
  123. )
  124. encoded = {}
  125. for k, v in result.items():
  126. encoded[k] = [i.encode("latin1") for i in v]
  127. return encoded
  128. _UTF8_TYPES = (bytes, type(None))
  129. @typing.overload
  130. def utf8(value: bytes) -> bytes:
  131. pass
  132. @typing.overload # noqa: F811
  133. def utf8(value: str) -> bytes:
  134. pass
  135. @typing.overload # noqa: F811
  136. def utf8(value: None) -> None:
  137. pass
  138. def utf8(value: Union[None, str, bytes]) -> Optional[bytes]: # noqa: F811
  139. """Converts a string argument to a byte string.
  140. If the argument is already a byte string or None, it is returned unchanged.
  141. Otherwise it must be a unicode string and is encoded as utf8.
  142. """
  143. if isinstance(value, _UTF8_TYPES):
  144. return value
  145. if not isinstance(value, unicode_type):
  146. raise TypeError("Expected bytes, unicode, or None; got %r" % type(value))
  147. return value.encode("utf-8")
  148. _TO_UNICODE_TYPES = (unicode_type, type(None))
  149. @typing.overload
  150. def to_unicode(value: str) -> str:
  151. pass
  152. @typing.overload # noqa: F811
  153. def to_unicode(value: bytes) -> str:
  154. pass
  155. @typing.overload # noqa: F811
  156. def to_unicode(value: None) -> None:
  157. pass
  158. def to_unicode(value: Union[None, str, bytes]) -> Optional[str]: # noqa: F811
  159. """Converts a string argument to a unicode string.
  160. If the argument is already a unicode string or None, it is returned
  161. unchanged. Otherwise it must be a byte string and is decoded as utf8.
  162. """
  163. if isinstance(value, _TO_UNICODE_TYPES):
  164. return value
  165. if not isinstance(value, bytes):
  166. raise TypeError("Expected bytes, unicode, or None; got %r" % type(value))
  167. return value.decode("utf-8")
  168. # to_unicode was previously named _unicode not because it was private,
  169. # but to avoid conflicts with the built-in unicode() function/type
  170. _unicode = to_unicode
  171. # When dealing with the standard library across python 2 and 3 it is
  172. # sometimes useful to have a direct conversion to the native string type
  173. native_str = to_unicode
  174. to_basestring = to_unicode
  175. def recursive_unicode(obj: Any) -> Any:
  176. """Walks a simple data structure, converting byte strings to unicode.
  177. Supports lists, tuples, and dictionaries.
  178. """
  179. if isinstance(obj, dict):
  180. return dict(
  181. (recursive_unicode(k), recursive_unicode(v)) for (k, v) in obj.items()
  182. )
  183. elif isinstance(obj, list):
  184. return list(recursive_unicode(i) for i in obj)
  185. elif isinstance(obj, tuple):
  186. return tuple(recursive_unicode(i) for i in obj)
  187. elif isinstance(obj, bytes):
  188. return to_unicode(obj)
  189. else:
  190. return obj
# I originally used the regex from
# http://daringfireball.net/2010/07/improved_regex_for_matching_urls
# but it gets all exponential on certain patterns (such as too many trailing
# dots), causing the regex matcher to never return.
# This regex should avoid those problems.
# Use to_unicode instead of tornado.util.u - we don't want backslashes getting
# processed as escapes.
#
# Used by linkify() on already-HTML-escaped text (hence the &amp;/&quot;
# alternatives).  Group 1 is the full URL, group 2 the protocol (if any),
# group 3 the slashes following the protocol.
_URL_RE = re.compile(
    to_unicode(
        r"""\b((?:([\w-]+):(/{1,3})|www[.])(?:(?:(?:[^\s&()]|&amp;|&quot;)*(?:[^!"#$%&'()*+,.:;<=>?@\[\]^`{|}~\s]))|(?:\((?:[^\s&()]|&amp;|&quot;)*\)))+)"""  # noqa: E501
    )
)
def linkify(
    text: Union[str, bytes],
    shorten: bool = False,
    extra_params: Union[str, Callable[[str], str]] = "",
    require_protocol: bool = False,
    permitted_protocols: List[str] = ["http", "https"],
) -> str:
    """Converts plain text into HTML with links.

    For example: ``linkify("Hello http://tornadoweb.org!")`` would return
    ``Hello <a href="http://tornadoweb.org">http://tornadoweb.org</a>!``

    Parameters:

    * ``shorten``: Long urls will be shortened for display.

    * ``extra_params``: Extra text to include in the link tag, or a callable
      taking the link as an argument and returning the extra text
      e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``,
      or::

          def extra_params_cb(url):
              if url.startswith("http://example.com"):
                  return 'class="internal"'
              else:
                  return 'class="external" rel="nofollow"'
          linkify(text, extra_params=extra_params_cb)

    * ``require_protocol``: Only linkify urls which include a protocol. If
      this is False, urls such as www.facebook.com will also be linkified.

    * ``permitted_protocols``: List (or set) of protocols which should be
      linkified, e.g. ``linkify(text, permitted_protocols=["http", "ftp",
      "mailto"])``. It is very unsafe to include protocols such as
      ``javascript``.
    """
    # Normalize a string extra_params once, up front; callables are invoked
    # per-link inside make_link instead.
    if extra_params and not callable(extra_params):
        extra_params = " " + extra_params.strip()

    def make_link(m: typing.Match) -> str:
        # Regex groups (see _URL_RE): 1 = full url, 2 = protocol,
        # 3 = slashes after the protocol.
        url = m.group(1)
        proto = m.group(2)
        if require_protocol and not proto:
            return url  # not protocol, no linkify

        if proto and proto not in permitted_protocols:
            return url  # bad protocol, no linkify

        href = m.group(1)
        if not proto:
            href = "http://" + href  # no proto specified, use http

        if callable(extra_params):
            params = " " + extra_params(href).strip()
        else:
            params = extra_params

        # clip long urls. max_len is just an approximation
        max_len = 30
        if shorten and len(url) > max_len:
            before_clip = url
            if proto:
                proto_len = len(proto) + 1 + len(m.group(3) or "")  # +1 for :
            else:
                proto_len = 0

            parts = url[proto_len:].split("/")
            if len(parts) > 1:
                # Grab the whole host part plus the first bit of the path
                # The path is usually not that interesting once shortened
                # (no more slug, etc), so it really just provides a little
                # extra indication of shortening.
                url = (
                    url[:proto_len]
                    + parts[0]
                    + "/"
                    + parts[1][:8].split("?")[0].split(".")[0]
                )

            if len(url) > max_len * 1.5:  # still too long
                url = url[:max_len]

            if url != before_clip:
                amp = url.rfind("&")
                # avoid splitting html char entities
                if amp > max_len - 5:
                    url = url[:amp]
                url += "..."

                if len(url) >= len(before_clip):
                    # Shortening didn't actually save anything; keep original.
                    url = before_clip
                else:
                    # full url is visible on mouse-over (for those who don't
                    # have a status bar, such as Safari by default)
                    params += ' title="%s"' % href

        return u'<a href="%s"%s>%s</a>' % (href, params, url)

    # First HTML-escape so that our strings are all safe.
    # The regex is modified to avoid character entites other than &amp; so
    # that we won't pick up &quot;, etc.
    text = _unicode(xhtml_escape(text))
    return _URL_RE.sub(make_link, text)
  288. def _convert_entity(m: typing.Match) -> str:
  289. if m.group(1) == "#":
  290. try:
  291. if m.group(2)[:1].lower() == "x":
  292. return chr(int(m.group(2)[1:], 16))
  293. else:
  294. return chr(int(m.group(2)))
  295. except ValueError:
  296. return "&#%s;" % m.group(2)
  297. try:
  298. return _HTML_UNICODE_MAP[m.group(2)]
  299. except KeyError:
  300. return "&%s;" % m.group(2)
  301. def _build_unicode_map() -> Dict[str, str]:
  302. unicode_map = {}
  303. for name, value in html.entities.name2codepoint.items():
  304. unicode_map[name] = chr(value)
  305. return unicode_map
  306. _HTML_UNICODE_MAP = _build_unicode_map()